diff --git a/.gitignore b/.gitignore index 117cd399..12bfc4b8 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,6 @@ */scratch/* *.DS_Store* config_user.json -.DS_Store *.egg-info/ docs/build/ *.bat diff --git a/docs/capabilities/misc_capabilities.rst b/docs/capabilities/misc_capabilities.rst index 49d3ccdc..aa8ffed2 100644 --- a/docs/capabilities/misc_capabilities.rst +++ b/docs/capabilities/misc_capabilities.rst @@ -10,7 +10,7 @@ Miscellaneous Interpret Tokamak equilibrium ----------------------------- -MITIM has a quick g-eqdsk file reader and visualizer that is based on the ``omfit-classes`` package. +MITIM has a quick g-eqdsk file reader and visualizer that is based on the ``megpy`` package. To open and plot a g-eqdsk file: @@ -47,7 +47,7 @@ To open and plot an ``input.gacode`` file: .. code-block:: python from mitim_tools.gacode_tools import PROFILEStools - p = PROFILEStools.PROFILES_GACODE(file) + p = PROFILEStools.gacode_state(file) p.plot() It will plot results in a notebook-like plot with different tabs: diff --git a/docs/capabilities/optimization.rst b/docs/capabilities/optimization.rst index 7ee3c90b..4b4d7782 100644 --- a/docs/capabilities/optimization.rst +++ b/docs/capabilities/optimization.rst @@ -44,7 +44,7 @@ Select the location of the MITIM namelist (see :ref:`Understanding the MITIM nam .. code-block:: python folder = Path('MITIM-fusion/tests/scratch/mitim_tut') - namelist = Path('MITIM-fusion/templates/main.namelist.json') + namelist = Path('MITIM-fusion/templates/namelist.optimization.yaml') Then create your custom optimization object as a child of the parent ``STRATEGYtools.opt_evaluator`` class. You only need to modify what operations need to occur inside the ``run()`` (where operations/simulations happen) and ``scalarized_objective()`` (to define what is the target to maximize) methods. @@ -105,7 +105,7 @@ Now we can create and launch the MITIM optimization process from the beginning ( .. 
code-block:: python - MITIM_BO = STRATEGYtools.MITIM_BO( opt_fun1D, cold_startYN = True ) + MITIM_BO = STRATEGYtools.MITIM_BO( opt_fun1D, cold_start = True ) MITIM_BO.run() Once finished, we can plot the results easily with: @@ -118,7 +118,7 @@ Once finished, we can plot the results easily with: Understanding the MITIM namelist ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Checkout file ``MITIM-fusion/templates/main.namelist.json``, which has comprehensive comments. +Checkout file ``MITIM-fusion/templates/namelist.optimization.yaml``, which has comprehensive comments. *Under development* diff --git a/docs/capabilities/tglf_capabilities.rst b/docs/capabilities/tglf_capabilities.rst index 852b1534..8ac3ed7c 100644 --- a/docs/capabilities/tglf_capabilities.rst +++ b/docs/capabilities/tglf_capabilities.rst @@ -68,30 +68,31 @@ To generate the input files (input.tglf) to TGLF at each radial location, MITIM .. code-block:: python - cdf = tglf.prep(folder,inputgacode=inputgacode_file,cold_start=False ) + _ = tglf.prep(inputgacode_file,folder,cold_start=False) -.. tip:: - The ``.prep()`` method, when applied to a case that starts with an input.gacode file, launches a `TGYRO` run for a "zero" iteration to generate *input.tglf* at specific ``rho`` locations from the *input.gacode*. This method to generate input files is inspired by how the `OMFIT framework `_ works. +Now, we are ready to run TGLF. Once the ``prep()`` command has finished, one can run TGLF with different settings and assumptions. +That is why, at this point, a sub-folder name for this specific run can be provided. Similarly to the ``prep()`` command, a ``cold_start`` flag can be provided. + +The set of control inputs to TGLF (saturation rule, electromagnetic effects, basis functions, etc.) are provided following the following sequential logic: + +1. Each code has a set of default settings, which for TGLF are specified in ``templates/input.tglf.controls``. 
This is the base namelist of settings that will be used if no other specification is provided. +2. Then, a ``code_settings`` argument can be provided to the ``run()`` command. This argument refers to a specific set of settings that are specified in ``templates/input.tglf.models.yaml``, and that will overwrite the default settings in ``input.tglf.controls``. +3. Finally, an ``extraOptions`` argument can be provided to the ``run()`` command, which is a dictionary of specific settings to change from the previous two steps. -Now, we are ready to run TGLF. Once the ``prep()`` command has finished, one can run TGLF with different settings and assumptions. That is why, at this point, a sub-folder name for this specific run can be provided. Similarly to the ``prep()`` command, a ``cold_start`` flag can be provided. -The set of control inputs to TGLF (like saturation rule, electromagnetic effects, etc.) are provided in two ways. -First, the argument ``TGLFsettings`` indicates the base case to start with. -The user is referred to ``templates/input.tglf.models.json`` to understand the meaning of each setting, and ``templates/input.tglf.controls`` for the default setup. -Second, the argument ``extraOptions`` can be passed as a dictionary of variables to change. For example, the following two commands will run TGLF with saturation rule number 2 with and without electromagnetic effets. After each ``run()`` command, a ``read()`` is needed, to populate the *tglf.results* dictionary with the TGLF outputs (``label`` refers to the dictionary key for each run): .. 
code-block:: python - tglf.run( subFolderTGLF = 'yes_em_folder', - TGLFsettings = 5, - extraOptions = {}, + tglf.run( subfolder = 'yes_em_folder', + code_settings = 'SAT2', + extraOptions = {'USE_BPER':True}, cold_start = False ) tglf.read( label = 'yes_em' ) - tglf.run( subFolderTGLF = 'no_em_folder', - TGLFsettings = 5, + tglf.run( subfolder = 'no_em_folder', + code_settings = 'SAT2', extraOptions = {'USE_BPER':False}, cold_start = False ) @@ -139,7 +140,7 @@ Similarly as in the previous section, you need to run the ``prep()`` command, bu .. code-block:: python - cdf = tglf.prep(folder,cold_start=False) + cdf = tglf.prep_using_tgyro(folder,cold_start=False) .. note:: @@ -168,7 +169,7 @@ If you have a input.tglf file already, you can still use this script to run it. inputtglf_file = Path('MITIM-fusion/tests/data/input.tglf') tglf = TGLFtools.TGLF() - tglf.prep_from_tglf( folder, inputtglf_file, input_gacode = inputgacode_file ) + tglf.prep_from_file( folder, inputtglf_file, input_gacode = inputgacode_file ) The rest of the workflow is identical, including ``.run()``, ``.read()`` and ``.plot()``. @@ -190,7 +191,7 @@ The rest of the workflow is identical, including ``.run()``, ``.read()`` and ``. inputtglf_file = Path('MITIM-fusion/tests/data/input.tglf') tglf = TGLFtools.TGLF() - tglf.prep_from_tglf( folder, inputtglf_file ) + tglf.prep_from_file( folder, inputtglf_file ) tglf.read (folder = f'{folder}/', label = 'yes_em' ) tglf.plot( labels = ['yes_em'] ) @@ -215,26 +216,4 @@ Run 1D scans of TGLF input parameter TGLF aliases ------------ -MITIM provides a few useful aliases, including for the TGLF tools: - -- To plot results that exist in a folder ``run1/``, with or without a suffix and with or without an input.gacode file (for normalizations): - - .. 
code-block:: bash - - mitim_plot_tglf run1/ - mitim_plot_tglf run1/ --suffix _0.55 --gacode input.gacode - - -- To run TGLF in a folder ``run1/`` using input file ``input.tglf``, with or without an input.gacode file (for normalizations): - - .. code-block:: bash - - mitim_run_tglf --folder run1/ --tglf input.tglf - mitim_run_tglf --folder run1/ --tglf input.tglf --gacode input.gacode - -- To run a parameter scan in a folder ``scan1/`` using input file ``input.tglf``, with or without an input.gacode file (for normalizations): - - .. code-block:: bash - - mitim_run_tglf --folder scan1/ --tglf input.tglf --gacode input.gacode --scan RLTS_2 - +MITIM provides a few useful aliases, including for the TGLF tools: :ref:`Shell Scripts` diff --git a/docs/capabilities/tgyro_capabilities.rst b/docs/capabilities/tgyro_capabilities.rst index a099b69b..0efec858 100644 --- a/docs/capabilities/tgyro_capabilities.rst +++ b/docs/capabilities/tgyro_capabilities.rst @@ -35,7 +35,7 @@ Create a PROFILES class from the input.gacode file: .. code-block:: python - profiles = PROFILEStools.PROFILES_GACODE(gacode_file) + profiles = PROFILEStools.gacode_state(gacode_file) .. tip:: @@ -83,7 +83,7 @@ Now TGYRO can be run: PredictionSet = PredictionSet, TGLFsettings = TGLFsettings, TGYRO_solver_options = solver, - Physics_options = physics_options) + TGYRO_physics_options = physics_options) Read: @@ -120,7 +120,7 @@ Create a profiles class with the `input.gacode` file that TGYRO used to run and gacode_file = Path('MITIM-fusion/tests/data/input.gacode') folder = Path('MITIM-fusion/tests/scratch/tgyro_tut/run1') - profiles = PROFILEStools.PROFILES_GACODE(gacode_file) + profiles = PROFILEStools.gacode_state(gacode_file) tgyro_out = TGYROtools.TGYROoutput(folder,profiles=profiles) Plot results: @@ -133,11 +133,5 @@ Plot results: TGYRO aliases ------------- -MITIM provides a few useful aliases, including for the TGYRO tools: - -- To plot results that exist in a folder ``run1/``: - - .. 
code-block:: bash - - mitim_plot_tgyro run1/ +MITIM provides a few useful aliases, including for the TGYRO tools: :ref:`Shell Scripts` diff --git a/docs/capabilities/transp_capabilities.rst b/docs/capabilities/transp_capabilities.rst index 8d194bb9..8fb3bfae 100644 --- a/docs/capabilities/transp_capabilities.rst +++ b/docs/capabilities/transp_capabilities.rst @@ -123,24 +123,5 @@ If TRANSP has already been run and the .CDF results file already exists (``cdf_f TRANSP aliases -------------- -MITIM provides a few useful aliases, including for the TRANSP tools: - -- To read TRANSP results in CDF files (which stores the results in the ``cdfs`` list. First run can be plotted with ``cdfs[0].plot``): - - .. code-block:: bash - - mitim_read_transp 12345A01.CDF 12345A02.CDF - -- To interact with the TRANSP globus grid: - - .. code-block:: bash - - # To check status of runs under username pablorf - mitim_trcheck pablorf - - # To remove from the grid CMOD run numbers 88664P01, 88664P03 from user pablorf - mitim_trclean 88664P CMOD --numbers 1,3 - - # To get results file (intermediate or final) from CMOD run 152895P01 from user pablorf - mitim_trlook 152895P01 CMOD +MITIM provides a few useful aliases, including for the TRANSP tools: :ref:`Shell Scripts` diff --git a/docs/capabilities/vitals_capabilities.rst b/docs/capabilities/vitals_capabilities.rst index 5c21189f..8e9e5da6 100644 --- a/docs/capabilities/vitals_capabilities.rst +++ b/docs/capabilities/vitals_capabilities.rst @@ -38,8 +38,8 @@ As a starting point of VITALS, you need to prepare and run TGLF for the base cas rho = 0.5 tglf = TGLFtools.TGLF( rhos = [ rho ] ) - cdf = tglf.prep( folder, inputgacode = inputgacode_file) - tglf.run( subFolderTGLF = 'run_base', TGLFsettings = 5) + cdf = tglf.prep( inputgacode_file, folder ) + tglf.run( subfolder = 'run_base', code_settings = 'SAT3') tglf.read( label = 'run_base' ) @@ -120,7 +120,7 @@ Once the VITALS object has been created, parameters such as the TGLF control inp .. 
code-block:: python - vitals_fun.TGLFparameters['TGLFsettings'] = 5 + vitals_fun.TGLFparameters['code_settings'] = 'SAT3' vitals_fun.TGLFparameters['extraOptions'] = {} .. note:: diff --git a/docs/faq.rst b/docs/faq.rst index 9205ad98..bc59e530 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -30,7 +30,7 @@ Issues during MITIM installation .. code-block:: console - pip3 install -e MITIM-fusion\[pyqt\] --no-cache + pip3 install -e MITIM-fusion\[pyqt\] --no-cache-dir Issues during MITIM tests ------------------------- @@ -61,9 +61,3 @@ Issues during MITIM tests Make sure you that, if you have keys, you have added them to authorized_keys in both server and tunnel machines. - - -Issues during PORTALS simulations ---------------------------------- - -Nothing here yet. diff --git a/docs/index.rst b/docs/index.rst index e95508de..f4ab8834 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,8 +1,8 @@ MITIM: a toolbox for modeling tasks in plasma physics and fusion energy ======================================================================= -The **MITIM** (MIT Integrated Modeling) is a versatile and user-friendly Python library designed for *plasma physics* and *fusion energy* researchers, distributed as the `MITIM-fusion `_ GitHub repository. -Developed in 2018 by `Pablo Rodriguez-Fernandez `_ at the MIT Plasma Science and Fusion Center, this light-weight, command-line, +The **MITIM** (MIT Integrated Modeling) is a versatile and user-friendly Python library designed for plasma physics and fusion energy researchers, distributed as the `MITIM-fusion `_ GitHub repository. +Spearheaded by `Pablo Rodriguez-Fernandez `_ at the MIT Plasma Science and Fusion Center, this light-weight, command-line, object-oriented toolbox streamlines the execution and interpretation of physics models and simplifies complex optimization tasks. MITIM stands out for its modular nature, making it particularly useful for integrating models with optimization workflows. 
@@ -28,7 +28,7 @@ Overview -------- Developed at the MIT Plasma Science and Fusion Center, MITIM emerged in 2023 as a progression from the PORTALS project (*Performance Optimization of Reactors via Training of Active Learning Surrogates*). -This evolution marks a significant enhancement in our approach to transport and optimization in plasma physics research. +This evolution marked a significant enhancement in our approach to transport and optimization in plasma physics research. MITIM's core functionality revolves around the standalone execution of codes and the nuanced interpretation of results through object-oriented Python scripts. This enables researchers to seamlessly integrate these scripts into custom surrogate-based optimization frameworks, diff --git a/docs/installation.rst b/docs/installation.rst index 7558ecf2..5446bcf0 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -36,13 +36,6 @@ Use ``pip`` to install all the required MITIM requirements: The optional argument ``[pyqt]`` added in the intallation command above must only be used if the machine allows for graphic interfaces. If running in a computing cluster, remove that flag. The ``pyqt`` package is used to create condensed figures into a single notebook when interpreting and plotting simulation results. - - If you wish to install all capabilities (including compatibility with `OMFIT `_), it is recommended that ``pip`` is run as follows: - - .. code-block:: console - - pip3 install -e MITIM-fusion[pyqt,omfit] - If you were unsuccessful in the installation, check out our :ref:`Frequently Asked Questions` section. @@ -53,11 +46,11 @@ User configuration In ``MITIM-fusion/templates/``, there is a ``config_user_example.json`` with specifications of where to run certain codes and what the login requirements are. There are also options to specify the default verbose level and the default DPI for the figures in notebooks. 
Users need to specify their own configurations in a file that follows the same structure. -There are different options to handle this config file. +There are different options to handle this config file: 1. Create a new file named ``config_user.json`` **in the same folder** ``MITIM-fusion/templates/``. MITIM will automatically look for this file when running the code. 2. Create a new file anywhere in your machine. Then, **set the environment variable** ``MITIM_CONFIG`` to the path of this file. MITIM will automatically look for this file when running the code. -3. Create a new file anywhere in your machine. **Do this at the beginning of your script**: +3. Create a new file anywhere in your machine. Then, **add these lines at the beginning of your script**: .. code-block:: python @@ -87,16 +80,29 @@ In this example, the ``identity`` option is only required if you are running in { "preferences": { "tglf": "engaging", + "neo": "local", "tgyro": "perlmutter", "verbose_level": "5", "dpi_notebook": "80" }, + "local": { + "machine": "local", + "username": "YOUR_USERNAME", + "scratch": "/Users/YOUR_USERNAME/scratch/", + "modules": "", + "cores_per_node": 8, + "gpus_per_node": 0 + }, "engaging": { "machine": "eofe7.mit.edu", "username": "YOUR_USERNAME", "scratch": "/pool001/YOUR_USERNAME/scratch/", + "modules": "", + "cores_per_node": 64, + "gpus_per_node": 0, "slurm": { "partition": "sched_mit_psfc", + "exclusive": false, "exclude": "node584" } }, @@ -104,12 +110,16 @@ In this example, the ``identity`` option is only required if you are running in "machine": "perlmutter.nersc.gov", "username": "YOUR_USERNAME", "scratch": "/pscratch/sd/p/YOUR_USERNAME/scratch/", + "modules": "", "identity": "/Users/YOUR_USERNAME/.ssh/id_rsa_nersc", + "cores_per_node": 32, + "gpus_per_node": 4, "slurm": { "account": "YOUR_ACCOUNT", "partition": "YOUR_PARTITION", "constraint": "gpu", - "mem": "4GB" + "mem": "4GB", + "email": "optional@email" } } } @@ -119,20 +129,15 @@ MITIM will attempt to 
create SSH and SFTP connections to that machine, and will .. attention:: - Note that MITIM does not maintain or develop the simulation codes that are used within it, such as those from `GACODE `_ or `TRANSP `_. It assumes that proper permissions have been obtained and that working versions of those codes exist in the machine configured to run them. + Note that MITIM does not maintain or develop the simulation codes that are used within it, such as those from `GACODE `_ or `TRANSP `_. It assumes that proper permissions have been obtained and that working versions of those codes exist in the machine configured to run them. Please note that MITIM will try to run the codes with standard commands that the shell must understand. For example, to run the TGLF code, MITIM will want to execute the command ``tglf`` in the *eofe7.mit.edu* machine as specified in the example above. There are several ways to make sure that the shell understands the command: -.. dropdown:: 1. Source at shell initialization (recommended) +.. dropdown:: 1. Send specific commands per code (recommended) - Is the commands are available upon login in that machine (e.g. in your personal ``.bashrc`` file), MITIM will be able to run them. - Please note that aliases are usually not available in non-interactive shells, and it is recommended to use full paths and to avoid print (echo) statements. - -.. dropdown:: 2. Send specific commands per code - - Finally, you can populate the ``modules`` option per machine in your ``config_user.json`` file. For example: + You can populate the ``modules`` option per machine in your ``config_user.json`` file. For example: .. code-block:: console @@ -142,10 +147,13 @@ There are several ways to make sure that the shell understands the command: ... } - Note that you can the same machine listed several times in your ``config_user.json`` file, with different ``modules`` options per code. You just need to give it a different name per code. +.. dropdown:: 2. 
Source at shell initialization + + If the commands are available upon login in that machine (e.g. in your personal ``.bashrc`` file), MITIM will be able to run them. + Please note that aliases are usually not available in non-interactive shells, and it is recommended to use full paths and to avoid print (echo) statements. License and contributions diff --git a/docs/requirements.txt b/docs/requirements.txt index 9d91ba68..ae1a84d8 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -5,32 +5,3 @@ sphinx-copybutton sphinx-togglebutton sphinx-panels sphinxcontrib-images -h5py -matplotlib -importlib-metadata -IPython -psutil -pip -numpy -matplotlib -argparse -h5py -netCDF4 -xarray -pandas -xlsxwriter -statsmodels -dill -IPython -pyDOE -multiprocessing_on_dill -deap -paramiko -tqdm -shapely -freegs -botorch -scikit-image -psutil -onnx2pytorch -tensorflow \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index bae000fd..aa7981e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta" [project] name = "MITIM" -version = "3.0.0" +version = "4.0.0" description = "MIT Integrated Modeling Suite for Fusion Applications" readme = "README.md" -requires-python = ">=3.10, <3.13" # Notes: 3.9 has issues with the latest BOTORCH, 3.13 has issues with tensorflow (nn) and omfit_classesv (omfit_new) +requires-python = ">=3.10, <3.13" # Notes: 3.9 has issues with the latest botorch, 3.13 has issues with tensorflow license = {file = "LICENSE"} authors = [ {name = "P. 
Rodriguez-Fernandez", email = "pablorf@mit.edu"}, @@ -40,25 +40,22 @@ dependencies = [ "shapely", "freegs", "botorch", - "scikit-image", # Stricly not for MITIM, but good to have for pygacode "psutil", - "onnx2pytorch", # Stricly not for MITIM, but good to use ONNX models - "tensorflow" + "tensorflow", + "f90nml", + "pyyaml", + "megpy>=2.0", + "fibe>=0.3", + "scikit-image", # Stricly not for MITIM, but good to have for pygacode + #"onnx2pytorch", # Stricly not for MITIM, but good to use ONNX models + # "vmecpp", + # "quends @ git+https://github.com/sandialabs/quends.git" ] [project.optional-dependencies] pyqt = [ "PyQt6", ] -omfit = [ - "omfit_classes>3.2024.19.2", # Otherwise, it will need an old version of matplotlib, matplotlib<3.6 - "scipy<1.14.0", # As of 08/08/2024, because of https://github.com/gafusion/OMFIT-source/issues/7104 - "numpy<2.0.0", # For the xarray requirement below to work - "xarray==2022.6.0", # As of 08/08/2024, because https://github.com/gafusion/OMFIT-source/issues/7104. 
Froze this version because of the PLASMAstate xr reader (importlib_metadata issues) - "omas", - "fortranformat", - "openpyxl", -] test = [ "pytest", "coverage", @@ -70,20 +67,26 @@ test = [ "Source" = "https://github.com/pabloprf/MITIM-fusion" [project.scripts] -# mitim_tools interfaces: read, run, plot + mitim_plot_gacode = "mitim_tools.gacode_tools.scripts.read_gacode:main" mitim_plot_tgyro = "mitim_tools.gacode_tools.scripts.read_tgyro:main" mitim_plot_tglf = "mitim_tools.gacode_tools.scripts.read_tglf:main" # [--suffix _0.55] [--gacode input.gacode] mitim_plot_cgyro = "mitim_tools.gacode_tools.scripts.read_cgyro:main" mitim_plot_eq = "mitim_tools.gs_tools.scripts.read_eq:main" mitim_plot_transp = "mitim_tools.transp_tools.scripts.read_transp:main" - -mitim_run_tglf = "mitim_tools.gacode_tools.scripts.run_tglf:main" # (folder input.tglf) [--gacode input.gacode] [--scan RLTS_2] [--drives True] - -# Optimizations +mitim_plot_eped = "mitim_tools.eped_tools.scripts.plot_eped:main" +mitim_plot_maestro = "mitim_modules.maestro.scripts.plot_maestro:main" # --beats 2 (for the last two beats) --only transp mitim_plot_opt = "mitim_tools.opt_tools.scripts.read:main" # Not transferred: --type 4 --resolution 20 mitim_plot_portals = "mitim_modules.portals.scripts.read_portals:main" + +mitim_run_portals = "mitim_modules.portals.scripts.run_portals:main" # folder --input input.gacode --namelist namelist.portals.yaml --cold_start +mitim_run_maestro = "mitim_modules.maestro.scripts.run_maestro:main" # add file argument +mitim_run_tglf = "mitim_tools.gacode_tools.scripts.run_tglf:main" # (folder input.tglf) [--gacode input.gacode] [--scan RLTS_2] [--drives True] +mitim_run_transp = "mitim_tools.transp_tools.scripts.run_transp:main" # To run TRANSP (in folder with required files): mitim_run_transp 88664 P01 CMOD --version tshare --trmpi 32 --toricmpi 32 --ptrmpi 32 mitim_slurm = "mitim_tools.opt_tools.scripts.slurm:main" +mitim_compare_nml = 
"mitim_tools.misc_tools.scripts.compare_namelist:main" +mitim_scp = "mitim_tools.misc_tools.scripts.retrieve_files:main" # e.g. mitim_scp mfews15 --files /home/pablorf/file1 --folders /home/pablorf/folder1 +mitim_check_maestro = "mitim_modules.maestro.scripts.check_maestro:main" # TRANSP mitim_trcheck = "mitim_tools.transp_tools.scripts.run_check:main" # e.g. mitim_trcheck pablorf @@ -91,20 +94,6 @@ mitim_trcheck_p = "mitim_tools.transp_tools.scripts.run_check_periodic:main" # e mitim_trclean = "mitim_tools.transp_tools.scripts.run_clean:main" # e.g. mitim_trclean 88664P CMOD --numbers 1,2,3 mitim_trlook = "mitim_tools.transp_tools.scripts.run_look:main" # e.g. mitim_trlook 152895P01 CMOD --nofull --plot --remove -# MAESTRO -mitim_plot_maestro = "mitim_modules.maestro.scripts.plot_maestro:main" # --beats 2 (for the last two beats) --only transp -mitim_run_maestro = "mitim_modules.maestro.scripts.run_maestro:main" # add file argument - -# To run TRANSP (in folder with required files): mitim_run_transp 88664 P01 CMOD --version tshare --trmpi 32 --toricmpi 32 --ptrmpi 32 -mitim_run_transp = "mitim_tools.transp_tools.scripts.run_transp:main" - -# Others -mitim_compare_nml = "mitim_tools.misc_tools.scripts.compare_namelist:main" -mitim_scp = "mitim_tools.misc_tools.scripts.retrieve_files:main" # e.g. 
mitim_scp mfews15 --files /home/pablorf/file1 --folders /home/pablorf/folder1 -#eff_job="mitim_tools.misc_tools.PARALLELtools.py $1" # Give mitim.out or slurm_output.dat -mitim_check_maestro = "mitim_modules.maestro.scripts.check_maestro:main" - - [tool.pytest.ini_options] markers = [ ] diff --git a/regressions/data/input.gacode b/regressions/data/input.gacode deleted file mode 100644 index 403d660b..00000000 --- a/regressions/data/input.gacode +++ /dev/null @@ -1,1755 +0,0 @@ -# *original : Thu May 27 12:10:51 EDT 2021 -# *statefile : 10001.cdf -# *gfile : 10001.geq 27May2021 t~ 2.50000 -# *cerfile : null -# *vgen : null -# *tgyro : null -# -# nexp -41 -# nion -4 -# shot -12345 -# name -D C N D -# type -[therm] [therm] [therm] [fast] -# masse - 5.4488741E-04 -# mass - 2.0005209E+00 1.1930011E+01 1.3912254E+01 2.0005209E+00 -# ze --1.0000000E+00 -# z - 1.0000000E+00 6.0000000E+00 7.0000000E+00 1.0000000E+00 -# torfluxa | Wb/radian --4.9977993E-01 -# rcentr | m - 1.6118747E+00 -# bcentr | T --2.4105557E+00 -# current | MA --9.9696344E-01 -# rho | - - 1 0.0000000E+00 - 2 2.5000000E-02 - 3 5.0000000E-02 - 4 7.5000000E-02 - 5 1.0000000E-01 - 6 1.2500000E-01 - 7 1.5000000E-01 - 8 1.7500000E-01 - 9 2.0000000E-01 - 10 2.2500000E-01 - 11 2.5000000E-01 - 12 2.7500000E-01 - 13 3.0000000E-01 - 14 3.2500000E-01 - 15 3.5000000E-01 - 16 3.7500000E-01 - 17 4.0000000E-01 - 18 4.2500000E-01 - 19 4.5000000E-01 - 20 4.7500000E-01 - 21 5.0000000E-01 - 22 5.2500000E-01 - 23 5.5000000E-01 - 24 5.7500000E-01 - 25 6.0000000E-01 - 26 6.2500000E-01 - 27 6.5000000E-01 - 28 6.7500000E-01 - 29 7.0000000E-01 - 30 7.2500000E-01 - 31 7.5000000E-01 - 32 7.7500000E-01 - 33 8.0000000E-01 - 34 8.2500000E-01 - 35 8.5000000E-01 - 36 8.7500000E-01 - 37 9.0000000E-01 - 38 9.2500000E-01 - 39 9.5000000E-01 - 40 9.7500000E-01 - 41 1.0000000E+00 -# rmin | m - 1 0.0000000E+00 - 2 1.4227510E-02 - 3 2.8466934E-02 - 4 4.2718272E-02 - 5 5.6983341E-02 - 6 7.1235625E-02 - 7 8.5486982E-02 - 8 9.9745301E-02 - 9 
1.1401303E-01 - 10 1.2828767E-01 - 11 1.4256987E-01 - 12 1.5685875E-01 - 13 1.7115613E-01 - 14 1.8546452E-01 - 15 1.9978907E-01 - 16 2.1413214E-01 - 17 2.2848774E-01 - 18 2.4284758E-01 - 19 2.5717641E-01 - 20 2.7141022E-01 - 21 2.8548256E-01 - 22 2.9936469E-01 - 23 3.1305558E-01 - 24 3.2655537E-01 - 25 3.3986493E-01 - 26 3.5297568E-01 - 27 3.6588018E-01 - 28 3.7857274E-01 - 29 3.9104437E-01 - 30 4.0328613E-01 - 31 4.1529208E-01 - 32 4.2704488E-01 - 33 4.3854427E-01 - 34 4.4977082E-01 - 35 4.6071034E-01 - 36 4.7134821E-01 - 37 4.8165281E-01 - 38 4.9157405E-01 - 39 5.0099974E-01 - 40 5.0972631E-01 - 41 5.1754440E-01 -# polflux | Wb/radian - 1 0.0000000E+00 - 2 -4.3432751E-04 - 3 -1.2679152E-03 - 4 -2.8260758E-03 - 5 -5.0212465E-03 - 6 -7.8402786E-03 - 7 -1.1286703E-02 - 8 -1.5359906E-02 - 9 -2.0057464E-02 - 10 -2.5379258E-02 - 11 -3.1326011E-02 - 12 -3.7898070E-02 - 13 -4.5096351E-02 - 14 -5.2922003E-02 - 15 -6.1375871E-02 - 16 -7.0456348E-02 - 17 -8.0154148E-02 - 18 -9.0432210E-02 - 19 -1.0116711E-01 - 20 -1.1211269E-01 - 21 -1.2302423E-01 - 22 -1.3379089E-01 - 23 -1.4438682E-01 - 24 -1.5479785E-01 - 25 -1.6500794E-01 - 26 -1.7500069E-01 - 27 -1.8476092E-01 - 28 -1.9427599E-01 - 29 -2.0353478E-01 - 30 -2.1252855E-01 - 31 -2.2125020E-01 - 32 -2.2969405E-01 - 33 -2.3785549E-01 - 34 -2.4573046E-01 - 35 -2.5331441E-01 - 36 -2.6060073E-01 - 37 -2.6757807E-01 - 38 -2.7421986E-01 - 39 -2.8046577E-01 - 40 -2.8620030E-01 - 41 -2.9125962E-01 -# q | - - 1 9.9589697E-01 - 2 9.9481734E-01 - 3 9.9365998E-01 - 4 9.9472206E-01 - 5 9.9748836E-01 - 6 9.9714908E-01 - 7 9.9685286E-01 - 8 9.9711845E-01 - 9 9.9766262E-01 - 10 9.9792996E-01 - 11 9.9807024E-01 - 12 9.9811617E-01 - 13 9.9798908E-01 - 14 9.9775859E-01 - 15 9.9752202E-01 - 16 9.9776944E-01 - 17 9.9968513E-01 - 18 1.0077790E+00 - 19 1.0327685E+00 - 20 1.0830403E+00 - 21 1.1518867E+00 - 22 1.2279928E+00 - 23 1.3081780E+00 - 24 1.3932073E+00 - 25 1.4838685E+00 - 26 1.5809555E+00 - 27 1.6849893E+00 - 28 1.7965734E+00 - 29 
1.9163970E+00 - 30 2.0450639E+00 - 31 2.1834084E+00 - 32 2.3323207E+00 - 33 2.4929699E+00 - 34 2.6668344E+00 - 35 2.8561567E+00 - 36 3.0647015E+00 - 37 3.2991758E+00 - 38 3.5793870E+00 - 39 3.9449559E+00 - 40 4.4949697E+00 - 41 5.3049685E+00 -# w0 | rad/s - 1 3.7830865E+02 - 2 -9.2485499E+03 - 3 -1.7523088E+04 - 4 -1.5272594E+04 - 5 -1.4187521E+04 - 6 -1.3166442E+04 - 7 -1.2258841E+04 - 8 -1.1556091E+04 - 9 -1.1091054E+04 - 10 -1.0836130E+04 - 11 -1.0721431E+04 - 12 -1.0669172E+04 - 13 -1.0614913E+04 - 14 -1.0522472E+04 - 15 -1.0388046E+04 - 16 -1.0230429E+04 - 17 -1.0077721E+04 - 18 -9.9541613E+03 - 19 -9.8511888E+03 - 20 -9.7436746E+03 - 21 -9.6509289E+03 - 22 -9.5955715E+03 - 23 -9.5478774E+03 - 24 -9.4724843E+03 - 25 -9.3538176E+03 - 26 -9.1826275E+03 - 27 -8.9593418E+03 - 28 -8.6942391E+03 - 29 -8.4033600E+03 - 30 -8.1007805E+03 - 31 -7.7827320E+03 - 32 -7.4123408E+03 - 33 -6.9043116E+03 - 34 -6.1647569E+03 - 35 -5.1821014E+03 - 36 -4.0701056E+03 - 37 -3.0589547E+03 - 38 -2.3900318E+03 - 39 -2.1738929E+03 - 40 -2.3759322E+03 - 41 -2.7268136E+03 -# rmaj | m - 1 1.6484918E+00 - 2 1.6484700E+00 - 3 1.6484046E+00 - 4 1.6482956E+00 - 5 1.6481532E+00 - 6 1.6479721E+00 - 7 1.6477622E+00 - 8 1.6475201E+00 - 9 1.6472411E+00 - 10 1.6469243E+00 - 11 1.6465718E+00 - 12 1.6461826E+00 - 13 1.6457619E+00 - 14 1.6453072E+00 - 15 1.6448203E+00 - 16 1.6442986E+00 - 17 1.6437408E+00 - 18 1.6431463E+00 - 19 1.6425094E+00 - 20 1.6418103E+00 - 21 1.6410369E+00 - 22 1.6401749E+00 - 23 1.6392320E+00 - 24 1.6382086E+00 - 25 1.6371069E+00 - 26 1.6359279E+00 - 27 1.6346751E+00 - 28 1.6333557E+00 - 29 1.6319658E+00 - 30 1.6305141E+00 - 31 1.6289993E+00 - 32 1.6274306E+00 - 33 1.6258139E+00 - 34 1.6241542E+00 - 35 1.6224592E+00 - 36 1.6207333E+00 - 37 1.6189829E+00 - 38 1.6172315E+00 - 39 1.6154874E+00 - 40 1.6138009E+00 - 41 1.6122503E+00 -# zmag | m - 1 2.9382376E-02 - 2 2.9349916E-02 - 3 2.9317448E-02 - 4 2.9284970E-02 - 5 2.9237432E-02 - 6 2.9188873E-02 - 7 2.9125817E-02 - 8 
2.9049672E-02 - 9 2.8958796E-02 - 10 2.8857254E-02 - 11 2.8743918E-02 - 12 2.8625477E-02 - 13 2.8494013E-02 - 14 2.8347401E-02 - 15 2.8187795E-02 - 16 2.8015947E-02 - 17 2.7840354E-02 - 18 2.7652074E-02 - 19 2.7433578E-02 - 20 2.7191291E-02 - 21 2.6895180E-02 - 22 2.6544653E-02 - 23 2.6122137E-02 - 24 2.5624563E-02 - 25 2.5033911E-02 - 26 2.4339530E-02 - 27 2.3522709E-02 - 28 2.2558018E-02 - 29 2.1424482E-02 - 30 2.0072877E-02 - 31 1.8478535E-02 - 32 1.6574497E-02 - 33 1.4296897E-02 - 34 1.1553924E-02 - 35 8.2313557E-03 - 36 4.1473295E-03 - 37 -9.4334423E-04 - 38 -7.5108330E-03 - 39 -1.6575151E-02 - 40 -3.0846409E-02 - 41 -5.5729841E-02 -# kappa | - - 1 1.2612226E+00 - 2 1.2603742E+00 - 3 1.2595256E+00 - 4 1.2586768E+00 - 5 1.2578944E+00 - 6 1.2579577E+00 - 7 1.2581564E+00 - 8 1.2582556E+00 - 9 1.2582614E+00 - 10 1.2582558E+00 - 11 1.2582070E+00 - 12 1.2581921E+00 - 13 1.2581933E+00 - 14 1.2581697E+00 - 15 1.2580387E+00 - 16 1.2578513E+00 - 17 1.2577270E+00 - 18 1.2577609E+00 - 19 1.2581936E+00 - 20 1.2594605E+00 - 21 1.2619693E+00 - 22 1.2658061E+00 - 23 1.2708189E+00 - 24 1.2768283E+00 - 25 1.2837512E+00 - 26 1.2915678E+00 - 27 1.3002511E+00 - 28 1.3098026E+00 - 29 1.3202648E+00 - 30 1.3316935E+00 - 31 1.3441186E+00 - 32 1.3576626E+00 - 33 1.3723851E+00 - 34 1.3884792E+00 - 35 1.4061057E+00 - 36 1.4255460E+00 - 37 1.4472327E+00 - 38 1.4720548E+00 - 39 1.5019639E+00 - 40 1.5418564E+00 - 41 1.6012201E+00 -# delta | - - 1 0.0000000E+00 - 2 2.1626237E-03 - 3 3.1039289E-03 - 4 2.8239216E-03 - 5 2.9387609E-03 - 6 3.3220484E-03 - 7 4.1765986E-03 - 8 5.1738285E-03 - 9 6.0758512E-03 - 10 6.8338356E-03 - 11 7.6027395E-03 - 12 8.2974865E-03 - 13 9.0584667E-03 - 14 9.7798816E-03 - 15 1.0524117E-02 - 16 1.1238660E-02 - 17 1.1945644E-02 - 18 1.2663210E-02 - 19 1.3421702E-02 - 20 1.4339771E-02 - 21 1.5584558E-02 - 22 1.7127728E-02 - 23 1.8929971E-02 - 24 2.0924548E-02 - 25 2.3197903E-02 - 26 2.5698506E-02 - 27 2.8476364E-02 - 28 3.1496444E-02 - 29 3.4844808E-02 - 30 
3.8475434E-02 - 31 4.2409845E-02 - 32 4.6737469E-02 - 33 5.1387655E-02 - 34 5.6547826E-02 - 35 6.2108405E-02 - 36 6.8257691E-02 - 37 7.5032445E-02 - 38 8.2851371E-02 - 39 9.2288564E-02 - 40 1.0503056E-01 - 41 1.2352707E-01 -# zeta | - - 1 0.0000000E+00 - 2 7.6569916E-05 - 3 4.6449675E-05 - 4 -9.0360723E-05 - 5 5.4174777E-06 - 6 1.8536230E-05 - 7 -3.5086564E-05 - 8 -1.1570194E-04 - 9 -2.2138163E-04 - 10 -3.2814832E-04 - 11 -3.7379754E-04 - 12 -4.3425220E-04 - 13 -5.0977657E-04 - 14 -5.7290889E-04 - 15 -5.6807933E-04 - 16 -5.8939623E-04 - 17 -7.0124722E-04 - 18 -9.6730590E-04 - 19 -1.3502980E-03 - 20 -1.8396865E-03 - 21 -2.4607337E-03 - 22 -3.2861485E-03 - 23 -4.3271366E-03 - 24 -5.5161425E-03 - 25 -6.8910886E-03 - 26 -8.4632225E-03 - 27 -1.0219299E-02 - 28 -1.2188321E-02 - 29 -1.4414920E-02 - 30 -1.6955009E-02 - 31 -1.9834704E-02 - 32 -2.3094533E-02 - 33 -2.6828103E-02 - 34 -3.1125957E-02 - 35 -3.6049755E-02 - 36 -4.1788933E-02 - 37 -4.8545104E-02 - 38 -5.6830254E-02 - 39 -6.7735871E-02 - 40 -8.4186411E-02 - 41 -1.1142245E-01 -# shape_cos0 | - - 1 -5.8662572E-02 - 2 -5.8535242E-02 - 3 -5.8152796E-02 - 4 -5.7515236E-02 - 5 -5.7218122E-02 - 6 -5.7296212E-02 - 7 -5.7392179E-02 - 8 -5.7448972E-02 - 9 -5.7459553E-02 - 10 -5.7456732E-02 - 11 -5.7488482E-02 - 12 -5.7486927E-02 - 13 -5.7473575E-02 - 14 -5.7455085E-02 - 15 -5.7459913E-02 - 16 -5.7474107E-02 - 17 -5.7503650E-02 - 18 -5.7545897E-02 - 19 -5.7646890E-02 - 20 -5.7913652E-02 - 21 -5.8430580E-02 - 22 -5.9201593E-02 - 23 -6.0213227E-02 - 24 -6.1419493E-02 - 25 -6.2815503E-02 - 26 -6.4360743E-02 - 27 -6.6072019E-02 - 28 -6.7939722E-02 - 29 -6.9969876E-02 - 30 -7.2149653E-02 - 31 -7.4530316E-02 - 32 -7.7060199E-02 - 33 -7.9800591E-02 - 34 -8.2768420E-02 - 35 -8.5998217E-02 - 36 -8.9515669E-02 - 37 -9.3438627E-02 - 38 -9.7921957E-02 - 39 -1.0337191E-01 - 40 -1.1084729E-01 - 41 -1.2209109E-01 -# shape_cos1 | - - 1 0.0000000E+00 - 2 -4.4461858E-04 - 3 -1.0439010E-03 - 4 -1.7978473E-03 - 5 -2.6835803E-03 - 6 
-3.2842355E-03 - 7 -3.9782791E-03 - 8 -4.6949673E-03 - 9 -5.4175395E-03 - 10 -6.0957064E-03 - 11 -6.7919373E-03 - 12 -7.4900463E-03 - 13 -8.2024793E-03 - 14 -8.9551743E-03 - 15 -9.7004222E-03 - 16 -1.0439161E-02 - 17 -1.1104747E-02 - 18 -1.1787320E-02 - 19 -1.2593190E-02 - 20 -1.3407079E-02 - 21 -1.4381027E-02 - 22 -1.5474662E-02 - 23 -1.6731514E-02 - 24 -1.8117909E-02 - 25 -1.9710195E-02 - 26 -2.1479009E-02 - 27 -2.3437316E-02 - 28 -2.5669795E-02 - 29 -2.8156234E-02 - 30 -3.1013693E-02 - 31 -3.4238694E-02 - 32 -3.7958174E-02 - 33 -4.2241522E-02 - 34 -4.7232231E-02 - 35 -5.3129964E-02 - 36 -6.0200454E-02 - 37 -6.8845062E-02 - 38 -7.9866821E-02 - 39 -9.5045048E-02 - 40 -1.1945163E-01 - 41 -1.6228171E-01 -# shape_cos2 | - - 1 0.0000000E+00 - 2 7.2192197E-05 - 3 7.0735956E-05 - 4 -4.3687241E-06 - 5 -1.0603327E-05 - 6 -4.3275232E-05 - 7 -1.1331768E-04 - 8 -1.5787045E-04 - 9 -1.9175522E-04 - 10 -1.6995380E-04 - 11 -7.0388918E-05 - 12 -1.9137174E-05 - 13 -1.1431546E-05 - 14 -4.2916010E-05 - 15 -8.6273841E-05 - 16 -1.1050112E-04 - 17 -1.2784355E-04 - 18 -7.9642203E-05 - 19 2.7556730E-05 - 20 1.7888252E-04 - 21 4.1168349E-04 - 22 7.2365237E-04 - 23 1.1271256E-03 - 24 1.6286857E-03 - 25 2.1418798E-03 - 26 2.7348310E-03 - 27 3.4120907E-03 - 28 4.1744165E-03 - 29 4.9820726E-03 - 30 5.9271521E-03 - 31 7.0343530E-03 - 32 8.2031070E-03 - 33 9.5264887E-03 - 34 1.1066750E-02 - 35 1.2819949E-02 - 36 1.4737644E-02 - 37 1.7056796E-02 - 38 1.9822086E-02 - 39 2.3467575E-02 - 40 2.8969429E-02 - 41 3.7521910E-02 -# shape_cos3 | - - 1 0.0000000E+00 - 2 -4.7478642E-04 - 3 -4.9121753E-04 - 4 -4.9293312E-05 - 5 6.9868404E-05 - 6 -1.5822842E-05 - 7 -4.5896679E-05 - 8 -5.1235964E-05 - 9 7.3496443E-06 - 10 8.4323699E-05 - 11 1.3785293E-04 - 12 5.8852964E-05 - 13 7.8393094E-06 - 14 -8.6178444E-06 - 15 5.2931422E-06 - 16 3.6944331E-05 - 17 2.3403505E-05 - 18 8.5620459E-06 - 19 3.5628952E-05 - 20 6.6051501E-05 - 21 1.3715763E-04 - 22 2.0531172E-04 - 23 3.1131649E-04 - 24 4.4009304E-04 - 25 
5.8884437E-04 - 26 7.6711840E-04 - 27 9.9775281E-04 - 28 1.2587352E-03 - 29 1.5653083E-03 - 30 1.9722043E-03 - 31 2.4324423E-03 - 32 3.0082622E-03 - 33 3.7138507E-03 - 34 4.5857885E-03 - 35 5.6128249E-03 - 36 6.9186619E-03 - 37 8.5270717E-03 - 38 1.0611317E-02 - 39 1.3681252E-02 - 40 1.9260023E-02 - 41 2.9017335E-02 -# shape_cos4 | - - 1 0.0000000E+00 - 2 7.5724635E-05 - 3 8.5411565E-05 - 4 2.9060789E-05 - 5 3.5895487E-05 - 6 -1.8583796E-05 - 7 3.4613923E-05 - 8 7.2591938E-06 - 9 -4.1839144E-06 - 10 1.1194344E-05 - 11 -4.7610705E-05 - 12 -3.9936940E-05 - 13 -9.3037918E-06 - 14 2.5337836E-05 - 15 1.0421580E-07 - 16 -5.6850852E-05 - 17 -1.3691172E-04 - 18 -1.9777334E-04 - 19 -2.3197307E-04 - 20 -2.6133256E-04 - 21 -2.8913506E-04 - 22 -3.1163676E-04 - 23 -3.7554703E-04 - 24 -4.5578823E-04 - 25 -5.8549872E-04 - 26 -6.9946134E-04 - 27 -8.4293226E-04 - 28 -1.0092315E-03 - 29 -1.2043021E-03 - 30 -1.3954823E-03 - 31 -1.6751700E-03 - 32 -1.9080125E-03 - 33 -2.1953776E-03 - 34 -2.5215730E-03 - 35 -2.8875676E-03 - 36 -3.2418906E-03 - 37 -3.6558225E-03 - 38 -4.0924187E-03 - 39 -4.5898400E-03 - 40 -5.4053571E-03 - 41 -5.9925165E-03 -# shape_cos5 | - - 1 0.0000000E+00 - 2 -9.4718188E-06 - 3 -2.8146665E-05 - 4 -5.6024538E-05 - 5 -5.6537558E-05 - 6 1.3122965E-05 - 7 9.0082583E-06 - 8 2.2700256E-06 - 9 1.8172401E-06 - 10 1.8091308E-05 - 11 1.1142437E-05 - 12 1.4416728E-05 - 13 5.2923660E-06 - 14 -4.3541310E-05 - 15 -8.5220517E-05 - 16 -1.1596211E-04 - 17 -6.1875568E-05 - 18 -2.3261463E-05 - 19 -6.0052917E-05 - 20 -4.3640286E-05 - 21 -5.1549240E-05 - 22 -3.5028867E-05 - 23 -3.6163444E-05 - 24 -1.5993548E-05 - 25 -3.2908926E-05 - 26 -4.2214539E-05 - 27 -3.5891594E-05 - 28 -6.5653821E-05 - 29 -6.8239325E-05 - 30 -1.1468567E-04 - 31 -1.2996660E-04 - 32 -1.5916760E-04 - 33 -1.7693671E-04 - 34 -1.7600763E-04 - 35 -1.6731070E-04 - 36 -1.4103138E-04 - 37 -7.9456973E-05 - 38 -7.7212748E-07 - 39 1.0407734E-04 - 40 -9.9575790E-05 - 41 2.4230842E-06 -# shape_sin3 | - - 1 0.0000000E+00 - 2 
1.5497501E-04 - 3 1.4466918E-04 - 4 -3.0917479E-05 - 5 2.5706487E-05 - 6 -2.0484797E-05 - 7 -1.6707030E-05 - 8 -2.3101044E-05 - 9 -7.1543558E-06 - 10 1.9399798E-05 - 11 1.5679049E-05 - 12 2.6255181E-06 - 13 -3.9962087E-05 - 14 -4.9897783E-05 - 15 -7.5144997E-05 - 16 -1.1107971E-04 - 17 -1.7225144E-04 - 18 -2.2953545E-04 - 19 -2.3972675E-04 - 20 -2.7833211E-04 - 21 -3.6702691E-04 - 22 -5.7035120E-04 - 23 -7.7020329E-04 - 24 -9.4802967E-04 - 25 -1.2156259E-03 - 26 -1.5121068E-03 - 27 -1.8492729E-03 - 28 -2.1646693E-03 - 29 -2.5964527E-03 - 30 -3.0255678E-03 - 31 -3.5189776E-03 - 32 -4.0936669E-03 - 33 -4.6531585E-03 - 34 -5.3610623E-03 - 35 -6.0647479E-03 - 36 -6.9024497E-03 - 37 -7.8137350E-03 - 38 -8.9456143E-03 - 39 -1.0428333E-02 - 40 -1.2894876E-02 - 41 -1.6209363E-02 -# shape_sin4 | - - 1 0.0000000E+00 - 2 4.3715166E-05 - 3 4.2055509E-05 - 4 -4.9789711E-06 - 5 1.6665040E-05 - 6 2.2864134E-07 - 7 6.8712165E-06 - 8 -1.1076064E-05 - 9 1.7543751E-05 - 10 5.3965458E-05 - 11 1.1959236E-04 - 12 8.8072297E-05 - 13 -1.6889151E-05 - 14 -1.2884948E-04 - 15 -1.1948547E-04 - 16 -4.2024970E-05 - 17 -4.5361861E-05 - 18 -1.2934318E-04 - 19 -1.8281903E-04 - 20 -2.0525205E-04 - 21 -2.3692553E-04 - 22 -2.7650921E-04 - 23 -3.6658018E-04 - 24 -4.7181258E-04 - 25 -5.9445226E-04 - 26 -7.4663843E-04 - 27 -9.1499227E-04 - 28 -1.0941730E-03 - 29 -1.3180811E-03 - 30 -1.5933342E-03 - 31 -1.8887696E-03 - 32 -2.2654706E-03 - 33 -2.6660894E-03 - 34 -3.1580414E-03 - 35 -3.7225598E-03 - 36 -4.3711340E-03 - 37 -5.1009725E-03 - 38 -6.0099862E-03 - 39 -7.2654897E-03 - 40 -9.5401085E-03 - 41 -1.3040626E-02 -# shape_sin5 | - - 1 0.0000000E+00 - 2 1.1874480E-04 - 3 1.0552789E-04 - 4 -3.9650720E-05 - 5 2.6051125E-05 - 6 -2.0642626E-06 - 7 1.0407386E-05 - 8 -2.3711840E-06 - 9 4.6353497E-06 - 10 6.8462924E-07 - 11 3.0937255E-05 - 12 -2.4715696E-05 - 13 -5.1202293E-06 - 14 -1.7059166E-05 - 15 2.4552106E-06 - 16 5.7589577E-07 - 17 -4.1281977E-06 - 18 -1.3802499E-05 - 19 -5.4375305E-05 - 20 -8.7940563E-05 
- 21 -5.6754140E-05 - 22 -1.8097531E-05 - 23 1.0520767E-05 - 24 -1.2390009E-05 - 25 -8.1574545E-06 - 26 -2.5886422E-05 - 27 -2.4119318E-05 - 28 -6.1954614E-05 - 29 -6.4755355E-05 - 30 -9.9013735E-05 - 31 -1.7242680E-04 - 32 -2.2414220E-04 - 33 -3.5749457E-04 - 34 -4.1976306E-04 - 35 -5.8923559E-04 - 36 -7.6364100E-04 - 37 -1.0502220E-03 - 38 -1.3569811E-03 - 39 -1.7230707E-03 - 40 -2.0217193E-03 - 41 -2.7780079E-03 -# ne | 10^19/m^3 - 1 2.4293253E+00 - 2 2.4286951E+00 - 3 2.4276761E+00 - 4 2.4257952E+00 - 5 2.4228931E+00 - 6 2.4188349E+00 - 7 2.4135116E+00 - 8 2.4068280E+00 - 9 2.3986810E+00 - 10 2.3889806E+00 - 11 2.3776592E+00 - 12 2.3646434E+00 - 13 2.3498588E+00 - 14 2.3332491E+00 - 15 2.3148026E+00 - 16 2.2945379E+00 - 17 2.2723876E+00 - 18 2.2481282E+00 - 19 2.2215476E+00 - 20 2.1925802E+00 - 21 2.1612886E+00 - 22 2.1278651E+00 - 23 2.0926416E+00 - 24 2.0560173E+00 - 25 2.0183876E+00 - 26 1.9801511E+00 - 27 1.9416858E+00 - 28 1.9033119E+00 - 29 1.8652815E+00 - 30 1.8277941E+00 - 31 1.7910084E+00 - 32 1.7550357E+00 - 33 1.7199429E+00 - 34 1.6857772E+00 - 35 1.6522772E+00 - 36 1.6185600E+00 - 37 1.5836065E+00 - 38 1.5468093E+00 - 39 1.5036345E+00 - 40 1.3755444E+00 - 41 1.1679028E+00 -# ni | 10^19/m^3 - 1 2.0278446E+00 6.5420474E-02 5.6074748E-08 8.7732740E-03 - 2 2.0276837E+00 6.5403494E-02 5.6060194E-08 8.4066287E-03 - 3 2.0271004E+00 6.5376054E-02 5.6036674E-08 8.2315887E-03 - 4 2.0255932E+00 6.5325401E-02 5.5993257E-08 8.2662531E-03 - 5 2.0231756E+00 6.5247246E-02 5.5926267E-08 8.2002150E-03 - 6 2.0198691E+00 6.5137969E-02 5.5832600E-08 8.0746971E-03 - 7 2.0154750E+00 6.4994609E-02 5.5709720E-08 8.0621822E-03 - 8 2.0098094E+00 6.4814627E-02 5.5555450E-08 8.1987158E-03 - 9 2.0028107E+00 6.4595229E-02 5.5367395E-08 8.3981715E-03 - 10 1.9944827E+00 6.4333996E-02 5.5143480E-08 8.5887448E-03 - 11 1.9847597E+00 6.4029133E-02 5.4882169E-08 8.8606636E-03 - 12 1.9734829E+00 6.3678610E-02 5.4581721E-08 9.3158210E-03 - 13 1.9605517E+00 6.3280456E-02 5.4240446E-08 
9.9311182E-03 - 14 1.9459545E+00 6.2833185E-02 5.3857069E-08 1.0659186E-02 - 15 1.9296997E+00 6.2336438E-02 5.3431286E-08 1.1509500E-02 - 16 1.9118043E+00 6.1790703E-02 5.2963513E-08 1.2468119E-02 - 17 1.8924705E+00 6.1194204E-02 5.2452228E-08 1.3034327E-02 - 18 1.8723427E+00 6.0540925E-02 5.1892273E-08 1.2045315E-02 - 19 1.8518956E+00 5.9825117E-02 5.1278723E-08 9.3563098E-03 - 20 1.8303081E+00 5.9045037E-02 5.0610082E-08 6.6460702E-03 - 21 1.8061967E+00 5.8202374E-02 4.9887799E-08 5.1082187E-03 - 22 1.7792879E+00 5.7302296E-02 4.9116302E-08 4.4176786E-03 - 23 1.7502952E+00 5.6353742E-02 4.8303255E-08 4.0291828E-03 - 24 1.7199287E+00 5.5367474E-02 4.7457882E-08 3.7381111E-03 - 25 1.6886543E+00 5.4354121E-02 4.6589293E-08 3.4782367E-03 - 26 1.6568529E+00 5.3324432E-02 4.5706701E-08 3.2238446E-03 - 27 1.6248523E+00 5.2288583E-02 4.4818831E-08 2.9794729E-03 - 28 1.5929106E+00 5.1255194E-02 4.3933067E-08 2.7603782E-03 - 29 1.5612327E+00 5.0231045E-02 4.3055225E-08 2.5634039E-03 - 30 1.5300013E+00 4.9221532E-02 4.2189927E-08 2.3624729E-03 - 31 1.4993742E+00 4.8230922E-02 4.1340831E-08 2.1341914E-03 - 32 1.4694559E+00 4.7262198E-02 4.0510496E-08 1.8784234E-03 - 33 1.4402936E+00 4.6317160E-02 3.9700462E-08 1.6129714E-03 - 34 1.4119103E+00 4.5397095E-02 3.8911834E-08 1.3542979E-03 - 35 1.3840749E+00 4.4494958E-02 3.8138574E-08 1.1100722E-03 - 36 1.3560432E+00 4.3586968E-02 3.7360296E-08 8.7914272E-04 - 37 1.3269623E+00 4.2645689E-02 3.6553484E-08 6.5980662E-04 - 38 1.2963197E+00 4.1654756E-02 3.5704113E-08 4.6092023E-04 - 39 1.2602991E+00 4.0492084E-02 3.4707536E-08 3.0363867E-04 - 40 1.1530288E+00 3.7042699E-02 3.1750917E-08 2.1476978E-04 - 41 9.7902665E-01 3.1451034E-02 2.6958056E-08 2.1476978E-04 -# te | keV - 1 3.3836121E+00 - 2 3.3667085E+00 - 3 3.3413738E+00 - 4 3.2995637E+00 - 5 3.2423742E+00 - 6 3.1714978E+00 - 7 3.0890267E+00 - 8 2.9971364E+00 - 9 2.8978224E+00 - 10 2.7929280E+00 - 11 2.6841704E+00 - 12 2.5730409E+00 - 13 2.4608589E+00 - 14 2.3488297E+00 - 15 
2.2380601E+00 - 16 2.1295268E+00 - 17 2.0233270E+00 - 18 1.9181176E+00 - 19 1.8123573E+00 - 20 1.7053801E+00 - 21 1.5973095E+00 - 22 1.4889246E+00 - 23 1.3814095E+00 - 24 1.2760906E+00 - 25 1.1743032E+00 - 26 1.0771959E+00 - 27 9.8554524E-01 - 28 8.9985040E-01 - 29 8.2038232E-01 - 30 7.4718859E-01 - 31 6.8013323E-01 - 32 6.1891065E-01 - 33 5.6314616E-01 - 34 5.1242447E-01 - 35 4.6339510E-01 - 36 4.0792724E-01 - 37 3.4053767E-01 - 38 2.6438829E-01 - 39 1.8861037E-01 - 40 1.2534898E-01 - 41 7.1676107E-02 -# ti | keV - 1 1.0530279E+00 1.0607021E+00 1.0607021E+00 1.1101037E+01 - 2 1.0471973E+00 1.0548581E+00 1.0548581E+00 1.1060268E+01 - 3 1.0394862E+00 1.0470875E+00 1.0470875E+00 1.1907518E+01 - 4 1.0288854E+00 1.0363941E+00 1.0363941E+00 1.3444714E+01 - 5 1.0167961E+00 1.0241806E+00 1.0241806E+00 1.4464764E+01 - 6 1.0038177E+00 1.0110295E+00 1.0110295E+00 1.4810818E+01 - 7 9.8986159E-01 9.9688353E-01 9.9688353E-01 1.5153939E+01 - 8 9.7457164E-01 9.8137593E-01 9.8137593E-01 1.5289471E+01 - 9 9.5764313E-01 9.6421567E-01 9.6421567E-01 1.4719245E+01 - 10 9.3906547E-01 9.4542323E-01 9.4542323E-01 1.3808869E+01 - 11 9.1923425E-01 9.2540152E-01 9.2540152E-01 1.3080950E+01 - 12 8.9872573E-01 9.0471349E-01 9.0471349E-01 1.2859517E+01 - 13 8.7795024E-01 8.8376482E-01 8.8376482E-01 1.3261659E+01 - 14 8.5690003E-01 8.6254597E-01 8.6254597E-01 1.3699394E+01 - 15 8.3502370E-01 8.4049864E-01 8.4049864E-01 1.3912696E+01 - 16 8.1139099E-01 8.1668569E-01 8.1668569E-01 1.4461817E+01 - 17 7.8510735E-01 7.9020049E-01 7.9020049E-01 1.5501012E+01 - 18 7.5578493E-01 7.6063024E-01 7.6063024E-01 1.6201223E+01 - 19 7.2384515E-01 7.2837158E-01 7.2837158E-01 1.5661225E+01 - 20 6.9042566E-01 6.9458945E-01 6.9458945E-01 1.3969171E+01 - 21 6.5702436E-01 6.6085572E-01 6.6085572E-01 1.2130061E+01 - 22 6.2505975E-01 6.2862097E-01 6.2862097E-01 1.0939708E+01 - 23 5.9538211E-01 5.9871280E-01 5.9871280E-01 1.0298946E+01 - 24 5.6808217E-01 5.7120076E-01 5.7120076E-01 9.9763451E+00 - 25 5.4261165E-01 
5.4553127E-01 5.4553127E-01 9.8521340E+00 - 26 5.1807063E-01 5.2079525E-01 5.2079525E-01 9.6835060E+00 - 27 4.9365491E-01 4.9618140E-01 4.9618140E-01 1.4684507E+01 - 28 4.6901488E-01 4.7134447E-01 4.7134447E-01 8.1738697E+00 - 29 4.4439533E-01 4.4653414E-01 4.4653414E-01 7.9667000E+00 - 30 4.2033147E-01 4.2228503E-01 4.2228503E-01 6.0111928E+00 - 31 3.9675481E-01 3.9852741E-01 3.9852741E-01 5.9075046E+00 - 32 3.7207266E-01 3.7367044E-01 3.7367044E-01 5.7961970E+00 - 33 3.4363739E-01 3.4507277E-01 3.4507277E-01 5.6710201E+00 - 34 3.0937458E-01 3.1066751E-01 3.1066751E-01 5.5306084E+00 - 35 2.6974294E-01 2.7090514E-01 2.7090514E-01 5.3846747E+00 - 36 2.2781668E-01 2.2883461E-01 2.2883461E-01 5.2500185E+00 - 37 1.8696457E-01 1.8781259E-01 1.8781259E-01 5.1416131E+00 - 38 1.4926798E-01 1.4992861E-01 1.4992861E-01 5.0797503E+00 - 39 1.1545574E-01 1.1592265E-01 1.1592265E-01 5.0706745E+00 - 40 8.5627841E-02 8.5922439E-02 8.5922439E-02 5.0892086E+00 - 41 5.7799389E-02 5.7799389E-02 5.7799389E-02 5.0892086E+00 -# ptot | Pa - 1 1.6984435E+04 - 2 1.6921380E+04 - 3 1.6802913E+04 - 4 1.6590480E+04 - 5 1.6309002E+04 - 6 1.5960051E+04 - 7 1.5557551E+04 - 8 1.5115887E+04 - 9 1.4639103E+04 - 10 1.4128929E+04 - 11 1.3592580E+04 - 12 1.3043132E+04 - 13 1.2493270E+04 - 14 1.1947614E+04 - 15 1.1406194E+04 - 16 1.0872117E+04 - 17 1.0337492E+04 - 18 9.7650871E+03 - 19 9.1223969E+03 - 20 8.4354174E+03 - 21 7.7644973E+03 - 22 7.1392322E+03 - 23 6.5549249E+03 - 24 6.0039922E+03 - 25 5.4843084E+03 - 26 4.9956281E+03 - 27 4.5382897E+03 - 28 4.1132578E+03 - 29 3.7215775E+03 - 30 3.3633910E+03 - 31 3.0365410E+03 - 32 2.7348885E+03 - 33 2.4500398E+03 - 34 2.1756497E+03 - 35 1.9042182E+03 - 36 1.6212433E+03 - 37 1.3194779E+03 - 38 1.0118879E+03 - 39 7.2216199E+02 - 40 4.6183686E+02 - 41 2.1372818E+02 -# johm | MA/m^2 - 1 7.1905884E+00 - 2 7.1905884E+00 - 3 3.9773601E+00 - 4 3.3056118E+00 - 5 2.9787690E+00 - 6 2.7980631E+00 - 7 2.7651828E+00 - 8 2.7292832E+00 - 9 2.6896830E+00 - 10 2.6580158E+00 
- 11 2.6331764E+00 - 12 2.6156047E+00 - 13 2.6038671E+00 - 14 2.5922532E+00 - 15 2.5751515E+00 - 16 2.5319227E+00 - 17 2.3907021E+00 - 18 1.9749940E+00 - 19 1.3166589E+00 - 20 9.5633239E-01 - 21 8.4210598E-01 - 22 7.6520527E-01 - 23 6.8474791E-01 - 24 6.0713517E-01 - 25 5.3542373E-01 - 26 4.7080751E-01 - 27 4.1350953E-01 - 28 3.6341951E-01 - 29 3.2008348E-01 - 30 2.8293101E-01 - 31 2.5122276E-01 - 32 2.2422882E-01 - 33 2.0128615E-01 - 34 1.8187052E-01 - 35 1.6334421E-01 - 36 1.4193072E-01 - 37 1.1736287E-01 - 38 9.0651664E-02 - 39 6.2838758E-02 - 40 3.6315194E-02 - 41 0.0000000E+00 -# jbs | MA/m^2 - 1 9.7313252E-02 - 2 9.7313252E-02 - 3 6.6863572E-02 - 4 6.3141855E-02 - 5 6.1528603E-02 - 6 6.0929653E-02 - 7 6.0271980E-02 - 8 5.9418384E-02 - 9 5.8250989E-02 - 10 5.6685682E-02 - 11 5.4816889E-02 - 12 5.2723454E-02 - 13 5.0492099E-02 - 14 4.8212133E-02 - 15 4.5963830E-02 - 16 4.3996188E-02 - 17 4.2699463E-02 - 18 4.2297172E-02 - 19 4.2865982E-02 - 20 4.4256392E-02 - 21 4.5573942E-02 - 22 4.6099157E-02 - 23 4.5954726E-02 - 24 4.5328408E-02 - 25 4.4340748E-02 - 26 4.3104757E-02 - 27 4.1650376E-02 - 28 3.9971507E-02 - 29 3.8120231E-02 - 30 3.6292764E-02 - 31 3.4830994E-02 - 32 3.3972872E-02 - 33 3.3666983E-02 - 34 3.4632662E-02 - 35 3.8058929E-02 - 36 4.1950134E-02 - 37 4.1468661E-02 - 38 3.2681635E-02 - 39 1.6909313E-02 - 40 6.6196369E-03 - 41 0.0000000E+00 -# jbstor | MA/m^2 - 1 2.4485066E+00 - 2 2.4485066E+00 - 3 2.4479041E+00 - 4 2.4455887E+00 - 5 2.4316776E+00 - 6 2.4443725E+00 - 7 2.4458027E+00 - 8 2.4423301E+00 - 9 2.4402693E+00 - 10 2.4431179E+00 - 11 2.4452004E+00 - 12 2.4478093E+00 - 13 2.4519972E+00 - 14 2.4560847E+00 - 15 2.4584722E+00 - 16 2.4521170E+00 - 17 2.4185777E+00 - 18 2.2791196E+00 - 19 1.8884881E+00 - 20 1.3398258E+00 - 21 9.8358460E-01 - 22 8.5570169E-01 - 23 7.8085321E-01 - 24 7.0621938E-01 - 25 6.3359853E-01 - 26 5.6417345E-01 - 27 5.0182545E-01 - 28 4.4522103E-01 - 29 3.9533718E-01 - 30 3.5194705E-01 - 31 3.1431180E-01 - 32 2.8218757E-01 - 33 
2.5516548E-01 - 34 2.3264400E-01 - 35 2.1360791E-01 - 36 1.9694457E-01 - 37 1.8055293E-01 - 38 1.5360877E-01 - 39 1.1693461E-01 - 40 8.1458002E-02 - 41 -1.8007793E+01 -# z_eff | - - 1 1.8079467E+00 - 2 1.8079463E+00 - 3 1.8079144E+00 - 4 1.8078797E+00 - 5 1.8078965E+00 - 6 1.8079064E+00 - 7 1.8078875E+00 - 8 1.8078625E+00 - 9 1.8078518E+00 - 10 1.8078531E+00 - 11 1.8078391E+00 - 12 1.8078076E+00 - 13 1.8077796E+00 - 14 1.8077593E+00 - 15 1.8077370E+00 - 16 1.8077166E+00 - 17 1.8077847E+00 - 18 1.8080630E+00 - 19 1.8083746E+00 - 20 1.8083850E+00 - 21 1.8081729E+00 - 22 1.8080165E+00 - 23 1.8079603E+00 - 24 1.8079424E+00 - 25 1.8079373E+00 - 26 1.8079372E+00 - 27 1.8079361E+00 - 28 1.8079318E+00 - 29 1.8079279E+00 - 30 1.8079298E+00 - 31 1.8079368E+00 - 32 1.8079442E+00 - 33 1.8079477E+00 - 34 1.8079474E+00 - 35 1.8079450E+00 - 36 1.8079428E+00 - 37 1.8079413E+00 - 38 1.8079373E+00 - 39 1.8079276E+00 - 40 1.8079115E+00 - 41 1.8078547E+00 -# vtor | m/s - 1 0.0000000E+00 -2.7195909E+04 0.0000000E+00 0.0000000E+00 - 2 0.0000000E+00 -2.6806808E+04 0.0000000E+00 0.0000000E+00 - 3 0.0000000E+00 -2.5863262E+04 0.0000000E+00 0.0000000E+00 - 4 0.0000000E+00 -2.4512188E+04 0.0000000E+00 0.0000000E+00 - 5 0.0000000E+00 -2.2979925E+04 0.0000000E+00 0.0000000E+00 - 6 0.0000000E+00 -2.1515148E+04 0.0000000E+00 0.0000000E+00 - 7 0.0000000E+00 -2.0314724E+04 0.0000000E+00 0.0000000E+00 - 8 0.0000000E+00 -1.9489222E+04 0.0000000E+00 0.0000000E+00 - 9 0.0000000E+00 -1.9047102E+04 0.0000000E+00 0.0000000E+00 - 10 0.0000000E+00 -1.8903804E+04 0.0000000E+00 0.0000000E+00 - 11 0.0000000E+00 -1.8930851E+04 0.0000000E+00 0.0000000E+00 - 12 0.0000000E+00 -1.9000560E+04 0.0000000E+00 0.0000000E+00 - 13 0.0000000E+00 -1.9021693E+04 0.0000000E+00 0.0000000E+00 - 14 0.0000000E+00 -1.8958078E+04 0.0000000E+00 0.0000000E+00 - 15 0.0000000E+00 -1.8825341E+04 0.0000000E+00 0.0000000E+00 - 16 0.0000000E+00 -1.8665837E+04 0.0000000E+00 0.0000000E+00 - 17 0.0000000E+00 -1.8526823E+04 0.0000000E+00 
0.0000000E+00 - 18 0.0000000E+00 -1.8440053E+04 0.0000000E+00 0.0000000E+00 - 19 0.0000000E+00 -1.8413089E+04 0.0000000E+00 0.0000000E+00 - 20 0.0000000E+00 -1.8434556E+04 0.0000000E+00 0.0000000E+00 - 21 0.0000000E+00 -1.8480962E+04 0.0000000E+00 0.0000000E+00 - 22 0.0000000E+00 -1.8522340E+04 0.0000000E+00 0.0000000E+00 - 23 0.0000000E+00 -1.8525993E+04 0.0000000E+00 0.0000000E+00 - 24 0.0000000E+00 -1.8460698E+04 0.0000000E+00 0.0000000E+00 - 25 0.0000000E+00 -1.8296738E+04 0.0000000E+00 0.0000000E+00 - 26 0.0000000E+00 -1.8020270E+04 0.0000000E+00 0.0000000E+00 - 27 0.0000000E+00 -1.7640929E+04 0.0000000E+00 0.0000000E+00 - 28 0.0000000E+00 -1.7184038E+04 0.0000000E+00 0.0000000E+00 - 29 0.0000000E+00 -1.6683458E+04 0.0000000E+00 0.0000000E+00 - 30 0.0000000E+00 -1.6155762E+04 0.0000000E+00 0.0000000E+00 - 31 0.0000000E+00 -1.5563561E+04 0.0000000E+00 0.0000000E+00 - 32 0.0000000E+00 -1.4785670E+04 0.0000000E+00 0.0000000E+00 - 33 0.0000000E+00 -1.3603553E+04 0.0000000E+00 0.0000000E+00 - 34 0.0000000E+00 -1.1873456E+04 0.0000000E+00 0.0000000E+00 - 35 0.0000000E+00 -9.6897540E+03 0.0000000E+00 0.0000000E+00 - 36 0.0000000E+00 -7.4171597E+03 0.0000000E+00 0.0000000E+00 - 37 0.0000000E+00 -5.6141376E+03 0.0000000E+00 0.0000000E+00 - 38 0.0000000E+00 -4.7027245E+03 0.0000000E+00 0.0000000E+00 - 39 0.0000000E+00 -4.7596468E+03 0.0000000E+00 0.0000000E+00 - 40 0.0000000E+00 -5.6680031E+03 0.0000000E+00 0.0000000E+00 - 41 0.0000000E+00 -6.1323113E+03 0.0000000E+00 0.0000000E+00 -# qohme | MW/m^3 - 1 4.6130765E-02 - 2 4.6130765E-02 - 3 4.7789783E-02 - 4 5.2363318E-02 - 5 5.5364175E-02 - 6 5.9140283E-02 - 7 6.4067318E-02 - 8 7.0906607E-02 - 9 7.8079559E-02 - 10 8.5955397E-02 - 11 9.4737516E-02 - 12 1.0459736E-01 - 13 1.1589246E-01 - 14 1.2884447E-01 - 15 1.4319878E-01 - 16 1.5842548E-01 - 17 1.7175997E-01 - 18 1.7225355E-01 - 19 1.3308279E-01 - 20 6.7613494E-02 - 21 4.1156239E-02 - 22 3.6146986E-02 - 23 3.4006854E-02 - 24 3.1276394E-02 - 25 2.8345691E-02 - 26 
2.5470393E-02 - 27 2.2773695E-02 - 28 2.0305332E-02 - 29 1.8090911E-02 - 30 1.6134738E-02 - 31 1.4432887E-02 - 32 1.2975629E-02 - 33 1.1746080E-02 - 34 1.0716319E-02 - 35 9.9026692E-03 - 36 9.2468074E-03 - 37 8.4933782E-03 - 38 7.4053481E-03 - 39 5.8425750E-03 - 40 3.8040674E-03 - 41 2.0009524E-03 -# qbeame | MW/m^3 - 1 5.5986838E-03 - 2 5.5986838E-03 - 3 5.6351768E-03 - 4 5.6550861E-03 - 5 5.6833173E-03 - 6 5.5695400E-03 - 7 5.5575399E-03 - 8 5.4599331E-03 - 9 5.3861793E-03 - 10 5.2581675E-03 - 11 5.1929722E-03 - 12 5.1971953E-03 - 13 5.1775196E-03 - 14 4.9901744E-03 - 15 4.9618998E-03 - 16 5.0845729E-03 - 17 5.0202464E-03 - 18 4.6564555E-03 - 19 6.0302687E-03 - 20 5.6319960E-03 - 21 5.0208355E-03 - 22 4.5896176E-03 - 23 4.4061531E-03 - 24 4.2584543E-03 - 25 4.2434980E-03 - 26 4.4038692E-03 - 27 4.5393166E-03 - 28 4.6001853E-03 - 29 4.7039031E-03 - 30 4.8518295E-03 - 31 4.9373472E-03 - 32 4.9000228E-03 - 33 4.7262114E-03 - 34 4.3806801E-03 - 35 3.9341851E-03 - 36 3.5724695E-03 - 37 3.3381735E-03 - 38 3.1902177E-03 - 39 3.1537835E-03 - 40 3.1630009E-03 - 41 3.1353493E-03 -# qbeami | MW/m^3 - 1 1.4405481E-02 - 2 1.4405481E-02 - 3 1.4317571E-02 - 4 1.4327663E-02 - 5 1.4143115E-02 - 6 1.3960983E-02 - 7 1.3820618E-02 - 8 1.3565788E-02 - 9 1.3189639E-02 - 10 1.2755732E-02 - 11 1.2174514E-02 - 12 1.1618291E-02 - 13 1.0987794E-02 - 14 1.0449563E-02 - 15 1.0153680E-02 - 16 1.0010544E-02 - 17 9.7800973E-03 - 18 9.5159852E-03 - 19 8.7131278E-03 - 20 7.2629089E-03 - 21 5.8477395E-03 - 22 4.8342608E-03 - 23 4.1985347E-03 - 24 3.7099999E-03 - 25 3.3467261E-03 - 26 3.1085471E-03 - 27 2.8696458E-03 - 28 2.6172263E-03 - 29 2.4111043E-03 - 30 2.2243511E-03 - 31 2.0162267E-03 - 32 1.7913977E-03 - 33 1.5603521E-03 - 34 1.3199292E-03 - 35 1.0880217E-03 - 36 8.8893628E-04 - 37 7.0310167E-04 - 38 5.1708801E-04 - 39 3.4922111E-04 - 40 2.1365043E-04 - 41 1.3159068E-04 -# qrfe | MW/m^3 - 1 0.0000000E+00 - 2 0.0000000E+00 - 3 2.0760634E-16 - 4 1.9255264E-03 - 5 1.5408664E-01 - 6 
1.7594500E+00 - 7 3.7113208E+00 - 8 1.4050996E+00 - 9 8.3454415E-02 - 10 1.7835978E-04 - 11 1.9043850E-09 - 12 1.9464523E-21 - 13 0.0000000E+00 - 14 0.0000000E+00 - 15 0.0000000E+00 - 16 0.0000000E+00 - 17 0.0000000E+00 - 18 0.0000000E+00 - 19 0.0000000E+00 - 20 0.0000000E+00 - 21 0.0000000E+00 - 22 0.0000000E+00 - 23 0.0000000E+00 - 24 0.0000000E+00 - 25 0.0000000E+00 - 26 0.0000000E+00 - 27 0.0000000E+00 - 28 0.0000000E+00 - 29 0.0000000E+00 - 30 0.0000000E+00 - 31 0.0000000E+00 - 32 0.0000000E+00 - 33 0.0000000E+00 - 34 0.0000000E+00 - 35 0.0000000E+00 - 36 0.0000000E+00 - 37 0.0000000E+00 - 38 0.0000000E+00 - 39 0.0000000E+00 - 40 0.0000000E+00 - 41 0.0000000E+00 -# qbrem | MW/m^3 - 1 1.1407030E-03 - 2 1.1407030E-03 - 3 1.1372623E-03 - 4 1.1302036E-03 - 5 1.1195274E-03 - 6 1.1052900E-03 - 7 1.0877081E-03 - 8 1.0670703E-03 - 9 1.0436777E-03 - 10 1.0177800E-03 - 11 9.8969303E-04 - 12 9.5970617E-04 - 13 9.2808271E-04 - 14 8.9509314E-04 - 15 8.6102423E-04 - 16 8.2620395E-04 - 17 7.9093690E-04 - 18 7.5511073E-04 - 19 7.1828214E-04 - 20 6.8029120E-04 - 21 6.4125602E-04 - 22 6.0150922E-04 - 23 5.6155372E-04 - 24 5.2197510E-04 - 25 4.8332172E-04 - 26 4.4608988E-04 - 27 4.1067157E-04 - 28 3.7731206E-04 - 29 3.4617295E-04 - 30 3.1730024E-04 - 31 2.9069053E-04 - 32 2.6626369E-04 - 33 2.4390307E-04 - 34 2.2346944E-04 - 35 2.0482013E-04 - 36 1.8643430E-04 - 37 1.6579858E-04 - 38 1.4218127E-04 - 39 1.1654208E-04 - 40 8.9846896E-05 - 41 5.3419278E-05 -# qsync | MW/m^3 - 1 3.4265930E-05 - 2 3.4265930E-05 - 3 3.3868640E-05 - 4 3.3080967E-05 - 5 3.1936549E-05 - 6 3.0487198E-05 - 7 2.8800489E-05 - 8 2.6948354E-05 - 9 2.4997433E-05 - 10 2.3003629E-05 - 11 2.1016937E-05 - 12 1.9077037E-05 - 13 1.7213967E-05 - 14 1.5450536E-05 - 15 1.3803203E-05 - 16 1.2282945E-05 - 17 1.0894861E-05 - 18 9.6228222E-06 - 19 8.4396766E-06 - 20 7.3335824E-06 - 21 6.3040395E-06 - 22 5.3567997E-06 - 23 4.4999470E-06 - 24 3.7392995E-06 - 25 3.0769638E-06 - 26 2.5111427E-06 - 27 2.0356874E-06 - 28 
1.6413022E-06 - 29 1.3180942E-06 - 30 1.0555725E-06 - 31 8.4399399E-07 - 32 6.7426784E-07 - 33 5.3852648E-07 - 34 4.3017446E-07 - 35 3.4374068E-07 - 36 2.6661549E-07 - 37 1.8758491E-07 - 38 1.1339990E-07 - 39 5.6777628E-08 - 40 2.3049458E-08 - 41 7.9937621E-09 -# qline | MW/m^3 - 1 1.8309857E-04 - 2 1.8309857E-04 - 3 1.8350316E-04 - 4 1.8429378E-04 - 5 1.8544951E-04 - 6 1.8692874E-04 - 7 1.8868706E-04 - 8 1.9067508E-04 - 9 1.9284469E-04 - 10 1.9514846E-04 - 11 1.9754279E-04 - 12 1.9998356E-04 - 13 2.0242588E-04 - 14 2.0482330E-04 - 15 2.0712826E-04 - 16 2.0929799E-04 - 17 2.1128816E-04 - 18 2.1310687E-04 - 19 2.1482555E-04 - 20 2.1651461E-04 - 21 2.1824110E-04 - 22 2.2006086E-04 - 23 2.2201490E-04 - 24 2.2414797E-04 - 25 2.2648244E-04 - 26 2.2901990E-04 - 27 2.3176828E-04 - 28 2.3474486E-04 - 29 2.3794270E-04 - 30 2.4135784E-04 - 31 2.4497734E-04 - 32 2.4880270E-04 - 33 2.5283919E-04 - 34 2.5708747E-04 - 35 2.6156744E-04 - 36 2.6826856E-04 - 37 2.8211328E-04 - 38 3.0861058E-04 - 39 3.5508440E-04 - 40 4.2766988E-04 - 41 4.1913117E-04 -# qei | MW/m^3 - 1 4.6352476E-02 - 2 4.6352476E-02 - 3 4.6449918E-02 - 4 4.6590313E-02 - 5 4.6730615E-02 - 6 4.6851438E-02 - 7 4.6940893E-02 - 8 4.6996802E-02 - 9 4.7022387E-02 - 10 4.7018569E-02 - 11 4.6979334E-02 - 12 4.6885245E-02 - 13 4.6715237E-02 - 14 4.6457306E-02 - 15 4.6117992E-02 - 16 4.5726753E-02 - 17 4.5329079E-02 - 18 4.4966823E-02 - 19 4.4660799E-02 - 20 4.4383728E-02 - 21 4.4045232E-02 - 22 4.3548464E-02 - 23 4.2837095E-02 - 24 4.1892592E-02 - 25 4.0722365E-02 - 26 3.9371524E-02 - 27 3.7913289E-02 - 28 3.6414353E-02 - 29 3.4914721E-02 - 30 3.3400515E-02 - 31 3.1850715E-02 - 32 3.0356975E-02 - 33 2.9279283E-02 - 34 2.9142444E-02 - 35 3.0439143E-02 - 36 3.2773125E-02 - 37 3.5104263E-02 - 38 3.6709147E-02 - 39 3.6878544E-02 - 40 3.4035776E-02 - 41 2.3958948E-02 -# qione | MW/m^3 - 1 5.3910153E-04 - 2 5.3910153E-04 - 3 6.4662612E-04 - 4 5.1594719E-04 - 5 5.4196026E-04 - 6 5.1458619E-04 - 7 4.8159116E-04 - 8 4.4626645E-04 - 
9 4.0458568E-04 - 10 3.5937640E-04 - 11 3.1636265E-04 - 12 2.7567996E-04 - 13 2.3617426E-04 - 14 1.9521375E-04 - 15 -2.3638917E-04 - 16 -4.5174482E-03 - 17 -8.7415949E-03 - 18 -1.2308841E-02 - 19 -1.5288069E-02 - 20 -1.7836978E-02 - 21 -1.9886693E-02 - 22 -2.1229683E-02 - 23 -2.1930009E-02 - 24 -2.2214804E-02 - 25 -2.2113272E-02 - 26 -2.1861081E-02 - 27 -2.1553233E-02 - 28 -2.1368320E-02 - 29 -2.1503486E-02 - 30 -2.1947920E-02 - 31 -2.2857179E-02 - 32 -2.3890393E-02 - 33 -2.4594876E-02 - 34 -2.4244476E-02 - 35 -2.2430691E-02 - 36 -1.9646313E-02 - 37 -1.8776171E-02 - 38 -2.5329159E-02 - 39 -4.6660067E-02 - 40 -8.5298847E-02 - 41 -1.3110814E-01 -# qioni | MW/m^3 - 1 -4.3365118E-05 - 2 -4.3365118E-05 - 3 4.6273391E-05 - 4 1.1687450E-04 - 5 2.5538126E-04 - 6 4.0699940E-04 - 7 6.1636439E-04 - 8 8.5328732E-04 - 9 1.0708240E-03 - 10 1.2767844E-03 - 11 1.4871044E-03 - 12 1.6969209E-03 - 13 1.8934763E-03 - 14 2.0604200E-03 - 15 2.1918021E-03 - 16 2.2936575E-03 - 17 2.3538625E-03 - 18 2.3073135E-03 - 19 2.0717009E-03 - 20 1.6262683E-03 - 21 1.1805596E-03 - 22 9.0621738E-04 - 23 7.7980288E-04 - 24 7.1539913E-04 - 25 6.6939431E-04 - 26 6.3212188E-04 - 27 5.9433255E-04 - 28 5.5217150E-04 - 29 5.1022364E-04 - 30 4.4632849E-04 - 31 3.7097058E-04 - 32 2.7705870E-04 - 33 1.8096651E-04 - 34 1.1580443E-04 - 35 5.9330882E-05 - 36 8.4653834E-06 - 37 -1.4957496E-04 - 38 -4.2324867E-04 - 39 -7.9818329E-04 - 40 -1.3273326E-03 - 41 -2.1180075E-03 -# qpar_beam | MW/m^3 - 1 2.4985475E+19 - 2 2.4985475E+19 - 3 2.4909300E+19 - 4 2.4469435E+19 - 5 2.3722559E+19 - 6 2.2542627E+19 - 7 2.1171301E+19 - 8 1.9730667E+19 - 9 1.8298035E+19 - 10 1.6918777E+19 - 11 1.5621143E+19 - 12 1.4446000E+19 - 13 1.3424083E+19 - 14 1.2543931E+19 - 15 1.1783028E+19 - 16 1.1129678E+19 - 17 1.0623925E+19 - 18 1.0318852E+19 - 19 1.0149944E+19 - 20 9.7980021E+18 - 21 9.3636597E+18 - 22 9.1245919E+18 - 23 9.0601580E+18 - 24 9.0636970E+18 - 25 9.1298515E+18 - 26 9.2883475E+18 - 27 9.5562222E+18 - 28 9.9814408E+18 - 29 
1.0558753E+19 - 30 1.1457713E+19 - 31 1.2659438E+19 - 32 1.4369589E+19 - 33 1.6856701E+19 - 34 2.0552333E+19 - 35 2.9784198E+19 - 36 4.5386516E+19 - 37 6.6375066E+19 - 38 9.6410362E+19 - 39 1.4417803E+20 - 40 2.2775269E+20 - 41 3.6342333E+20 -# qmom | MW/m^3 - 1 -1.2650594E-02 - 2 -1.2650594E-02 - 3 -1.4097844E-02 - 4 -1.6653485E-02 - 5 -1.7668344E-02 - 6 -1.7228887E-02 - 7 -1.5860580E-02 - 8 -1.4932274E-02 - 9 -1.5723618E-02 - 10 -1.7290906E-02 - 11 -2.0566867E-02 - 12 -2.6171732E-02 - 13 -3.1534842E-02 - 14 -3.5252005E-02 - 15 -3.8457154E-02 - 16 -4.2292588E-02 - 17 -4.6056189E-02 - 18 -4.6690907E-02 - 19 -4.2154801E-02 - 20 -3.6650383E-02 - 21 -3.2884915E-02 - 22 -3.0288787E-02 - 23 -2.8592868E-02 - 24 -2.7244455E-02 - 25 -2.5390866E-02 - 26 -2.3772698E-02 - 27 -2.2751361E-02 - 28 -2.1762852E-02 - 29 -2.0286211E-02 - 30 -1.8369397E-02 - 31 -1.6560286E-02 - 32 -1.4846543E-02 - 33 -1.2943964E-02 - 34 -1.1145865E-02 - 35 -9.6091325E-03 - 36 -7.9979634E-03 - 37 -6.2357745E-03 - 38 -4.4916535E-03 - 39 -2.5427757E-03 - 40 3.7336849E-04 - 41 5.0418548E-03 diff --git a/regressions/portals_regressions.py b/regressions/portals_regressions.py deleted file mode 100644 index 64f32a9a..00000000 --- a/regressions/portals_regressions.py +++ /dev/null @@ -1,155 +0,0 @@ -import argparse -import torch -import os -from mitim_tools.misc_tools import LOGtools -from mitim_tools.opt_tools import STRATEGYtools -from mitim_modules.portals import PORTALSmain -from mitim_modules.powertorch.physics import TRANSPORTtools -from mitim_tools.misc_tools.LOGtools import printMsg as print -from mitim_tools import __mitimroot__ -from IPython import embed - -# Get test number -parser = argparse.ArgumentParser() -parser.add_argument("test", type=int) -args = parser.parse_args() -test = args.test - -if test == 0: - tests = [1,2] -else: - tests = [test] - -# Set up case -inputgacode = __mitimroot__ + "/regressions/data/input.gacode" - -# 
--------------------------------------------------------------------------------------------- -# TESTS -# --------------------------------------------------------------------------------------------- - -def conditions_regressions(variables): - - conditions = True - - # Checks - for var in variables: - conditions &= var[0] == var[1] - - # Results - if conditions: - print("\t PASSED") - else: - print("\t FAILED",typeMsg='w') - - -for test in tests: - - folderWork = __mitimroot__ + "/regressions/scratch/portals_regression_{test}/" - - if test == 1: - - print("\n>>>>> Running PORTALS test 1: Standard run with constant diffusivities") - - os.system(f"rm -rf {folderWork} && mkdir {folderWork}") - with LOGtools.redirect_all_output_to_file(f'{folderWork}/regression.log'): - portals_fun = PORTALSmain.portals(folderWork) - portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 2 - portals_fun.optimization_options["initialization_options"]["initial_training"] = 3 - portals_fun.INITparameters["removeFast"] = True - - portals_fun.MODELparameters["ProfilesPredicted"] = ["te", "ti"] - portals_fun.optimization_options["acquisition_options"]["optimizers"] = ["botorch"] - - portals_fun.PORTALSparameters["transport_evaluator"] = TRANSPORTtools.diffusion_model - ModelOptions = {'chi_e': torch.ones(5)*0.5,'chi_i': torch.ones(5)*2.0} - - portals_fun.prep(inputgacode, folderWork, ModelOptions=ModelOptions) - mitim_bo = STRATEGYtools.MITIM_BO(portals_fun, cold_start=False, askQuestions=False) - mitim_bo.run() - - # Checks - conditions_regressions([ - [mitim_bo.optimization_data.data['QeTurb_1'][0],0.0129878663484079], - [mitim_bo.optimization_data.data['QeTurb_1'][1],0.0174629359509858], - [mitim_bo.optimization_data.data['QeTurb_1'][2],0.0222306543202599], - [mitim_bo.optimization_data.data['QeTurb_1'][3],0.0037220182305746], - [mitim_bo.optimization_data.data['QeTurb_1'][4],0.0301250769357799], - 
[mitim_bo.optimization_data.data['QeTurb_1'][5],0.0436471750834417], - [mitim_bo.optimization_data.data['QiTurb_5'][0],0.0114099018688661], - [mitim_bo.optimization_data.data['QiTurb_5'][1],0.0103728562456646], - [mitim_bo.optimization_data.data['QiTurb_5'][2],0.0095916319760464], - [mitim_bo.optimization_data.data['QiTurb_5'][3],0.0063868247281859], - [mitim_bo.optimization_data.data['QiTurb_5'][4],0.0062216868661381], - [mitim_bo.optimization_data.data['QiTurb_5'][5],0.0061692702220821], - ]) - - if test == 2: - - print("\n>>>>> Running PORTALS test 2: Standard run with TGLF") - - os.system(f"rm -rf {folderWork} && mkdir {folderWork}") - with LOGtools.redirect_all_output_to_file(f'{folderWork}/regression.log'): - - portals_fun = PORTALSmain.portals(folderWork) - portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 1 - portals_fun.optimization_options["initialization_options"]["initial_training"] = 3 - portals_fun.MODELparameters["RhoLocations"] = [0.25, 0.45, 0.65, 0.85] - portals_fun.INITparameters["removeFast"] = True - portals_fun.INITparameters["quasineutrality"] = True - portals_fun.INITparameters["sameDensityGradients"] = True - portals_fun.MODELparameters["transport_model"]["TGLFsettings"] = 2 - - portals_fun.MODELparameters["ProfilesPredicted"] = ["te", "ti", "ne"] - - portals_fun.prep(inputgacode, folderWork) - mitim_bo = STRATEGYtools.MITIM_BO(portals_fun, cold_start=False, askQuestions=False) - mitim_bo.run() - - # Checks - conditions_regressions([ - [mitim_bo.optimization_data.data['QeTar_3'][0],0.0276660734686889], - [mitim_bo.optimization_data.data['QeTar_3'][1],0.026050457428488], - [mitim_bo.optimization_data.data['QeTar_3'][2],0.0245681162983153], - [mitim_bo.optimization_data.data['QeTar_3'][3],0.0225138750256145], - [mitim_bo.optimization_data.data['QeTar_3'][4],0.0238676726307135], - [mitim_bo.optimization_data.data['QiTurb_4'][0],0.01904210194957 ], - 
[mitim_bo.optimization_data.data['QiTurb_4'][1],0.015054384849328], - [mitim_bo.optimization_data.data['QiTurb_4'][2],0.012453620533174], - [mitim_bo.optimization_data.data['QiTurb_4'][3],0.009167817359775], - [mitim_bo.optimization_data.data['QiTurb_4'][4],0.010592748091966], - [mitim_bo.optimization_data.data['QeTurb_1'][0],0.0008148021791468 ], - [mitim_bo.optimization_data.data['QeTurb_1'][1],0.005048271135896 ], - [mitim_bo.optimization_data.data['QeTurb_1'][2],0.0316597732275 ], - [mitim_bo.optimization_data.data['QeTurb_1'][3],0.4672666906836 ], - [mitim_bo.optimization_data.data['QeTurb_1'][4],-0.0006023859321252], - ]) - - if test == 3: - - print("\n>>>>> Running PORTALS test 3: Run with TGLF multi-channel") - - # os.system(f"rm -rf {folderWork} && mkdir {folderWork}") - # with LOGtools.redirect_all_output_to_file(f'{folderWork}/regression.log'): - - # portals_fun = PORTALSmain.portals(folderWork) - # portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 2 - # portals_fun.optimization_options["initialization_options"]["initial_training"] = 3 - # portals_fun.INITparameters["removeFast"] = True - - # portals_fun.MODELparameters["ProfilesPredicted"] = ["te", "ti", "ne",'nZ','w0'] - - # portals_fun.PORTALSparameters["ImpurityOfInterest"] = 'W' - # portals_fun.PORTALSparameters["surrogateForTurbExch"] = True - - # portals_fun.prep(inputgacode, folderWork) - # mitim_bo = STRATEGYtools.MITIM_BO(portals_fun, cold_start=False, askQuestions=False) - # mitim_bo.run() - - # with open(mitim_bo.optimization_object.optimization_extra, "rb") as f: - # mitim_runs = pickle_dill.load(f) - - # # Checks - # conditions_regressions([ - # [mitim_bo.optimization_data.data['QeTurb_1'][5],0.0713711320661], - # [mitim_runs[5]['powerstate'].plasma['PexchTurb'][0,3].item(),-0.0009466626542564001] - # ]) diff --git a/src/mitim_modules/freegsu/utils/FREEGSUparams.py b/src/mitim_modules/freegsu/utils/FREEGSUparams.py index bd84cee9..f8c13f8a 100644 --- 
a/src/mitim_modules/freegsu/utils/FREEGSUparams.py +++ b/src/mitim_modules/freegsu/utils/FREEGSUparams.py @@ -108,7 +108,7 @@ def createProblemParameters( dvs_min.extend(dvs_min2) dvs_max.extend(dvs_max2) - transformation = produceNewInputs + transformation = input_transform_freegs else: transformation = None @@ -376,7 +376,7 @@ def extractCont(x, cont): return v -def produceNewInputs(X, output, bounds, ParamProfile): +def input_transform_freegs(X, output, bounds, ParamProfile): """ X will be a tensor (with or without gradients) batch*dim, unnormalized """ diff --git a/src/mitim_modules/maestro/MAESTROmain.py b/src/mitim_modules/maestro/MAESTROmain.py index 73fcd981..403f2282 100644 --- a/src/mitim_modules/maestro/MAESTROmain.py +++ b/src/mitim_modules/maestro/MAESTROmain.py @@ -19,9 +19,10 @@ MAESTRO: Modular and Accelerated Engine for Simulation of Transport and Reactor Optimization (If MAESTRO is the orchestrator, then BEAT is each of the beats (steps) that MAESTRO orchestrates) - ''' +ENABLE_EMBED = False # If True, will enable IPython embed, useful for debugging + class maestro: def __init__( @@ -52,13 +53,15 @@ def __init__( self.folder_output = self.folder / "Outputs" self.folder_logs = self.folder_output / "Logs" + self.folder_performance = self.folder_output / "Performance" self.folder_beats = self.folder / "Beats" self.folder_logs.mkdir(parents=True, exist_ok=True) self.folder_beats.mkdir(parents=True, exist_ok=True) + self.folder_performance.mkdir(parents=True, exist_ok=True) # If terminal outputs, I also want to keep track of what has happened in a log file - if terminal_outputs and overall_log_file: + if terminal_outputs and overall_log_file and not ENABLE_EMBED: sys.stdout = LOGtools.Logger(logFile=self.folder_output / "maestro.log", writeAlsoTerminal=True) branch, commit_hash = IOtools.get_git_info(__mitimroot__) @@ -121,7 +124,7 @@ def define_creator(self, method, **kwargs_creator): ''' To initialize some profile functional form ''' - if method == 
'eped': + if method == 'eped' or method == 'eped_initializer': self.beat.initialize.profile_creator = creator_from_eped(self.beat.initialize,**kwargs_creator) elif method == 'parameterization': self.beat.initialize.profile_creator = creator_from_parameterization(self.beat.initialize,**kwargs_creator) @@ -134,7 +137,7 @@ def define_creator(self, method, **kwargs_creator): # Beat operations # -------------------------------------------------------------------------------------------- - @mitim_timer('\t\t* Checker', name_timer=None) + @mitim_timer(lambda self: f'Beat #{self.counter_current} ({self.beat.name}) - Checker') def check(self, beat_check = None, cold_start = False, **kwargs): ''' Note: @@ -149,7 +152,7 @@ def check(self, beat_check = None, cold_start = False, **kwargs): print('\t- Checking...') log_file = self.folder_logs / f'beat_{self.counter_current}_check.log' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): output_file = None if not cold_start: @@ -169,13 +172,14 @@ def check(self, beat_check = None, cold_start = False, **kwargs): return output_file is not None - @mitim_timer('\t\t* Initializer', name_timer=None) + @mitim_timer(lambda self: f'Beat #{self.counter_current} ({self.beat.name}) - Initializer', + log_file = lambda self: self.folder_performance / "timing.jsonl") def initialize(self, *args, **kwargs): print('\t- Initializing...') if self.beat.run_flag: log_file = self.folder_logs / f'beat_{self.counter_current}_ini.log' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): + with LOGtools.conditional_log_to_file(write_log=not 
ENABLE_EMBED,log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): # Initialize: produce self.profiles_current self.beat.initialize(*args, **kwargs) @@ -183,7 +187,7 @@ def initialize(self, *args, **kwargs): print('\t\t- Skipping beat initialization because this beat was already run', typeMsg = 'i') log_file = self.folder_logs / f'beat_{self.counter_current}_inform.log' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): # Initializer can also save important parameters self.beat.initialize._inform_save() @@ -193,15 +197,16 @@ def initialize(self, *args, **kwargs): if self.profiles_with_engineering_parameters is None: # First initialization, freeze engineering parameters - self._freeze_parameters(profiles = PROFILEStools.PROFILES_GACODE(self.beat.initialize.folder / 'input.gacode')) + self._freeze_parameters(profiles = PROFILEStools.gacode_state(self.beat.initialize.folder / 'input.gacode')) - @mitim_timer('\t\t* Preparation', name_timer=None) + @mitim_timer(lambda self: f'Beat #{self.counter_current} ({self.beat.name}) - Preparation', + log_file = lambda self: self.folder_performance / "timing.jsonl") def prepare(self, *args, **kwargs): print('\t- Preparing...') if self.beat.run_flag: log_file = self.folder_logs / f'beat_{self.counter_current}_prep.log' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): # Initialize if necessary if not self.beat.initialize_called: @@ -209,20 +214,21 @@ def 
prepare(self, *args, **kwargs): self.beat.initialize() # ----------------------------- - self.beat.profiles_current.deriveQuantities() + self.beat.profiles_current.derive_quantities() self.beat.prepare(*args, **kwargs) else: print('\t\t- Skipping beat preparation because this beat was already run', typeMsg = 'i') - @mitim_timer('\t\t* Run + finalization', name_timer=None) + @mitim_timer(lambda self: f'Beat #{self.counter_current} ({self.beat.name}) - Run + Finalization', + log_file = lambda self: self.folder_performance / "timing.jsonl") def run(self, **kwargs): # Run print('\t- Running...') if self.beat.run_flag: log_file = self.folder_logs / f'beat_{self.counter_current}_run.log' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): self.beat.run(**kwargs) else: print('\t\t- Skipping beat run because this beat was already run', typeMsg = 'i') @@ -230,7 +236,7 @@ def run(self, **kwargs): # Finalize, merging and freezing should occur even if the run has not been performed because the results are already there print('\t- Finalizing beat...') log_file = self.folder_logs / f'beat_{self.counter_current}_finalize.log' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): # Finalize self.beat.finalize(**kwargs) @@ -244,7 +250,7 @@ def run(self, **kwargs): # Inform next beats log_file = self.folder_logs / f'beat_{self.counter_current}_inform.log' if (not self.terminal_outputs) else None - with 
LOGtools.conditional_log_to_file(log_file=log_file): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file): self.beat._inform_save() # To save space, we can remove the contents of the run_ folder, as everything needed is in the output folder @@ -255,23 +261,24 @@ def run(self, **kwargs): def _freeze_parameters(self, profiles = None): if profiles is None: - profiles = PROFILEStools.PROFILES_GACODE(self.beat.folder_output / 'input.gacode') + profiles = PROFILEStools.gacode_state(self.beat.folder_output / 'input.gacode') print('\t\t- Freezing engineering parameters from MAESTRO') self.profiles_with_engineering_parameters = copy.deepcopy(profiles) - self.profiles_with_engineering_parameters.writeCurrentStatus(file= (self.folder_output / 'input.gacode_frozen')) + self.profiles_with_engineering_parameters.write_state(file= (self.folder_output / 'input.gacode_frozen')) - @mitim_timer('\t\t* Finalizing', name_timer=None) + @mitim_timer(lambda self: f'Beat #{self.counter_current} ({self.beat.name}) - Finalizing', + log_file = lambda self: self.folder_performance / "timing.jsonl") def finalize(self): print(f'- MAESTRO finalizing ******************************* {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}') log_file = self.folder_output / 'beat_final' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file, msg = f'\t\t* Log info being saved to {IOtools.clipstr(log_file)}'): final_file= (self.folder_output / 'input.gacode_final') - self.beat.profiles_output.writeCurrentStatus(file= final_file) + self.beat.profiles_output.write_state(file= final_file) print(f'\t\t- Final input.gacode saved to {IOtools.clipstr(final_file)}') @@ -279,7 +286,7 @@ def finalize(self): # Plotting operations # 
-------------------------------------------------------------------------------------------- - @mitim_timer('\t\t* Plotting', name_timer=None) + @mitim_timer(lambda self: f'Beat #{self.counter_current} ({self.beat.name}) - Plotting') def plot(self, fn = None, num_beats = 2, only_beats = None, full_plot = True): print('*** Plotting MAESTRO ******************************************************************** ') @@ -291,11 +298,14 @@ def plot(self, fn = None, num_beats = 2, only_beats = None, full_plot = True): wasProvided = True self.fn = fn - self._plot_beats(self.fn, num_beats = num_beats, only_beats = only_beats, full_plot = full_plot) - self._plot_results(self.fn) + if num_beats>0: + self._plot_beats(self.fn, num_beats = num_beats, only_beats = only_beats, full_plot = full_plot) + ps, ps_lab = self._plot_results(self.fn) if not wasProvided: self.fn.show() + + return ps, ps_lab def _plot_beats(self, fn, num_beats = 2, only_beats = None, full_plot = True): @@ -306,7 +316,7 @@ def _plot_beats(self, fn, num_beats = 2, only_beats = None, full_plot = True): print(f'\t- Plotting beat #{counter}...') log_file = self.folder_logs / f'plot_{counter}.log' if (not self.terminal_outputs) else None - with LOGtools.conditional_log_to_file(log_file=log_file): + with LOGtools.conditional_log_to_file(write_log=not ENABLE_EMBED,log_file=log_file): msg = beat.plot(fn = self.fn, counter = i, full_plot = full_plot) print(msg) @@ -314,7 +324,7 @@ def _plot_results(self, fn): print('\t- Plotting MAESTRO results...') - MAESTROplot.plot_results(self, fn) + return MAESTROplot.plot_results(self, fn) diff --git a/src/mitim_modules/maestro/scripts/check_maestro.py b/src/mitim_modules/maestro/scripts/check_maestro.py index 089a4388..37d8c4eb 100644 --- a/src/mitim_modules/maestro/scripts/check_maestro.py +++ b/src/mitim_modules/maestro/scripts/check_maestro.py @@ -75,7 +75,7 @@ def check_cases(folders): minutes = (time_in_queue.seconds % 3600) // 60 job_status = f"{state.strip()} for {hours}h 
{minutes}m ({cores} cores on {partition})" - if (outputs_folder / 'beat_final').exists(): + if (outputs_folder / 'beat_final').exists() and slurm_output.exists(): mod_time = datetime.fromtimestamp((outputs_folder / 'beat_final').stat().st_mtime).strftime('%Y-%m-%d %H:%M:%S') with open(slurm_output, 'r') as f: lines = f.readlines() diff --git a/src/mitim_modules/maestro/scripts/plot_maestro.py b/src/mitim_modules/maestro/scripts/plot_maestro.py index 98c13499..5c53c1fd 100644 --- a/src/mitim_modules/maestro/scripts/plot_maestro.py +++ b/src/mitim_modules/maestro/scripts/plot_maestro.py @@ -1,8 +1,10 @@ import argparse from mitim_modules.maestro.utils import MAESTROplot -from mitim_tools.misc_tools import IOtools, GUItools, FARMINGtools +from mitim_tools.misc_tools import GRAPHICStools, IOtools, GUItools from mitim_tools.opt_tools import STRATEGYtools +from mitim_tools.misc_tools.utils import remote_tools from pathlib import Path +from IPython import embed """ Quick way to plot several input.gacode files together (assumes unix in remote) @@ -33,46 +35,110 @@ def fix_maestro(folders): def main(): parser = argparse.ArgumentParser() - parser.add_argument("folders", type=str, nargs="*") - parser.add_argument("--remote",type=str, required=False, default=None) - parser.add_argument("--remote_folders",type=str, nargs="*", required=False, default=None) - parser.add_argument("--beats", type=int, required=False, default=2) # Last beats to plot - parser.add_argument("--only", type=str, required=False, default=None) - parser.add_argument("--full", required=False, default=False, action="store_true") - parser.add_argument('--fix', required=False, default=False, action='store_true') + + # Standard options + parser.add_argument("folders", type=str, nargs="*", + help="Paths to the folders to read.") + + # MAESTRO specific options + parser.add_argument("--beats", type=int, required=False, default=2, + help="Number of beats to plot. 
If 0, it will not plot beat information.") + parser.add_argument("--only", type=str, required=False, default=None, + help="If provided, it will only plot the specified beats (e.g., transp)") + parser.add_argument("--full", required=False, default=False, action="store_true", + help="If set, it will plot the full beat information.") + + # Remote options + parser.add_argument("--remote",type=str, required=False, default=None, + help="Remote machine to retrieve the folders from. If not provided, it will read the local folders.") + parser.add_argument("--remote_folder_parent",type=str, required=False, default=None, + help="Parent folder in the remote machine where the folders are located. If not provided, it will use --remote_folders.") + parser.add_argument("--remote_folders",type=str, nargs="*", required=False, default=None, + help="List of folders in the remote machine to retrieve. If not provided, it will use the local folder structures.") + parser.add_argument("--remote_minimal", required=False, default=False, action="store_true", + help="If set, it will only retrieve the folder structure with a few key files.") + parser.add_argument('--fix', required=False, default=False, action='store_true', + help="If set, it will fix the pkl optimization portals in the remote folders.") args = parser.parse_args() - remote = args.remote - folders = args.folders - fix = args.fix - - # Retrieve remote - if remote is not None: - if args.remote_folders is not None: - folders_remote = args.remote_folders - else: - folders_remote = folders - _, folders = FARMINGtools.retrieve_files_from_remote(IOtools.expandPath('./'), remote, folders_remote = folders_remote, purge_tmp_files = True) + + # -------------------------------------------------------------------------------------------------------------------------------------------- + # Retrieve from remote + # 
-------------------------------------------------------------------------------------------------------------------------------------------- + + only_folder_structure_with_files = None + if args.remote_minimal: + only_folder_structure_with_files = ["beat_results/input.gacode", "input.gacode_final","initializer_geqdsk/input.gacode", "timing.jsonl"] + + folders = remote_tools.retrieve_remote_folders(args.folders, args.remote, args.remote_folder_parent, args.remote_folders, only_folder_structure_with_files) + + # -------------------------------------------------------------------------------------------------------------------------------------------- # Fix pkl optimization portals in remote - if fix: + # -------------------------------------------------------------------------------------------------------------------------------------------- + + if args.fix: fix_maestro([Path(folder) for folder in folders]) - # ----- + # -------------------------------------------------------------------------------------------------------------------------------------------- + # Actual interpreting and plotting + # -------------------------------------------------------------------------------------------------------------------------------------------- - folders = [IOtools.expandPath(folder) for folder in folders] - beats = args.beats + beats = args.beats if not args.remote_minimal else 0 only = args.only full = args.full + folders = [IOtools.expandPath(folder) for folder in folders] + fn = GUItools.FigureNotebook("MAESTRO") + if len(folders) > 1: + fig = fn.add_figure(label='MAESTRO special ALL', tab_color=4) + + axsAll = fig.subplot_mosaic( + """ + ABGI + ABGI + AEGI + DEHJ + DFHJ + DFHJ + """ + ) + + fig = fn.add_figure(label='MAESTRO timings ALL', tab_color=4) + axsTiming = fig.subplot_mosaic(""" + A + B + """,sharex=True) + + colors = GRAPHICStools.listColors() + ms = [] - for folder in folders: - m = MAESTROplot.plotMAESTRO(folder, fn = fn, num_beats=beats, only_beats = 
only, full_plot = full) + x, scripts = [], [] + x0, scripts0 = [], [] + for i,folder in enumerate(folders): + m, ps, ps_lab = MAESTROplot.plotMAESTRO(folder, fn = fn, num_beats=beats, only_beats = only, full_plot = full) ms.append(m) + # Plot all special quantities together + if len(folders) > 1: + MAESTROplot.plot_special_quantities(ps, ps_lab, axsAll, color=colors[i], label = f'Case #{i}', legYN = i==0) + if (m.folder_performance / 'timing.jsonl').exists(): + x0, scripts0 = IOtools.plot_timings(m.folder_performance / 'timing.jsonl', axs = axsTiming, label = f'Case #{i}', color=colors[i]) + + # Only keep the longest + if len(x0) > len(x): + x = x0 + scripts = scripts0 + + if len(folders) > 1: + for let in ['A','B']: + axsTiming[let].set_xlim(left=0) + axsTiming[let].set_ylim(bottom=0) + axsTiming[let].set_xticks(x, scripts, rotation=10, ha="right", fontsize=8) + fn.show() # Import IPython and embed an interactive session diff --git a/src/mitim_modules/maestro/scripts/run_maestro.py b/src/mitim_modules/maestro/scripts/run_maestro.py index 56279d35..a3934e56 100644 --- a/src/mitim_modules/maestro/scripts/run_maestro.py +++ b/src/mitim_modules/maestro/scripts/run_maestro.py @@ -1,5 +1,6 @@ import argparse -import shutil +from pathlib import Path +from functools import partial from mitim_tools.misc_tools import IOtools from mitim_tools.gacode_tools import PROFILEStools from mitim_modules.maestro.MAESTROmain import maestro @@ -8,9 +9,18 @@ from mitim_tools.misc_tools import PLASMAtools from IPython import embed +def profiles_postprocessing_fun(file_profs, lumpImpurities = True, enforce_same_density_gradients = True): + p = PROFILEStools.gacode_state(file_profs) + if lumpImpurities: + p.lumpImpurities() + if enforce_same_density_gradients: + p.enforce_same_density_gradients() + p.write_state(file=file_profs) + return p + def parse_maestro_nml(file_path): # Extract engineering parameters, initializations, and desired beats to run - maestro_namelist = 
IOtools.read_mitim_nml(file_path) + maestro_namelist = IOtools.read_mitim_yaml(file_path) if "seed" in maestro_namelist: seed = maestro_namelist["seed"] @@ -78,6 +88,14 @@ def parse_maestro_nml(file_path): delta_sep = maestro_namelist["machine"]["separatrix"]["parameters"]["delta_sep"] n_mxh = maestro_namelist["machine"]["separatrix"]["parameters"]["n_mxh"] geometry = {'R': R, 'a': a, 'kappa_sep': kappa_sep, 'delta_sep': delta_sep, 'zeta_sep': 0.0, 'z0': 0.0, 'coeffs_MXH' : n_mxh} + elif separatrix_type == 'fibe': + R = maestro_namelist["machine"]["separatrix"]["parameters"]["R"] + a = maestro_namelist["machine"]["separatrix"]["parameters"]["a"] + kappa_sep = maestro_namelist["machine"]["separatrix"]["parameters"]["kappa_sep"] + delta_sep = maestro_namelist["machine"]["separatrix"]["parameters"]["delta_sep"] + zeta_sep = maestro_namelist["machine"]["separatrix"]["parameters"]["zeta_sep"] + n_mxh = maestro_namelist["machine"]["separatrix"]["parameters"]["n_mxh"] + geometry = {'R': R, 'a': a, 'kappa_sep': kappa_sep, 'delta_sep': delta_sep, 'zeta_sep': zeta_sep, 'z0': 0.0, 'coeffs_MXH' : n_mxh} elif separatrix_type == "geqdsk": # Initialize geometry from geqdsk file geqdsk_file = maestro_namelist["machine"]["separatrix"]["parameters"]["geqdsk_file"] @@ -92,7 +110,7 @@ def parse_maestro_nml(file_path): beat_namelists = {} - for beat_type in ["eped", "transp", "transp_soft", "portals", "portals_soft"]: + for beat_type in ["eped","eped_initializer", "transp", "transp_soft", "portals", "portals_soft"]: if f"{beat_type}_beat" in maestro_namelist["maestro"]: @@ -121,6 +139,8 @@ def parse_maestro_nml(file_path): beat_namelist = maestro_namelist["maestro"][f"{beat_type}_beat"][f"{beat_type}_namelist"] + + # *************************************************************************** # Nothin yet # *************************************************************************** @@ -138,14 +158,11 @@ def parse_maestro_nml(file_path): enforce_same_density_gradients = 
maestro_namelist["maestro"]["portals_beat"]["transport_preprocessing"]["enforce_same_density_gradients"] # add postprocessing function - def profiles_postprocessing_fun(file_profs): - p = PROFILEStools.PROFILES_GACODE(file_profs) - if lumpImpurities: - p.lumpImpurities() - if enforce_same_density_gradients: - p.enforce_same_density_gradients() - p.writeCurrentStatus(file=file_profs) - beat_namelist['PORTALSparameters']['profiles_postprocessing_fun'] = profiles_postprocessing_fun + beat_namelist['portals_parameters']['transport']['profiles_postprocessing_fun'] = partial(profiles_postprocessing_fun, lumpImpurities=lumpImpurities, enforce_same_density_gradients=enforce_same_density_gradients) + + elif beat_type == "eped_initializer" and "eped_beat" in maestro_namelist["maestro"]: + print('Using the eped_beat namelist for the eped_initializer') + beat_namelist = maestro_namelist["maestro"]["eped_beat"]["eped_namelist"] else: raise ValueError(f"[MITIM] {beat_type} beat not found in the MAESTRO namelist") @@ -156,7 +173,7 @@ def profiles_postprocessing_fun(file_profs): return parameters_engineering, parameters_initialize, geometry, beat_namelists, maestro_beats, seed -@mitim_timer('\t\t* MAESTRO') +@mitim_timer('MAESTRO') def run_maestro_local( parameters_engineering, parameters_initialize, @@ -178,7 +195,13 @@ def run_maestro_local( if folder is None: folder = IOtools.expandPath('./') - m = maestro(folder, master_seed = seed, terminal_outputs = terminal_outputs, master_cold_start = force_cold_start, keep_all_files = keep_all_files) + m = maestro( + folder, + master_seed = seed, + terminal_outputs = terminal_outputs, + overall_log_file = True, + master_cold_start = force_cold_start, + keep_all_files = keep_all_files) # ------------------------------------------------------------------------- # Loop through beats @@ -205,10 +228,10 @@ def run_maestro_local( # **************************************************************************** if not creator_added: 
m.define_creator( - 'eped', + 'eped_initializer', BetaN = parameters_initialize["BetaN_initialization"], nu_ne = parameters_initialize["peaking_initialization"], - **beat_namelists["eped"], + **beat_namelists["eped_initializer"], **parameters_engineering ) m.initialize(BetaN = parameters_initialize["BetaN_initialization"], **geometry, **parameters_engineering) @@ -221,6 +244,8 @@ def run_maestro_local( run_namelist = {} if maestro_beats["beats"][0] in ["transp", "transp_soft"]: run_namelist = {'mpisettings' : {"trmpi": cpus, "toricmpi": cpus, "ptrmpi": 1}} + elif maestro_beats["beats"][0] in ["eped", "eped_initializer"]: + run_namelist = {'cold_start': force_cold_start, 'cpus': cpus} m.prepare(**beat_namelists[maestro_beats["beats"][0]]) m.run(**run_namelist) @@ -234,21 +259,22 @@ def run_maestro_local( def main(): parser = argparse.ArgumentParser(description='Parse MAESTRO namelist') parser.add_argument('folder', type=str, help='Folder to run MAESTRO') - parser.add_argument('file_path', type=str, help='Path to MAESTRO namelist file') - parser.add_argument('cpus', type=int, help='Number of CPUs to use') + parser.add_argument("--namelist", type=str, required=False, default=None) # namelist.maestro.yaml file, otherwise what's in the current folder + parser.add_argument('--cpus', type=int, required=False, default=8, help='Number of CPUs to use') parser.add_argument('--terminal', action='store_true', help='Print terminal outputs') args = parser.parse_args() + folder = IOtools.expandPath(args.folder) - file_path = args.file_path + maestro_namelist = args.namelist cpus = args.cpus terminal_outputs = args.terminal + maestro_namelist = Path(maestro_namelist) if maestro_namelist is not None else IOtools.expandPath('.') / "namelist.maestro.yaml" + if not folder.exists(): folder.mkdir(parents=True, exist_ok=True) - shutil.copy2(file_path, folder / 'maestro_namelist.json') - - run_maestro_local(*parse_maestro_nml(file_path),folder=folder,cpus = cpus, terminal_outputs = 
terminal_outputs) + run_maestro_local(*parse_maestro_nml(maestro_namelist),folder=folder,cpus = cpus, terminal_outputs = terminal_outputs) if __name__ == "__main__": diff --git a/src/mitim_modules/maestro/tmp_tests/maestro_test1.py b/src/mitim_modules/maestro/tmp_tests/maestro_test1.py deleted file mode 100644 index f155818a..00000000 --- a/src/mitim_modules/maestro/tmp_tests/maestro_test1.py +++ /dev/null @@ -1,136 +0,0 @@ -from tensorflow._api.v2 import compat -from mitim_modules.maestro.MAESTROmain import maestro - -mfe_im_path = '/Users/pablorf/MFE-IM' -folder = '/Users/pablorf/PROJECTS/project_2024_ARCim/maestro_runs/runs_v2/arcV2B_run15/' - -# ----------------------------------------------------------------------------------------------------------------------- -# Parameters -# ----------------------------------------------------------------------------------------------------------------------- - -parameters = {'Ip_MA': 10.95, 'B_T': 10.8, 'Zeff': 1.5, 'PichT_MW': 18.0, 'neped_20' : 1.8 , 'Tesep_keV': 0.1, 'nesep_20': 2.0/3.0} -parameters_mix = {'DTplasma': True, 'lowZ_impurity': 9.0, 'impurity_ratio_WtoZ': 0.00286*0.5, 'minority': [1,1,0.02]} - -#initializer, geometry = 'freegs', {'R': 4.25, 'a': 1.17, 'kappa_sep': 1.77, 'delta_sep': 0.58, 'zeta_sep': 0.0, 'z0': 0.0} -initializer, geometry = 'geqdsk', {'geqdsk_file': f'{mfe_im_path}/private_data/ARCV2B.geqdsk', 'coeffs_MXH' : 7} - -BetaN_initialization = 1.5 - -# ----------------------------------------------------------------------------------------------------------------------- -# Namelists -# ----------------------------------------------------------------------------------------------------------------------- - -# To see what values this namelist can take: mitim_tools/transp_tools/NMLtools.py: _default_params() -transp_namelist = { - 'flattop_window': 1.0, # <--- To allow stationarity - 'extractAC': True, # <--- To extract TORIC and NUBEAM extra files - 'dtEquilMax_ms': 10.0, # Default - 'dtHeating_ms' 
: 5.0, # Default - 'dtOut_ms' : 10.0, - 'dtIn_ms' : 10.0, - 'nzones' : 60, - 'nzones_energetic' : 20, # Default but lower than what I used to use - 'nzones_distfun' : 10, # Default but lower than what I used to use - 'MCparticles' : 1e4, - 'toric_ntheta' : 64, # Default values of TORIC, but lower than what I used to use - 'toric_nrho' : 128, # Default values of TORIC, but lower than what I used to use - 'Pich': parameters['PichT_MW']>0.0, - 'DTplasma': parameters_mix['DTplasma'], - 'Minorities': parameters_mix['minority'], - "zlump" :[ [74.0, 184.0, 0.1*parameters_mix['impurity_ratio_WtoZ']], - [parameters_mix['lowZ_impurity'], parameters_mix['lowZ_impurity']*2, 0.1] ], - } - -# To see what values this namelist can take: mitim_modules/portals/PORTALSmain.py: __init__() -portals_namelist = { "PORTALSparameters": {"launchEvaluationsAsSlurmJobs": True,"forceZeroParticleFlux": True, 'use_tglf_scan_trick': 0.02}, - "MODELparameters": { "RoaLocations": [0.35,0.55,0.75,0.875,0.9], - "ProfilesPredicted": ["te", "ti", "ne"], - "Physics_options": {"TypeTarget": 3}, - "transport_model": {"turbulence":'TGLF',"TGLFsettings": 6, "extraOptionsTGLF": {'USE_BPER':True}}}, - "INITparameters": {"FastIsThermal": True, "removeIons": [5,6], "quasineutrality": True}, - "optimization_options": { - "convergence_options": { - "maximum_iterations": 50, - "stopping_criteria_parameters": { - "maximum_value": 1e-3, - "maximum_value_is_rel": True, - }, - }, - "strategy_options": { - "AllowedExcursions":[0.0, 0.0] - }, - }, - "exploration_ranges": { - 'ymax_rel': 1.0, - 'ymin_rel': 0.9, - 'hardGradientLimits': [None,2] - } - } - -# To see what values this namelist can take: mitim_modules/maestro/utils/EPEDbeat.py: prepare() -eped_parameters = { 'nn_location': f'{mfe_im_path}/private_code_mitim/NN_DATA/EPED-NN-ARC/new-EPED-NN-MODEL-ARC.keras', - 'norm_location': f'{mfe_im_path}/private_code_mitim/NN_DATA/EPED-NN-ARC/EPED-NN-NORMALIZATION.txt'} - -# 
----------------------------------------------------------------------------------------------------------------------- -# Workflow -# ----------------------------------------------------------------------------------------------------------------------- - -from mitim_tools.misc_tools.IOtools import mitim_timer - -@mitim_timer('\t\t* MAESTRO') -def run_maestro(): - m = maestro(folder, terminal_outputs = False) - - # TRANSP with only current diffusion - transp_namelist['flattop_window'] = 10.0 - transp_namelist['dtEquilMax_ms'] = 50.0 # Let the equilibrium evolve with long steps - transp_namelist['useNUBEAMforAlphas'] = False - transp_namelist['Pich'] = False - - m.define_beat('transp', initializer=initializer) - m.define_creator('eped', BetaN = BetaN_initialization, **eped_parameters,**parameters) - m.initialize(**geometry, **parameters) - m.prepare(**transp_namelist) - m.run() - - # TRANSP for toric and nubeam - transp_namelist['flattop_window'] = 0.5 - transp_namelist['dtEquilMax_ms'] = 10.0 - transp_namelist['useNUBEAMforAlphas'] = True - transp_namelist['Pich'] = True - - m.define_beat('transp') - m.prepare(**transp_namelist) - m.run() - - # EPED - m.define_beat('eped') - m.prepare(**eped_parameters) - m.run() - - # PORTALS - m.define_beat('portals') - m.prepare(**portals_namelist, change_last_radial_call = True) - m.run() - - # TRANSP - m.define_beat('transp') - m.prepare(**transp_namelist) - m.run() - - for i in range(9): - # EPED - m.define_beat('eped') - m.prepare(**eped_parameters) - m.run() - - # PORTALS - m.define_beat('portals') - m.prepare(**portals_namelist,use_previous_surrogate_data=i>0, change_last_radial_call = True) # Reuse the surrogate data if I'm not coming from a TRANSP run - m.run() - - m.finalize() - - return m - -m = run_maestro() \ No newline at end of file diff --git a/src/mitim_modules/maestro/utils/EPEDbeat.py b/src/mitim_modules/maestro/utils/EPEDbeat.py index 6167ec0a..baefe24e 100644 --- a/src/mitim_modules/maestro/utils/EPEDbeat.py 
+++ b/src/mitim_modules/maestro/utils/EPEDbeat.py @@ -5,12 +5,13 @@ import matplotlib.pyplot as plt from scipy.optimize import curve_fit from mitim_tools.gacode_tools import PROFILEStools +from mitim_tools.eped_tools import EPEDtools from mitim_tools.misc_tools import IOtools, GRAPHICStools, GUItools from mitim_tools.surrogate_tools import NNtools from mitim_tools.popcon_tools import FunctionalForms from mitim_tools.misc_tools.LOGtools import printMsg as print from mitim_modules.maestro.utils.MAESTRObeat import beat -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from IPython import embed # <> Function to interpolate a curve <> @@ -35,11 +36,21 @@ def prepare( **kwargs ): - self.nn = NNtools.eped_nn(type='tf') - nn_location = IOtools.expandPath(nn_location) - norm_location = IOtools.expandPath(norm_location) + if nn_location is not None: - self.nn.load(nn_location, norm=norm_location) + print(f'\t- Choice of EPED: NN from {IOtools.clipstr(nn_location)}', typeMsg='i') + + self.nn = NNtools.eped_nn(type='tf') + nn_location = IOtools.expandPath(nn_location) + norm_location = IOtools.expandPath(norm_location) + + self.nn.load(nn_location, norm=norm_location) + + else: + + print('\t- Choice of EPED: full', typeMsg='i') + + self.nn = None # Parameters to run EPED with instead of those from the profiles self.neped_20 = neped_20 @@ -52,6 +63,13 @@ def prepare( self.ptop_multiplier = ptop_multiplier self.TioverTe = TioverTe + # Whether EPED is going to be run with Zeta + if 'zeta_flag' in kwargs: + self.zeta_flag = kwargs['zeta_flag'] + print('zeta_flag set to True') + else: + self.zeta_flag = False + self._inform() def run(self, **kwargs): @@ -63,7 +81,7 @@ def run(self, **kwargs): # Run the NN # ------------------------------------------------------- - eped_results = self._run(loopBetaN = 1, store_scan=True) + eped_results = self._run(loopBetaN = 1, store_scan=True, nproc_per_run=kwargs.get('cpus', 16), 
cold_start=kwargs.get('cold_start', False)) # ------------------------------------------------------- # Save stuff @@ -73,11 +91,17 @@ def run(self, **kwargs): self.rhotop = eped_results['rhotop'] - def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = False): + def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = False, nproc_per_run=64, cold_start=True): ''' minimum_relative_change_in_x: minimum relative change in x to streach the core, otherwise it will keep the old core ''' + # Check to make sure using full EPED if running with squareness + if self.zeta_flag and self.nn is not None: + print('Warning: zeta_flag is not implemented for NN-based EPED, ignoring it', typeMsg='warning') + self.zeta_flag = False + + # ------------------------------------------------------- # Grab inputs from profiles_current # ------------------------------------------------------- @@ -108,36 +132,55 @@ def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = F kappa995 = self.profiles_current.derived['kappa995'] delta995 = self.profiles_current.derived['delta995'] + zeta995 = self.profiles_current.derived['zeta995'] if self.zeta_flag else None BetaN = self.profiles_current.derived['BetaN_engineering'] Tesep_keV = self.profiles_current.profiles['te(keV)'][-1] nesep_20 = self.profiles_current.profiles['ne(10^19/m^3)'][-1]*0.1 - if 'kappa995' in self.__dict__ and self.kappa995 is not None: kappa995 = self.kappa995 - if 'delta995' in self.__dict__ and self.delta995 is not None: delta995 = self.delta995 - if "BetaN" in self.__dict__ and self.BetaN is not None: BetaN = self.BetaN - if "Tesep_keV" in self.__dict__ and self.Tesep_keV is not None: Tesep_keV = self.Tesep_keV - if "nesep_20" in self.__dict__ and self.nesep_20 is not None: nesep_20 = self.nesep_20 + if 'kappa995' in self.__dict__ and self.kappa995 is not None: kappa995 = self.kappa995 + if 'delta995' in self.__dict__ and self.delta995 is not None: delta995 = 
self.delta995 + if self.zeta_flag and 'zeta995' in self.__dict__ and self.zeta995 is not None: zeta995 = self.zeta995 + if "BetaN" in self.__dict__ and self.BetaN is not None: BetaN = self.BetaN + if "Tesep_keV" in self.__dict__ and self.Tesep_keV is not None: Tesep_keV = self.Tesep_keV + if "nesep_20" in self.__dict__ and self.nesep_20 is not None: nesep_20 = self.nesep_20 nesep_ratio = nesep_20 / neped_20 # Store evaluation - self.current_evaluation = { - 'Ip': np.abs(Ip), - 'Bt': np.abs(Bt), - 'R': np.abs(R), - 'a': np.abs(a), - 'kappa995': np.abs(kappa995), - 'delta995': np.abs(delta995), - 'neped_20': np.abs(neped_20), - 'BetaN': np.abs(BetaN), - 'zeff': np.abs(zeff), - 'Tesep_keV': np.abs(Tesep_keV), - 'nesep_ratio': np.abs(nesep_ratio), - } + if self.zeta_flag: + self.current_evaluation = { + 'Ip': np.abs(Ip), + 'Bt': np.abs(Bt), + 'R': np.abs(R), + 'a': np.abs(a), + 'kappa995': np.abs(kappa995), + 'delta995': np.abs(delta995), + 'neped_20': np.abs(neped_20), + 'BetaN': np.abs(BetaN), + 'zeff': np.abs(zeff), + 'Tesep_keV': np.abs(Tesep_keV), + 'nesep_ratio': np.abs(nesep_ratio), + 'zeta': np.abs(zeta995) + } + else: + self.current_evaluation = { + 'Ip': np.abs(Ip), + 'Bt': np.abs(Bt), + 'R': np.abs(R), + 'a': np.abs(a), + 'kappa995': np.abs(kappa995), + 'delta995': np.abs(delta995), + 'neped_20': np.abs(neped_20), + 'BetaN': np.abs(BetaN), + 'zeff': np.abs(zeff), + 'Tesep_keV': np.abs(Tesep_keV), + 'nesep_ratio': np.abs(nesep_ratio) + } # --- Sometimes we may need specific EPED inputs for key, value in self.corrections_set.items(): - self.current_evaluation[key] = value + if key not in ['ptop_kPa', 'wtop_psipol']: + self.current_evaluation[key] = value # ---------------------------------------------- print('\n\t- Running EPED with:') @@ -151,6 +194,7 @@ def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = F print(f'\t\t- zeff: {self.current_evaluation["zeff"]:.2f}') print(f'\t\t- tesep: {self.current_evaluation["Tesep_keV"]:.3f} 
keV') print(f'\t\t- nesep_ratio: {self.current_evaluation["nesep_ratio"]:.2f}') + if self.zeta_flag: print(f'\t\t- zeta: {self.current_evaluation["zeta"]:.3f}') # ------------------------------------------------------- # Run NN @@ -162,22 +206,63 @@ def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = F for i in range(loopBetaN): print(f'\t\t- BetaN: {BetaN:.2f}') - inputs_to_nn = ( - self.current_evaluation["Ip"], - self.current_evaluation["Bt"], - self.current_evaluation["R"], - self.current_evaluation["a"], - self.current_evaluation["kappa995"], - self.current_evaluation["delta995"], - self.current_evaluation["neped_20"]*10, - BetaN, - self.current_evaluation["zeff"], - self.current_evaluation["Tesep_keV"]* 1E3, - self.current_evaluation["nesep_ratio"] - ) - - ptop_kPa, wtop_psipol = self.nn(*inputs_to_nn) + if self.zeta_flag: + inputs_to_eped = ( + self.current_evaluation["Ip"], + self.current_evaluation["Bt"], + self.current_evaluation["R"], + self.current_evaluation["a"], + self.current_evaluation["kappa995"], + self.current_evaluation["delta995"], + self.current_evaluation["neped_20"]*10, + BetaN, + self.current_evaluation["zeff"], + self.current_evaluation["Tesep_keV"]* 1E3, + self.current_evaluation["nesep_ratio"], + self.current_evaluation["zeta"] + ) + + else: + inputs_to_eped = ( + self.current_evaluation["Ip"], + self.current_evaluation["Bt"], + self.current_evaluation["R"], + self.current_evaluation["a"], + self.current_evaluation["kappa995"], + self.current_evaluation["delta995"], + self.current_evaluation["neped_20"]*10, + BetaN, + self.current_evaluation["zeff"], + self.current_evaluation["Tesep_keV"]* 1E3, + self.current_evaluation["nesep_ratio"] + ) + # ------------------------------------------------------- + # Give the option to override the ptop_kPa and wtop_psipol + if 'ptop_kPa' in self.corrections_set: + print(f'\t\t- Overriding ptop_kPa: {self.corrections_set["ptop_kPa"]:.2f} kPa', typeMsg='w') + ptop_kPa = 
self.corrections_set["ptop_kPa"] + else: + ptop_kPa = None + + if 'wtop_psipol' in self.corrections_set: + print(f'\t\t- Overriding wtop_psipol: {self.corrections_set["wtop_psipol"]:.5f}', typeMsg='w') + wtop_psipol = self.corrections_set["wtop_psipol"] + else: + wtop_psipol = None + # ------------------------------------------------------- + + if ptop_kPa is None or wtop_psipol is None: + + if self.nn is not None: + ptop_kPa, wtop_psipol = self.nn(*inputs_to_eped) + else: + ptop_kPa, wtop_psipol = self._run_full_eped(self.folder,*inputs_to_eped, nproc_per_run=nproc_per_run, cold_start=cold_start) + + if store_scan: + store_scan = False + print('\t- Warning: store_scan is not available for full EPED runs yet, only for NN-based EPED') + print('\t- Raw EPED results:') print(f'\t\t- ptop_kPa: {ptop_kPa:.4f}') print(f'\t\t- wtop_psipol: {wtop_psipol:.4f}') @@ -251,10 +336,10 @@ def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = F scan_results = {} for k,key in enumerate(scan_relative): - inputs_scan = list(copy.deepcopy(inputs_to_nn)) + inputs_scan = list(copy.deepcopy(inputs_to_eped)) scan_results[key] = {'ptop_kPa': [], 'wtop_psipol': [], 'value': []} for m in np.linspace(1-scan_relative[key],1+scan_relative[key],15): - inputs_scan[k] = inputs_to_nn[k]*m + inputs_scan[k] = inputs_to_eped[k]*m ptop_kPa0, wtop_psipol0 = self.nn(*inputs_scan) scan_results[key]['ptop_kPa'].append(ptop_kPa0) scan_results[key]['wtop_psipol'].append(wtop_psipol0) @@ -263,7 +348,7 @@ def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = F scan_results[key]['wtop_psipol'] = np.array(scan_results[key]['wtop_psipol']) scan_results[key]['value'] = np.array(scan_results[key]['value']) - scan_results[key]['ptop_kPa_nominal'], scan_results[key]['wtop_psipol_nominal'] = self.nn(*inputs_to_nn) + scan_results[key]['ptop_kPa_nominal'], scan_results[key]['wtop_psipol_nominal'] = self.nn(*inputs_to_eped) # --------------------------------- # Store @@ 
-278,22 +363,85 @@ def _run(self, loopBetaN = 1, minimum_relative_change_in_x=0.005, store_scan = F 'nesep_20': nesep_20, 'rhotop': rhotop, 'Tesep_keV': Tesep_keV, - 'inputs_to_nn': inputs_to_nn, + 'inputs_to_eped': inputs_to_eped, 'scan_results': scan_results } for key in eped_results: print(f'\t\t- {key}: {eped_results[key]}') - self.profiles_output.writeCurrentStatus(file=self.folder / 'input.gacode.eped') + self.profiles_output.write_state(file=self.folder / 'input.gacode.eped') return eped_results + def _run_full_eped(self, folder, Ip, Bt, R, a, kappa995, delta995, neped19, BetaN, zeff, Tesep_eV, nesep_ratio, *args, nproc_per_run=64, cold_start=True): + ''' + Run the full EPED code with the given inputs. + Returns ptop_kPa and wtop_psipol. + If zeta is provided as an extra argument, use it; otherwise set zeta to zero. + ''' + + # Handle optional zeta parameter + if len(args) > 0: + zeta = args[0] + print('Let of args > 0, using zeta =', zeta) + else: + zeta = 0.0 + print('No zeta provided, setting zeta = 0.0') + + eped = EPEDtools.EPED(folder=folder) + + if len(args) > 0: + input_params = { + 'ip': Ip, + 'bt': Bt, + 'r': R, + 'a': a, + 'kappa': kappa995, + 'delta': delta995, + 'neped': neped19, + 'betan': BetaN, + 'zeffped': zeff, + 'nesep': nesep_ratio * neped19, + 'tesep': Tesep_eV, + 'zeta': zeta + } + print('_run_full_eped input_params with zeta:', input_params) + else: + input_params = { + 'ip': Ip, + 'bt': Bt, + 'r': R, + 'a': a, + 'kappa': kappa995, + 'delta': delta995, + 'neped': neped19, + 'betan': BetaN, + 'zeffped': zeff, + 'nesep': nesep_ratio * neped19, + 'tesep': Tesep_eV + } + print('_run_full_eped input_params without zeta:', input_params) + + eped.run( + subfolder = 'case1', + input_params = input_params, + nproc_per_run = nproc_per_run, + cold_start = cold_start, + ) + + eped.read(subfolder='case1') + + ptop_kPa = float(eped.results['case1']['run1']['ptop']) + wtop_psipol = float(eped.results['case1']['run1']['wptop']) + + return ptop_kPa, 
wtop_psipol + def finalize(self, **kwargs): - self.profiles_output = PROFILEStools.PROFILES_GACODE(self.folder / 'input.gacode.eped') + self.profiles_output = PROFILEStools.gacode_state(self.folder / 'input.gacode.eped') - self.profiles_output.writeCurrentStatus(file=self.folder_output / 'input.gacode') + self.profiles_output.write_state(file=self.folder_output / 'input.gacode') def merge_parameters(self): # EPED beat does not modify the profiles grid or anything, so I can keep it fine @@ -307,7 +455,7 @@ def grab_output(self): loaded_results = np.load(self.folder_output / 'eped_results.npy', allow_pickle=True).item() - profiles = PROFILEStools.PROFILES_GACODE(self.folder_output / 'input.gacode') if isitfinished else None + profiles = PROFILEStools.gacode_state(self.folder_output / 'input.gacode') if isitfinished else None else: @@ -332,7 +480,7 @@ def plot(self, fn = None, counter = 0, full_plot = True): loaded_results, profiles = self.grab_output() - profiles_current = PROFILEStools.PROFILES_GACODE(self.folder / 'input.gacode') + profiles_current = PROFILEStools.gacode_state(self.folder / 'input.gacode') profiles_current.plotRelevant(axs = axs, color = 'b', label = 'orig') @@ -397,10 +545,10 @@ def _plot_scan(self, ikey, loaded_results = None, axs = None, color = 'b'): axs[i].plot(loaded_results['scan_results'][key]['value'], loaded_results['scan_results'][key][ikey], 's-', color=color, markersize=3) - axs[i].plot([loaded_results['inputs_to_nn'][i]], [loaded_results[ikey]], '^', color=color) - axs[i].plot([loaded_results['inputs_to_nn'][i]], [loaded_results['scan_results'][key][f'{ikey}_nominal']], 'o', color=color) + axs[i].plot([loaded_results['inputs_to_eped'][i]], [loaded_results[ikey]], '^', color=color) + axs[i].plot([loaded_results['inputs_to_eped'][i]], [loaded_results['scan_results'][key][f'{ikey}_nominal']], 'o', color=color) - axs[i].axvline(loaded_results['inputs_to_nn'][i], color=color, ls='--') + axs[i].axvline(loaded_results['inputs_to_eped'][i], 
color=color, ls='--') axs[i].axhline(loaded_results['scan_results'][key][f'{ikey}_nominal'], color=color, ls='-.') max_val = np.max([max_val,np.max(loaded_results['scan_results'][key][ikey])]) @@ -488,12 +636,12 @@ def scale_profile_by_stretching( x, y, xp, yp, xp_old, plotYN=False, label='', k print('\t\t\t* Keeping old aLT profile in the core-predicted region, using r/a for it') # Calculate gradient in entire region - aLy = CALCtools.produceGradient( torch.from_numpy(roa), torch.from_numpy(y) ) + aLy = CALCtools.derivation_into_Lx( torch.from_numpy(roa), torch.from_numpy(y) ) # I'm only interested in core region, plus one ghost point with the same gradient aLy = torch.cat( (aLy[:ibc+1], aLy[ibc].unsqueeze(0)) ) - y_mod = CALCtools.integrateGradient( torch.from_numpy(roa[:ibc+2]).unsqueeze(0), aLy.unsqueeze(0), torch.from_numpy(np.array(ynew[ibc+1])).unsqueeze(0) ).squeeze().numpy() + y_mod = CALCtools.integration_Lx( torch.from_numpy(roa[:ibc+2]).unsqueeze(0), aLy.unsqueeze(0), torch.from_numpy(np.array(ynew[ibc+1])).unsqueeze(0) ).squeeze().numpy() ynew[:ibc+2] = y_mod @@ -511,11 +659,11 @@ def scale_profile_by_stretching( x, y, xp, yp, xp_old, plotYN=False, label='', k ax.legend() ax = axs[1] - aLy = CALCtools.produceGradient( torch.from_numpy(roa), torch.from_numpy(y) ) + aLy = CALCtools.derivation_into_Lx( torch.from_numpy(roa), torch.from_numpy(y) ) ax.plot(x,aLy,'-o',color='b', label='old') ax.axvline(x=xp_old,color='b',ls='--') - aLy = CALCtools.produceGradient( torch.from_numpy(roa), torch.from_numpy(ynew) ) + aLy = CALCtools.derivation_into_Lx( torch.from_numpy(roa), torch.from_numpy(ynew) ) ax.plot(x,aLy,'-o',color='r', label='new') ax.axvline(x=xp,color='r',ls='--') @@ -599,6 +747,6 @@ def eped_profiler(profiles, xp_old, rhotop, Tetop_keV, Titop_keV, netop_20, mini # Re-derive # --------------------------------- - profiles_output.deriveQuantities(rederiveGeometry=False) + profiles_output.derive_quantities(rederiveGeometry=False) return profiles_output 
\ No newline at end of file diff --git a/src/mitim_modules/maestro/utils/MAESTRObeat.py b/src/mitim_modules/maestro/utils/MAESTRObeat.py index b4ac6733..4991b4a1 100644 --- a/src/mitim_modules/maestro/utils/MAESTRObeat.py +++ b/src/mitim_modules/maestro/utils/MAESTRObeat.py @@ -40,6 +40,8 @@ def define_initializer(self, initializer): self.initialize = initializer_from_previous(self) elif initializer == 'freegs': self.initialize = initializer_from_freegs(self) + elif initializer == 'fibe': + self.initialize = initializer_from_fibe(self) elif initializer == 'geqdsk': self.initialize = initializer_from_geqdsk(self) elif initializer == 'profiles': @@ -92,7 +94,7 @@ def __init__(self, beat_instance, label = 'profiles'): def __call__(self, profiles_file = None, Vsurf = None, **kwargs_beat): # Load profiles - self.profiles_current = PROFILEStools.PROFILES_GACODE(profiles_file) + self.profiles_current = PROFILEStools.gacode_state(profiles_file) # -------------------------------------------------------------------------------------------- # Operations @@ -112,7 +114,7 @@ def __call__(self, profiles_file = None, Vsurf = None, **kwargs_beat): # -------------------------------------------------------------------------------------------- # Write it to initialization folder - self.profiles_current.writeCurrentStatus(file=self.folder / 'input.gacode') + self.profiles_current.write_state(file=self.folder / 'input.gacode') # Pass the profiles to the beat instance self.beat_instance.profiles_current = self.profiles_current @@ -177,8 +179,29 @@ def __call__( print(f'\t- Converting geqdsk to profiles, using {coeffs_MXH = }') p = self.f.to_profiles(ne0_20 = netop_20, Zeff = Zeff, PichT = PichT_MW, coeffs_MXH = coeffs_MXH) + # Sometimes I may want to change Ip and Bt + if 'Ip_MA' in kwargs_profiles and kwargs_profiles['Ip_MA'] is not None: + Ip_in_geqdsk = p.profiles['current(MA)'][0] + if Ip_in_geqdsk != kwargs_profiles['Ip_MA']: + print(f'\t- Requested to ignore geqdsk current and use 
user-specified one, changing Ip from {Ip_in_geqdsk} to {kwargs_profiles["Ip_MA"]}', typeMsg = 'w') + p.profiles['current(MA)'][0] = kwargs_profiles['Ip_MA'] + print(f'\t\t* Scaling poloidal flux by same factor as Ip, {kwargs_profiles["Ip_MA"] / Ip_in_geqdsk:.2f}') + p.profiles['polflux(Wb/radian)'] *= kwargs_profiles['Ip_MA'] / Ip_in_geqdsk + print(f'\t\t* Scaling q-profile by same factor as Ip, {kwargs_profiles["Ip_MA"] / Ip_in_geqdsk:.2f}') + p.profiles['q(-)'] *= 1/(kwargs_profiles['Ip_MA'] / Ip_in_geqdsk) + + if 'B_T' in kwargs_profiles and kwargs_profiles['B_T'] is not None: + Bt_in_geqdsk = p.profiles['bcentr(T)'][0] + if Bt_in_geqdsk != kwargs_profiles['B_T']: + print(f'\t- Requested to ignore geqdsk B and use user-specified one, changing Bt from {Bt_in_geqdsk} to {kwargs_profiles["B_T"]}', typeMsg = 'w') + p.profiles['bcentr(T)'][0] = kwargs_profiles['B_T'] + print(f'\t\t* Scaling toroidal flux by same factor as Bt, {kwargs_profiles["B_T"] / Bt_in_geqdsk:.2f}') + p.profiles['torfluxa(Wb/radian)'] *= kwargs_profiles['B_T'] / Bt_in_geqdsk + print(f'\t\t* Scaling q-profile by same factor as Bt, {kwargs_profiles["B_T"] / Bt_in_geqdsk:.2f}') + p.profiles['q(-)'] *= kwargs_profiles['B_T'] / Bt_in_geqdsk + # Write it to initialization folder - p.writeCurrentStatus(file=self.folder / 'input.geqdsk.gacode') + p.write_state(file=self.folder / 'input.geqdsk.gacode') # Copy original geqdsk for reference use shutil.copy2(geqdsk_file, self.folder / "input.geqdsk") @@ -248,6 +271,67 @@ def __call__(self, # Call the geqdsk initializer super().__call__(geqdsk_file = self.folder / 'freegs.geqdsk',**kwargs_geqdsk) +# -------------------------------------------------------------------------------------------- +# Initializer from FiBE: create the equilibrium, convert to geqdsk and call the geqdsk initializer +# -------------------------------------------------------------------------------------------- + +class initializer_from_fibe(initializer_from_geqdsk): + ''' + Idea is to 
write geqdsk and then call the geqdsk initializer + ''' + def __init__(self, beat_instance, label = 'fibe'): + super().__init__(beat_instance, label = label) + + def __call__(self, + R, + a, + kappa_sep, + delta_sep, + zeta_sep, + z0, + p0_MPa = 1.0, + Ip_MA = 1.0, + B_T = 5.4, + **kwargs_geqdsk + ): + + p0 = p0_MPa * 1.0e6 + Ip = Ip_MA * 1.0e6 + # If profiles exist, substitute the pressure and density guesses by something better (not perfect though, no ions) + if ('ne' in kwargs_geqdsk.get('profiles_insert',{})) and ('Te' in kwargs_geqdsk.get('profiles_insert',{})): + print('\t- Using ne profile instead of the ne0 guess') + ne0_20 = kwargs_geqdsk['profiles_insert']['ne'][1][0] + print('\t- Using Te profile for a better estimation of pressure, instead of the p0 guess') + Te0_keV = kwargs_geqdsk['profiles_insert']['Te'][1][0] + p0 = 2 * (Te0_keV*1E3) * 1.602176634E-19 * (ne0_20 * 1E20) + # If betaN provided, use it to estimate the pressure + elif 'BetaN' in kwargs_geqdsk: + print('\t- Using BetaN for a better estimation of pressure, instead of the p0 guess') + pvol_MPa = ( Ip_MA / (a * B_T) ) * (B_T ** 2 / (2 * 4 * np.pi * 1e-7)) / 1e6 * kwargs_geqdsk['BetaN'] * 1E-2 + p0 = pvol_MPa * 3.0 * 1.0e6 + + # Run FiBE to generate equilibrium + from fibe import FixedBoundaryEquilibrium + eq = FixedBoundaryEquilibrium() + eq.define_grid_and_boundary_with_mxh( + nr=129, + nz=129, + rgeo=R, + zgeo=z0, + rminor=a, + kappa=kappa_sep, + cos_coeffs=[0.0, 0.0, 0.0], + sin_coeffs=[0.0, np.arcsin(delta_sep), -zeta_sep]) + eq.initialize_profiles_with_minimal_input(p0, Ip, B_T) + eq.initialize_psi() + eq.solve_psi() + + # Convert to geqdsk and write it to initialization folder + eq.to_geqdsk(str(self.folder / 'fibe.geqdsk')) + + # Call the geqdsk initializer + super().__call__(geqdsk_file = self.folder / 'fibe.geqdsk',**kwargs_geqdsk) + # -------------------------------------------------------------------------------------------- # [Generic] Profile creator: Insert profiles # 
-------------------------------------------------------------------------------------------- @@ -290,7 +374,7 @@ def __call__(self): self.initialize_instance.profiles_current.profiles['ni(10^19/m^3)'] = self.initialize_instance.profiles_current.profiles['ni(10^19/m^3)'] * (self.initialize_instance.profiles_current.profiles['ne(10^19/m^3)']/old_density)[:,np.newaxis] # Update derived - self.initialize_instance.profiles_current.deriveQuantities() + self.initialize_instance.profiles_current.derive_quantities() def _inform_save(self, **kwargs): pass @@ -440,7 +524,8 @@ def __call__(self): self.beat_eped.profiles_current = self.initialize_instance.profiles_current # Run EPED - eped_results = self.beat_eped._run(loopBetaN = 1) + nproc_per_run = 64 #TODO: make it a parameter to be received from MAESTRO namelist + eped_results = self.beat_eped._run(loopBetaN = 1, nproc_per_run=nproc_per_run, cold_start=True) # Assume always cold start for a creator # Potentially save variables np.save(self.beat_eped.folder_output / 'eped_results.npy', eped_results) @@ -466,4 +551,4 @@ def _inform_save(self, eped_results = None): if eped_results is None: eped_results = np.load(beat_eped_for_save.folder_output / 'eped_results.npy', allow_pickle=True).item() - beat_eped_for_save._inform_save(eped_results) \ No newline at end of file + beat_eped_for_save._inform_save(eped_results) diff --git a/src/mitim_modules/maestro/utils/MAESTROplot.py b/src/mitim_modules/maestro/utils/MAESTROplot.py index 321eb904..685c639b 100644 --- a/src/mitim_modules/maestro/utils/MAESTROplot.py +++ b/src/mitim_modules/maestro/utils/MAESTROplot.py @@ -1,12 +1,15 @@ import numpy as np from collections import OrderedDict from mitim_tools.gacode_tools import PROFILEStools -from mitim_tools.misc_tools import LOGtools, GRAPHICStools +from mitim_tools.plasmastate_tools.utils import state_plotting +from mitim_tools.misc_tools import LOGtools, GRAPHICStools, IOtools from mitim_tools.gs_tools import GEQtools from pathlib 
import Path from mitim_tools.misc_tools.LOGtools import printMsg as print +import json, re +from pathlib import Path +import matplotlib.pyplot as plt from IPython import embed - from mitim_modules.maestro.utils.TRANSPbeat import transp_beat from mitim_modules.maestro.utils.PORTALSbeat import portals_beat from mitim_modules.maestro.utils.EPEDbeat import eped_beat @@ -51,9 +54,9 @@ def plotMAESTRO(folder, fn = None, num_beats = 2, only_beats = None, full_plot = m = grabMAESTRO(folder) # Plot - m.plot(fn = fn, num_beats=num_beats, only_beats = only_beats, full_plot = full_plot) + ps, ps_lab = m.plot(fn = fn, num_beats=num_beats, only_beats = only_beats, full_plot = full_plot) - return m + return m, ps, ps_lab def plot_results(self, fn): @@ -62,9 +65,11 @@ def plot_results(self, fn): # ******************************************************************************************************** # Collect initialization - ini = {'geqdsk': None, 'profiles': PROFILEStools.PROFILES_GACODE(f'{self.beats[1].initialize.folder}/input.gacode')} + ini = {'geqdsk': None, 'profiles': None} if (self.beats[1].initialize.folder / 'input.geqdsk').exists(): ini['geqdsk'] = GEQtools.MITIMgeqdsk(self.beats[1].initialize.folder / 'input.geqdsk') + if Path(f'{self.beats[1].initialize.folder}/input.gacode').exists(): + ini['profiles'] = PROFILEStools.gacode_state(f'{self.beats[1].initialize.folder}/input.gacode') # Collect PORTALS profiles and TRANSP cdfs translated to profiles objs = OrderedDict() @@ -73,7 +78,11 @@ def plot_results(self, fn): for i,beat in enumerate(self.beats.values()): - _, profs = beat.grab_output() + # _, profs = beat.grab_output() + if (beat.folder_output / 'input.gacode').exists(): + profs = PROFILEStools.gacode_state(beat.folder_output / 'input.gacode') + else: + profs = None if isinstance(beat, transp_beat): key = f'TRANSP b#{i+1}' @@ -89,17 +98,17 @@ def plot_results(self, fn): # 
******************************************************************************************************** ps, ps_lab = [], [] for label in objs: - if isinstance(objs[label], PROFILEStools.PROFILES_GACODE): + if isinstance(objs[label], PROFILEStools.gacode_state): ps.append(objs[label]) ps_lab.append(label) maxPlot = 5 if len(ps) > 0: # Plot profiles - figs = PROFILEStools.add_figures(fn,fnlab_pre = 'MAESTRO - ') + figs = state_plotting.add_figures(fn,fnlab_pre = 'MAESTRO - ') log_file = self.folder_logs/'plot_maestro.log' if (not self.terminal_outputs) else None with LOGtools.conditional_log_to_file(log_file=log_file): - PROFILEStools.plotAll(ps[-maxPlot:], extralabs=ps_lab[-maxPlot:], figs=figs) + state_plotting.plotAll(ps[-maxPlot:], extralabs=ps_lab[-maxPlot:], figs=figs) for p,pl in zip(ps,ps_lab): p.printInfo(label = pl) @@ -193,7 +202,24 @@ def plot_results(self, fn): DFHJ """ ) + + plot_special_quantities(ps, ps_lab, axs) + + if (self.folder_performance / 'timing.jsonl').exists(): + # ******************************************************************************************************** + # Timings + # ******************************************************************************************************** + fig = fn.add_figure(label='MAESTRO timings', tab_color=3) + axs = fig.subplot_mosaic(""" + A + B + """,sharex=True) + IOtools.plot_timings(self.folder_performance / 'timing.jsonl', axs = axs, log=True) + + return ps, ps_lab +def plot_special_quantities(ps, ps_lab, axs, color='b', label = '', legYN=True): + x, BetaN, Pfus, p_th, p_tot, Pin, Q, fG, nu_ne, q95, q0, xsaw,p90 = [], [], [], [], [], [], [], [], [], [], [], [], [] for p,pl in zip(ps,ps_lab): x.append(pl) @@ -210,24 +236,37 @@ def plot_results(self, fn): xsaw.append(p.derived['rho_saw']) p90.append(np.interp(0.9,p.profiles['rho(-)'],p.derived['pthr_manual'])) + def _special(ax,x): + for xi in x: + if 'portals' in xi.lower(): + if legYN: + ax.axvline(xi, color='y', linestyle='-', lw=5, alpha=0.2) + 
# ----------------------------------------------------------------- ax = axs['A'] - ax.plot(x, BetaN, '-s', markersize=7, lw = 1) + ax.plot(x, BetaN, '-s', color=color, markersize=7, lw = 1, label = label) ax.set_ylabel('$\\beta_N$ (engineering)') ax.set_title('Pressure Evolution') + if len(label) > 0: + ax.legend() GRAPHICStools.addDenseAxis(ax) ax.set_ylim(bottom = 0) + + _special(ax, x) ax.set_xticklabels([]) ax = axs['D'] - ax.plot(x, p_th, '-s', markersize=7, lw = 1, label='Thermal

') - ax.plot(x, p_tot, '-o', markersize=7, lw = 1, label='Total

') - ax.plot(x, p90, '-*', markersize=7, lw = 1, label='Total, p(rho=0.9)') + ax.plot(x, p_th, '-s', color=color, markersize=7, lw = 1, label='Thermal

') + ax.plot(x, p_tot, '-o', color=color, markersize=7, lw = 1, label='Total

') + ax.plot(x, p90, '-*', color=color, markersize=7, lw = 1, label='Total, p(rho=0.9)') ax.set_ylabel('$p$ (MPa)') GRAPHICStools.addDenseAxis(ax) ax.set_ylim(bottom = 0) - ax.legend() + if legYN: + ax.legend() + + _special(ax, x) rotation = 90 fontsize = 6 @@ -236,35 +275,41 @@ def plot_results(self, fn): # ----------------------------------------------------------------- ax = axs['B'] - ax.plot(x, Q, '-s', markersize=7, lw = 1) + ax.plot(x, Q, '-s', color=color, markersize=7, lw = 1) ax.set_ylabel('$Q$') ax.set_title('Performance Evolution') GRAPHICStools.addDenseAxis(ax) ax.set_ylim(bottom = 0) ax.set_xticklabels([]) + + _special(ax, x) ax = axs['E'] - ax.plot(x, Pfus, '-s', markersize=7, lw = 1) + ax.plot(x, Pfus, '-s', color=color, markersize=7, lw = 1) ax.set_ylabel('$P_{fus}$ (MW)') GRAPHICStools.addDenseAxis(ax) ax.set_ylim(bottom = 0) ax.set_xticklabels([]) + + _special(ax, x) ax = axs['F'] - ax.plot(x, Pin, '-s', markersize=7, lw = 1) + ax.plot(x, Pin, '-s', color=color, markersize=7, lw = 1) ax.set_ylabel('$P_{in}$ (MW)') GRAPHICStools.addDenseAxis(ax) ax.set_ylim(bottom = 0) ax.tick_params(axis='x', rotation=rotation, labelsize=fontsize) + + _special(ax, x) # ----------------------------------------------------------------- ax = axs['G'] - ax.plot(x, fG, '-s', markersize=7, lw = 1) + ax.plot(x, fG, '-s', color=color, markersize=7, lw = 1) ax.set_ylabel('$f_{G}$') ax.set_title('Density Evolution') ax.axhline(y=1, color = 'k', lw = 1, ls = '--') @@ -273,37 +318,46 @@ def plot_results(self, fn): ax.set_ylim([0,1.2]) ax.set_xticklabels([]) + + _special(ax, x) ax = axs['H'] - ax.plot(x, nu_ne, '-s', markersize=7, lw = 1) + ax.plot(x, nu_ne, '-s', color=color, markersize=7, lw = 1) ax.set_ylabel('$\\nu_{ne}$') GRAPHICStools.addDenseAxis(ax) ax.set_ylim(bottom = 0) ax.tick_params(axis='x', rotation=rotation, labelsize=fontsize) + + _special(ax, x) # ----------------------------------------------------------------- # 
----------------------------------------------------------------- ax = axs['I'] - ax.plot(x, q95, '-s', markersize=7, lw = 1, label='q95') - ax.plot(x, q0, '-*', markersize=7, lw = 1, label='q0') + ax.plot(x, q95, '-s', color=color, markersize=7, lw = 1, label='q95') + ax.plot(x, q0, '-*', color=color, markersize=7, lw = 1, label='q0') ax.set_ylabel('$q$') ax.set_title('Current Evolution') GRAPHICStools.addDenseAxis(ax) ax.axhline(y=1, color = 'k', lw = 2, ls = '--') - ax.legend() + if legYN: + ax.legend() ax.set_ylim(bottom = 0) ax.set_xticklabels([]) + + _special(ax, x) ax = axs['J'] - ax.plot(x, xsaw, '-s', markersize=7, lw = 1) + ax.plot(x, xsaw, '-s', color=color, markersize=7, lw = 1) ax.set_ylabel('Inversion radius (rho)') GRAPHICStools.addDenseAxis(ax) ax.set_ylim([0,1]) ax.tick_params(axis='x', rotation=rotation, labelsize=fontsize) + + _special(ax, x) # ----------------------------------------------------------------- @@ -311,5 +365,7 @@ def plot_results(self, fn): def plot_g_quantities(g, axs, color = 'b', lw = 1, ms = 0): g.plotFluxSurfaces(ax=axs[0], fluxes=np.linspace(0, 1, 21), rhoPol=False, sqrt=True, color=color,lwB=lw*3, lw = lw,label='Initial geqdsk') - axs[3].plot(g.g['RHOVN'], g.g['PRES']*1E-6, '-o', markersize=ms, lw = lw, label='Initial geqdsk', color=color) - axs[4].plot(g.g['RHOVN'], g.g['QPSI'], '-o', markersize=ms, lw = lw, label='Initial geqdsk', color=color) + axs[3].plot(g.g.derived['rho_tor'], g.g.raw['pres']*1E-6, '-o', markersize=ms, lw = lw, label='Initial geqdsk', color=color) + axs[4].plot(g.g.derived['rho_tor'], g.g.raw['qpsi'], '-o', markersize=ms, lw = lw, label='Initial geqdsk', color=color) + + diff --git a/src/mitim_modules/maestro/utils/PORTALSbeat.py b/src/mitim_modules/maestro/utils/PORTALSbeat.py index b8bf1552..11276430 100644 --- a/src/mitim_modules/maestro/utils/PORTALSbeat.py +++ b/src/mitim_modules/maestro/utils/PORTALSbeat.py @@ -25,18 +25,20 @@ def prepare(self, use_previous_surrogate_data = False, 
try_flux_match_only_for_first_point = True, change_last_radial_call = False, - additional_params_in_surrogate = [], - exploration_ranges = { - 'ymax_rel': 1.0, - 'ymin_rel': 1.0, - 'hardGradientLimits': [0,2] - }, - PORTALSparameters = {}, - MODELparameters = {}, - optimization_options = {}, - INITparameters = {}, + portals_namelist_location = None, + portals_parameters = None, + initialization_parameters = None, + optimization_options = None, enforce_impurity_radiation_existence = True, ): + + if portals_parameters is None: + portals_parameters = {} + if initialization_parameters is None: + initialization_parameters = {} + if optimization_options is None: + optimization_options = {} + self.fileGACODE = self.initialize.folder / 'input.gacode' @@ -44,7 +46,7 @@ def prepare(self, profiles = self.profiles_current for i in range(len(profiles.Species)): - data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics" / "radiation_chebyshev.csv") + data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics_models" / "radiation_chebyshev.csv") if not (data_df['Ion'].str.lower()==profiles.Species[i]["N"].lower()).any(): print(f"\t\t- {profiles.Species[i]['N']} not found in radiation table, looking for closest Z (+- 5) USING THE Z SPECIFIED IN THE INPUT.GACODE (fully stripped assumption)",typeMsg='w') # Find closest Z @@ -56,23 +58,20 @@ def prepare(self, new_name = data_df['Ion'][iZ] - print(f"\t\t\t- Changing name of ion from {profiles.Species[i]["N"]} ({profiles.Species[i]["Z"]}) to {new_name} ({Z[iZ]})") + print(f'\t\t\t- Changing name of ion from {profiles.Species[i]["N"]} ({profiles.Species[i]["Z"]}) to {new_name} ({Z[iZ]})') profiles.profiles['name'][i] = profiles.Species[i]["N"] = new_name self.profiles_current = profiles - self.profiles_current.writeCurrentStatus(file = self.fileGACODE) + self.profiles_current.write_state(file = self.fileGACODE) - self.PORTALSparameters = PORTALSparameters - 
self.MODELparameters = MODELparameters + self.portals_parameters = portals_parameters + self.portals_namelist_location = portals_namelist_location self.optimization_options = optimization_options - self.INITparameters = INITparameters + self.initialization_parameters = initialization_parameters - self.additional_params_in_surrogate = additional_params_in_surrogate - - self.exploration_ranges = exploration_ranges self.use_previous_surrogate_data = use_previous_surrogate_data self.change_last_radial_call = change_last_radial_call @@ -87,14 +86,23 @@ def run(self, **kwargs): cold_start = kwargs.get('cold_start', False) - portals_fun = PORTALSmain.portals(self.folder, additional_params_in_surrogate = self.additional_params_in_surrogate) + # Read the namelist if explicitly given in the MAESTRO namelist (variable: portals_namelist_location) + portals_fun = PORTALSmain.portals(self.folder, portals_namelist = self.portals_namelist_location) - modify_dictionary(portals_fun.PORTALSparameters, self.PORTALSparameters) - modify_dictionary(portals_fun.MODELparameters, self.MODELparameters) - modify_dictionary(portals_fun.optimization_options, self.optimization_options) - modify_dictionary(portals_fun.INITparameters, self.INITparameters) + # Update the namelist with the parameters in the MAESTRO namelist (variable: portals_parameters) + portals_fun.portals_parameters = IOtools.deep_dict_update(portals_fun.portals_parameters, self.portals_parameters) + if 'optimization_options' in self.portals_parameters: + portals_fun.portals_parameters['optimization_options'] = portals_fun.optimization_options = IOtools.deep_dict_update(portals_fun.optimization_options, self.portals_parameters['optimization_options']) + + # MAESTRO beat may receive optimization options changes from previous beats, so allow that too + portals_fun.portals_parameters['optimization_options'] = portals_fun.optimization_options = IOtools.deep_dict_update(portals_fun.optimization_options, self.optimization_options) - 
portals_fun.prep(self.fileGACODE,askQuestions=False,**self.exploration_ranges) + # Initialization now happens by the user + from mitim_tools.gacode_tools.PROFILEStools import gacode_state + p = gacode_state(self.fileGACODE) + p.correct(options=self.initialization_parameters) + + portals_fun.prep(p,askQuestions=False) self.mitim_bo = STRATEGYtools.MITIM_BO(portals_fun, seed = self.maestro_instance.master_seed, cold_start = cold_start, askQuestions = False) @@ -107,7 +115,7 @@ def run(self, **kwargs): if len(self.mitim_bo.optimization_data.data) == 0: self._flux_match_for_first_point() - portals_fun.prep(self.fileGACODE,askQuestions=False,**self.exploration_ranges) + portals_fun.prep(self.fileGACODE,askQuestions=False) self.mitim_bo = STRATEGYtools.MITIM_BO(portals_fun, seed=self.maestro_instance.master_seed,cold_start = cold_start, askQuestions = False) @@ -115,7 +123,7 @@ def run(self, **kwargs): def _flux_match_for_first_point(self): - print('\t- Running flux match for first point') + print('\n\t- Running flux match for first point') # Flux-match first folder_fm = self.folder / 'flux_match' @@ -123,7 +131,13 @@ def _flux_match_for_first_point(self): portals = PORTALSanalysis.PORTALSanalyzer.from_folder(self.folder_starting_point) p = portals.powerstates[portals.ibest].profiles - _ = PORTALSoptimization.flux_match_surrogate(portals.step,p,file_write_csv=folder_fm / 'optimization_data.csv') + _ = PORTALSoptimization.flux_match_surrogate( + portals.step, + p, + target_options_use = self.mitim_bo.optimization_object.powerstate.target_options, # Use the target_options of the new run, not the old one (which may be with fixed targets if soft) + file_write_csv=folder_fm / 'optimization_data.csv' + ) + # Move files (self.folder / 'Outputs').mkdir(parents=True, exist_ok=True) @@ -155,7 +169,7 @@ def finalize(self, **kwargs): print('\t\t- PORTALS probably converged in training, so analyzing a bit differently') self.profiles_output = 
portals_output.profiles[portals_output.opt_fun_full.res.best_absolute_index] - self.profiles_output.writeCurrentStatus(file=self.folder_output / 'input.gacode') + self.profiles_output.write_state(file=self.folder_output / 'input.gacode') def merge_parameters(self): ''' @@ -172,7 +186,7 @@ def merge_parameters(self): ''' # Write the pre-merge input.gacode before modifying it - self.profiles_output.writeCurrentStatus(file=self.folder_output / 'input.gacode_pre_merge') + self.profiles_output.write_state(file=self.folder_output / 'input.gacode_pre_merge') # First, bring back to the resolution of the frozen p_frozen = self.maestro_instance.profiles_with_engineering_parameters @@ -220,18 +234,19 @@ def merge_parameters(self): # Insert powers opt_fun = PORTALSanalysis.PORTALSanalyzer.from_folder(self.folder) - if opt_fun.MODELparameters['Physics_options']["TypeTarget"] > 1: - # Insert exchange + if 'qie' in opt_fun.portals_parameters['target']['options']['targets_evolve']: self.profiles_output.profiles['qei(MW/m^3)'] = profiles_portals_out.profiles['qei(MW/m^3)'] - if opt_fun.MODELparameters['Physics_options']["TypeTarget"] > 2: - # Insert radiation and fusion - for key in ['qbrem(MW/m^3)', 'qsync(MW/m^3)', 'qline(MW/m^3)', 'qfuse(MW/m^3)', 'qfusi(MW/m^3)']: - self.profiles_output.profiles[key] = profiles_portals_out.profiles[key] + if 'qrad' in opt_fun.portals_parameters['target']['options']['targets_evolve']: + for key in ['qbrem(MW/m^3)', 'qsync(MW/m^3)', 'qline(MW/m^3)']: + self.profiles_output.profiles[key] = profiles_portals_out.profiles[key] + if 'qfus' in opt_fun.portals_parameters['target']['options']['targets_evolve']: + for key in ['qfuse(MW/m^3)', 'qfusi(MW/m^3)']: + self.profiles_output.profiles[key] = profiles_portals_out.profiles[key] # -------------------------------------------------------------------------------------------- # Write to final input.gacode - self.profiles_output.deriveQuantities() - 
self.profiles_output.writeCurrentStatus(file=self.folder_output / 'input.gacode') + self.profiles_output.derive_quantities() + self.profiles_output.write_state(file=self.folder_output / 'input.gacode') def grab_output(self, full = False): @@ -241,7 +256,7 @@ def grab_output(self, full = False): opt_fun = STRATEGYtools.opt_evaluator(folder) if full else PORTALSanalysis.PORTALSanalyzer.from_folder(folder) - profiles = PROFILEStools.PROFILES_GACODE(self.folder_output / 'input.gacode') if isitfinished else None + profiles = PROFILEStools.gacode_state(self.folder_output / 'input.gacode') if isitfinished else None return opt_fun, profiles @@ -299,7 +314,7 @@ def _inform(self, use_previous_residual = True, use_previous_surrogate_data = Tr last_radial_location_moved = False if change_last_radial_call and ('rhotop' in self.maestro_instance.parameters_trans_beat): - if 'RoaLocations' in self.MODELparameters: + if 'predicted_roa' in self.portals_parameters['solution']: print('\t\t- Using EPED pedestal top rho to select last radial location of PORTALS (in r/a)') @@ -311,40 +326,40 @@ def _inform(self, use_previous_residual = True, use_previous_surrogate_data = Tr #roatop = roatop.round(3) # set the last value of the radial locations to the interpolated value - roatop_old = copy.deepcopy(self.MODELparameters["RoaLocations"][-1]) - self.MODELparameters["RoaLocations"][-1] = roatop - print(f'\t\t\t* Last radial location moved from r/a = {roatop_old} to {self.MODELparameters["RoaLocations"][-1]}') - print(f'\t\t\t* RoaLocations: {self.MODELparameters["RoaLocations"]}') + roatop_old = copy.deepcopy(self.portals_parameters['solution']["predicted_roa"][-1]) + self.portals_parameters['solution']["predicted_roa"][-1] = roatop + print(f'\t\t\t* Last radial location moved from r/a = {roatop_old} to {self.portals_parameters["solution"]["predicted_roa"][-1]}') + print(f'\t\t\t* predicted_roa: {self.portals_parameters["solution"]["predicted_roa"]}') - strKeys = 'RoaLocations' + strKeys = 
'predicted_roa' else: print('\t\t- Using EPED pedestal top rho to select last radial location of PORTALS (in rho)') # set the last value of the radial locations to the interpolated value - rhotop_old = copy.deepcopy(self.MODELparameters["RhoLocations"][-1]) - self.MODELparameters["RhoLocations"][-1] = self.maestro_instance.parameters_trans_beat['rhotop'] - print(f'\t\t\t* Last radial location moved from rho = {rhotop_old} to {self.MODELparameters["RhoLocations"][-1]}') + rhotop_old = copy.deepcopy(self.portals_parameters['solution']['predicted_rho'][-1]) + self.portals_parameters['solution']['predicted_rho'][-1] = self.maestro_instance.parameters_trans_beat['rhotop'] + print(f'\t\t\t* Last radial location moved from rho = {rhotop_old} to {self.portals_parameters["solution"]["predicted_rho"][-1]}') - strKeys = 'RhoLocations' + strKeys = 'predicted_rho' last_radial_location_moved = True # Check if I changed it previously and it hasn't moved if strKeys in self.maestro_instance.parameters_trans_beat: print(f'\t\t\t* {strKeys} in previous PORTALS beat: {self.maestro_instance.parameters_trans_beat[strKeys]}') - print(f'\t\t\t* {strKeys} in current PORTALS beat: {self.MODELparameters[strKeys]}') + print(f'\t\t\t* {strKeys} in current PORTALS beat: {self.portals_parameters["solution"][strKeys]}') - if abs(self.MODELparameters[strKeys][-1]-self.maestro_instance.parameters_trans_beat[strKeys][-1]) / self.maestro_instance.parameters_trans_beat[strKeys][-1] < minimum_relative_change_in_x: + if abs(self.portals_parameters['solution'][strKeys][-1]-self.maestro_instance.parameters_trans_beat[strKeys][-1]) / self.maestro_instance.parameters_trans_beat[strKeys][-1] < minimum_relative_change_in_x: print('\t\t\t* Last radial location was not moved') last_radial_location_moved = False - self.MODELparameters[strKeys][-1] = self.maestro_instance.parameters_trans_beat[strKeys][-1] + self.portals_parameters['solution'][strKeys][-1] = 
self.maestro_instance.parameters_trans_beat[strKeys][-1] # In the situation where the last radial location moves, I cannot reuse that surrogate data if last_radial_location_moved and reusing_surrogate_data: print('\t\t- Last radial location was moved, so surrogate data will not be reused for that specific location') - self.optimization_options['surrogate_options']["extrapointsModelsAvoidContent"] = ['Tar',f'_{len(self.MODELparameters[strKeys])}'] + self.optimization_options['surrogate_options']["extrapointsModelsAvoidContent"] = ['_tar',f"_{len(self.portals_parameters['solution'][strKeys])}"] self.try_flux_match_only_for_first_point = False def _inform_save(self): @@ -357,41 +372,28 @@ def _inform_save(self): # Standard PORTALS output try: stepSettings = portals_output.step.stepSettings - MODELparameters = portals_output.MODELparameters + portals_parameters = portals_output.portals_parameters # Converged in training case except AttributeError: stepSettings = portals_output.opt_fun_full.mitim_model.stepSettings - MODELparameters =portals_output.opt_fun_full.mitim_model.optimization_object.MODELparameters + portals_parameters =portals_output.opt_fun_full.mitim_model.optimization_object.portals_parameters max_value_neg_residual = stepSettings['optimization_options']['convergence_options']['stopping_criteria_parameters']['maximum_value'] self.maestro_instance.parameters_trans_beat['portals_neg_residual_obj'] = max_value_neg_residual print(f'\t\t* Maximum value of negative residual saved for future beats: {max_value_neg_residual}') - fileTraining = stepSettings['folderOutputs'] / 'surrogate_data.csv' + fileTraining = self.folder / 'Outputs/' / 'surrogate_data.csv' self.maestro_instance.parameters_trans_beat['portals_last_run_folder'] = self.folder self.maestro_instance.parameters_trans_beat['portals_surrogate_data_file'] = fileTraining print(f'\t\t* Surrogate data saved for future beats: {IOtools.clipstr(fileTraining)}') - if 'RoaLocations' in MODELparameters: - 
self.maestro_instance.parameters_trans_beat['RoaLocations'] = MODELparameters['RoaLocations'] - print(f'\t\t* RoaLocations saved for future beats: {MODELparameters["RoaLocations"]}') - elif 'RhoLocations' in MODELparameters: - self.maestro_instance.parameters_trans_beat['RhoLocations'] = MODELparameters['RhoLocations'] - print(f'\t\t* RhoLocations saved for future beats: {MODELparameters["RhoLocations"]}') - - -def modify_dictionary(original, new): - for key in new: - # If something on the new dictionary is not in the original, add it - if key not in original: - original[key] = new[key] - # If it is a dictionary, go deeper - elif isinstance(new[key], dict): - modify_dictionary(original[key], new[key]) - # If it is not a dictionary, just replace the value - else: - original[key] = new[key] + if 'predicted_roa' in portals_parameters['solution']: + self.maestro_instance.parameters_trans_beat['predicted_roa'] = portals_parameters['solution']['predicted_roa'] + print(f'\t\t* predicted_roa saved for future beats: {portals_parameters["solution"]["predicted_roa"]}') + elif 'predicted_rho' in portals_parameters['solution']: + self.maestro_instance.parameters_trans_beat['predicted_rho'] = portals_parameters['solution']['predicted_rho'] + print(f'\t\t* predicted_rho saved for future beats: {portals_parameters["solution"]["predicted_rho"]}') # ----------------------------------------------------------------------------------------------------------------------- # Defaults to help MAESTRO @@ -414,9 +416,11 @@ def portals_beat_soft_criteria(portals_namelist): portals_namelist_soft['optimization_options']['convergence_options']["stopping_criteria_parameters"]["minimum_dvs_variation"] = [10, 3, 1.0] portals_namelist_soft['optimization_options']['convergence_options']["stopping_criteria_parameters"]["ricci_value"] = 0.15 - if 'MODELparameters' not in portals_namelist_soft: - portals_namelist_soft['MODELparameters'] = {} + if 'target' not in 
portals_namelist_soft["portals_parameters"]: + portals_namelist_soft["portals_parameters"]['target'] = {} + if 'options' not in portals_namelist_soft["portals_parameters"]['target']: + portals_namelist_soft["portals_parameters"]['target']['options'] = {} - portals_namelist_soft["MODELparameters"]["Physics_options"] = {"TypeTarget": 2} + portals_namelist_soft["portals_parameters"]["target"]["options"]["targets_evolve"] = ["qie"] return portals_namelist_soft diff --git a/src/mitim_modules/maestro/utils/TRANSPbeat.py b/src/mitim_modules/maestro/utils/TRANSPbeat.py index f0d6d395..bd873022 100644 --- a/src/mitim_modules/maestro/utils/TRANSPbeat.py +++ b/src/mitim_modules/maestro/utils/TRANSPbeat.py @@ -104,7 +104,7 @@ def prepare( self._additional_operations_add_initialization() # ICRF on - PichT_MW = self.profiles_current.derived['qRF_MWmiller'][-1] + PichT_MW = self.profiles_current.derived['qRF_MW'][-1] if freq_ICH is None: @@ -156,8 +156,15 @@ def finalize(self, force_auxiliary_heating_at_output = {'Pe': None, 'Pi': None}, print('\t\t- No TRANSP files in beat folder, assuming they may exist in the output folder (MAESTRO restart case)', typeMsg='w') # Find CDF name - files = [f for f in self.folder_output.iterdir() if f.is_file()] - cdf_prefix = next((file.stem for file in files if file.suffix.lower() == '.cdf'), None) + files = [f for f in self.folder.iterdir() if f.is_file()] + cdf_prefix = next( + (file.stem + for file in files + if file.suffix.lower() == ".cdf" # keep only .cdf files … + and not file.name.lower().endswith("ph.cdf")), # … but skip *.ph.cdf + None + ) + shutil.copy2(self.folder / f"{cdf_prefix}TR.DAT", self.folder_output / f"{self.shot}{self.runid}TR.DAT") shutil.copy2(self.folder / f"{cdf_prefix}.CDF", self.folder_output / f"{self.shot}{self.runid}.CDF") shutil.copy2(self.folder / f"{cdf_prefix}tr.log", self.folder_output / f"{self.shot}{self.runid}tr.log") @@ -174,18 +181,18 @@ def finalize(self, force_auxiliary_heating_at_output = {'Pe': None, 
'Pi': None}, self._add_heating_profiles(force_auxiliary_heating_at_output) # Write profiles - self.profiles_output.writeCurrentStatus(file=self.folder_output / "input.gacode") + self.profiles_output.write_state(file=self.folder_output / "input.gacode") def _add_heating_profiles(self, force_auxiliary_heating_at_output = {'Pe': None, 'Pi': None}): ''' force_auxiliary_heating_at_output['Pe'] has the shaping function (takes rho) and the integrated value ''' - for key, pkey, ikey in zip(['Pe','Pi'], ['qrfe(MW/m^3)', 'qrfi(MW/m^3)'], ['qRFe_MWmiller', 'qRFi_MWmiller']): + for key, pkey, ikey in zip(['Pe','Pi'], ['qrfe(MW/m^3)', 'qrfi(MW/m^3)'], ['qRFe_MW', 'qRFi_MW']): if force_auxiliary_heating_at_output[key] is not None: self.profiles_output.profiles[pkey] = force_auxiliary_heating_at_output[key][0](self.profiles_output.profiles['rho(-)']) - self.profiles_output.deriveQuantities() + self.profiles_output.derive_quantities() self.profiles_output.profiles[pkey] = self.profiles_output.profiles[pkey] * force_auxiliary_heating_at_output[key][1]/self.profiles_output.derived[ikey][-1] def merge_parameters(self): @@ -206,7 +213,7 @@ def merge_parameters(self): # Write the pre-merge input.gacode before modifying it profiles_output_pre_merge = copy.deepcopy(self.profiles_output) - profiles_output_pre_merge.writeCurrentStatus(file=self.folder_output / 'input.gacode_pre_merge') + profiles_output_pre_merge.write_state(file=self.folder_output / 'input.gacode_pre_merge') # First, bring back to the resolution of the frozen p_frozen = self.maestro_instance.profiles_with_engineering_parameters @@ -230,14 +237,14 @@ def merge_parameters(self): self.profiles_output.profiles[key] = p_frozen.profiles[key] # Power scale - self.profiles_output.profiles['qrfe(MW/m^3)'] *= p_frozen.derived['qRF_MWmiller'][-1] / self.profiles_output.derived['qRF_MWmiller'][-1] - self.profiles_output.profiles['qrfi(MW/m^3)'] *= p_frozen.derived['qRF_MWmiller'][-1] / self.profiles_output.derived['qRF_MWmiller'][-1] 
+ self.profiles_output.profiles['qrfe(MW/m^3)'] *= p_frozen.derived['qRF_MW'][-1] / self.profiles_output.derived['qRF_MW'][-1] + self.profiles_output.profiles['qrfi(MW/m^3)'] *= p_frozen.derived['qRF_MW'][-1] / self.profiles_output.derived['qRF_MW'][-1] # -------------------------------------------------------------------------------------------- # Write to final input.gacode - self.profiles_output.deriveQuantities() - self.profiles_output.writeCurrentStatus(file=self.folder_output / 'input.gacode') + self.profiles_output.derive_quantities() + self.profiles_output.write_state(file=self.folder_output / 'input.gacode') def grab_output(self): @@ -245,7 +252,7 @@ def grab_output(self): if isitfinished: c = CDFtools.transp_output(self.folder_output) - profiles = PROFILEStools.PROFILES_GACODE(self.folder_output / 'input.gacode') + profiles = PROFILEStools.gacode_state(self.folder_output / 'input.gacode') else: # Trying to see if there's an intermediate CDF in folder print('\t\t- Searching for intermediate CDF in folder') diff --git a/src/mitim_modules/portals/PORTALSmain.py b/src/mitim_modules/portals/PORTALSmain.py index 6fb6db59..d5cbee3f 100644 --- a/src/mitim_modules/portals/PORTALSmain.py +++ b/src/mitim_modules/portals/PORTALSmain.py @@ -1,317 +1,138 @@ import shutil import torch import copy +from collections import OrderedDict import numpy as np import dill as pickle_dill -from functools import partial from collections import OrderedDict from mitim_tools.misc_tools import IOtools from mitim_tools.gacode_tools import PROFILEStools -from mitim_tools.gacode_tools.utils import PORTALSinteraction from mitim_modules.portals import PORTALStools from mitim_modules.portals.utils import ( PORTALSinit, - PORTALSoptimization, PORTALSanalysis, ) -from mitim_modules.powertorch.physics import TRANSPORTtools, TARGETStools from mitim_tools.opt_tools import STRATEGYtools from mitim_tools.opt_tools.utils import BOgraphics from mitim_tools.misc_tools.LOGtools import printMsg as print 
+from mitim_tools import __mitimroot__ from IPython import embed -""" -Reading analysis for PORTALS has more options than standard: --------------------------------------------------------------------------------------------------------- - Standard: - ************************************** - -1: Only improvement - 0: Only optimization_results - 1: 0 + Pickle - 2: 1 + Final redone in this machine - - PORTALS-specific: - ************************************** - 3: 1 + PORTALSplot metrics (only works if optimization_extra is provided or Execution exists) - 4: 3 + PORTALSplot expected (only works if optimization_extra is provided or Execution exists) - 5: 2 + 4 (only works if optimization_extra is provided or Execution exists) - - >2 will also plot profiles & gradients comparison (original, initial, best) -""" - -def default_namelist(optimization_options, CGYROrun=False): - """ - This is to be used after reading the namelist, so self.optimization_options should be completed with main defaults. 
- """ - - # Initialization - optimization_options["initialization_options"]["initial_training"] = 5 - optimization_options["initialization_options"]["initialization_fun"] = PORTALSoptimization.initialization_simple_relax - - # Strategy for stopping - optimization_options["convergence_options"]["maximum_iterations"] = 50 - optimization_options['convergence_options']['stopping_criteria'] = PORTALStools.stopping_criteria_portals - optimization_options['convergence_options']['stopping_criteria_parameters'] = { - "maximum_value": 5e-3, # Reducing residual by 200x is enough - "maximum_value_is_rel": True, - "minimum_dvs_variation": [10, 5, 0.1], # After iteration 10, Check if 5 consecutive DVs are varying less than 0.1% from the rest that has been evaluated - "ricci_value": 0.1, - "ricci_d0": 2.0, - "ricci_lambda": 0.5, - } - - optimization_options['acquisition_options']['relative_improvement_for_stopping'] = 1e-2 - - # Surrogate - optimization_options["surrogate_options"]["selectSurrogate"] = partial(PORTALStools.selectSurrogate, CGYROrun=CGYROrun) - - if CGYROrun: - # CGYRO runs should prioritize accuracy - optimization_options["acquisition_options"]["type"] = "posterior_mean" - optimization_options["acquisition_options"]["optimizers"] = ["root", "botorch", "ga"] - else: - # TGLF runs should prioritize speed - optimization_options["acquisition_options"]["type"] = "posterior_mean" # "noisy_logei_mc" - optimization_options["acquisition_options"]["optimizers"] = ["sr", "root"] #, "botorch"] - - return optimization_options - - class portals(STRATEGYtools.opt_evaluator): def __init__( self, folder, # Folder where the PORTALS workflow will be run - namelist=None, # If None, default namelist will be used. 
If not None, it will be read and used - tensor_opts = { + portals_namelist = None, + tensor_options = { "dtype": torch.double, "device": torch.device("cpu"), }, - CGYROrun=False, # If True, use CGYRO defaults for best optimization practices - portals_transformation_variables = None, # If None, use defaults for both main and trace - portals_transformation_variables_trace = None, - additional_params_in_surrogate = [] # Additional parameters to be used in the surrogate (e.g. ['q']) ): - ''' - Note that additional_params_in_surrogate They must exist in the plasma dictionary of the powerstate object - ''' - + print("\n-----------------------------------------------------------------------------------------") print("\t\t\t PORTALS class module") print("-----------------------------------------------------------------------------------------\n") - # Store folder, namelist. Read namelist - super().__init__( folder, - namelist=namelist, - tensor_opts=tensor_opts, - default_namelist_function=( - partial(default_namelist, CGYROrun=CGYROrun) - if (namelist is None) - else None - ), - ) - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Default (please change to your desire after instancing the object) - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - self.potential_flags = {'INITparameters': [], 'MODELparameters': [], 'PORTALSparameters': []} - - """ - Parameters to initialize files - ------------------------------ - These parameters are used to initialize the input.gacode to work with, before any PORTALS workflow - ( passed directly to profiles.correct() ) - Bear in mind that this is not necessary, you provide an already ready-to-go input.gacode without the need - to run these corrections. 
- """ - - self.INITparameters = { - "recompute_ptot": True, # Recompute PTOT to match kinetic profiles (after removals) - "quasineutrality": False, # Make sure things are quasineutral by changing the *MAIN* ion (D,T or both) (after removals) - "removeIons": [], # Remove this ion from the input.gacode (if D,T,Z, eliminate T with [2]) - "removeFast": False, # Automatically detect which are fast ions and remove them - "FastIsThermal": False, # Do not remove fast, keep their diluiton effect but make them thermal - "sameDensityGradients": False, # Make all ion density gradients equal to electrons - "groupQIONE": False, - "ensurePostiveGamma": False, - "ensureMachNumber": None, - } - - for key in self.INITparameters.keys(): - self.potential_flags['INITparameters'].append(key) - - """ - Parameters to run the model - --------------------------- - The corrections are applied prior to each evaluation, so that things are consistent. - Here, do not include things that are not specific for a given iteration. Otherwise if they are general - changes to input.gacode, then that should go into INITparameters. - - if MODELparameters contains RoaLocations, use that instead of RhoLocations - """ - - self.MODELparameters = { - "RhoLocations": [0.3, 0.45, 0.6, 0.75, 0.9], - "RoaLocations": None, - "ProfilesPredicted": ["te", "ti", "ne"], # ['nZ','w0'] - "Physics_options": { - "TypeTarget": 3, - "TurbulentExchange": 0, # In PORTALS TGYRO evaluations, let's always calculate turbulent exchange, but NOT include it in targets! 
- "PtotType": 1, # In PORTALS TGYRO evaluations, let's always use the PTOT column (so control of that comes from the ap) - "GradientsType": 0, # In PORTALS TGYRO evaluations, we need to not recompute gradients - "InputType": 1, # In PORTALS TGYRO evaluations, we need to use exact profiles - }, - "applyCorrections": { - "Ti_thermals": True, # Keep all thermal ion temperatures equal to the main Ti - "ni_thermals": True, # Adjust for quasineutrality by modifying the thermal ion densities together with ne - "recompute_ptot": True, # Recompute PTOT to insert in input file each time - "Tfast_ratio": False, # Keep the ratio of Tfast/Te constant throughout the Te evolution - "ensureMachNumber": None, # Change w0 to match this Mach number when Ti varies - }, - "transport_model": {"turbulence":'TGLF',"TGLFsettings": 6, "extraOptionsTGLF": {}} - } - - for key in self.MODELparameters.keys(): - self.potential_flags['MODELparameters'].append(key) + tensor_options=tensor_options + ) - """ - Physics-informed parameters to fit surrogates - --------------------------------------------- - """ + # Read PORTALS namelist (if not provided, use default) + if portals_namelist is None: + self.portals_namelist = __mitimroot__ / "templates" / "namelist.portals.yaml" + print(f"\t- No PORTALS namelist provided, using default in {IOtools.clipstr(self.portals_namelist)}") + else: + self.portals_namelist = portals_namelist + print(f"\t- Using provided PORTALS namelist in {IOtools.clipstr(self.portals_namelist)}") + self.portals_parameters = IOtools.read_mitim_yaml(self.portals_namelist) - ( - portals_transformation_variables, - portals_transformation_variables_trace, - ) = PORTALStools.default_portals_transformation_variables(additional_params = additional_params_in_surrogate) + # Read optimization namelist (always the default, the values to be modified are in the portals one) + if self.portals_parameters["optimization_namelist_location"] is not None: + self.optimization_namelist = 
self.portals_parameters["optimization_namelist_location"] + else: + self.optimization_namelist = __mitimroot__ / "templates" / "namelist.optimization.yaml" + self.optimization_options = IOtools.read_mitim_yaml(self.optimization_namelist) - """ - Parameters to run PORTALS - ----------------------- - """ + # Apply the optimization options to the proper namelist and drop it from portals_parameters + if 'optimization_options' in self.portals_parameters: + self.optimization_options = IOtools.deep_dict_update(self.optimization_options, self.portals_parameters['optimization_options']) + del self.portals_parameters['optimization_options'] - # Selection of model - transport_evaluator = TRANSPORTtools.tgyro_model - targets_evaluator = TARGETStools.analytical_model - - self.PORTALSparameters = { - "percentError": [5,10,1], # (%) Error (std, in percent) of model evaluation [TGLF (treated as minimum if scan trick), NEO, TARGET] - "transport_evaluator": transport_evaluator, - "targets_evaluator": targets_evaluator, - "TargetCalc": "powerstate", # Method to calculate targets (tgyro or powerstate) - "launchEvaluationsAsSlurmJobs": True, # Launch each evaluation as a batch job (vs just comand line) - "useConvectiveFluxes": True, # If True, then convective flux for final metric (not fitting). 
If False, particle flux - "includeFastInQi": False, # If True, and fast ions have been included, in seprateNEO, sum fast - "useDiffusivities": False, # If True, use [chi_e,chi_i,D] instead of [Qe,Qi,Gamma] - "useFluxRatios": False, # If True, fit to [Qi,Qe/Qi,Ge/Qi] - "portals_transformation_variables": portals_transformation_variables, # Physics-informed parameters to fit surrogates - "portals_transformation_variables_trace": portals_transformation_variables_trace, # Physics-informed parameters to fit surrogates for trace impurities - "Qi_criterion_stable": 0.01, # For CGYRO runs, MW/m^2 of Qi below which the case is considered stable - "percentError_stable": 5.0, # (%) For CGYRO runs, minimum error based on target if case is considered stable - "forceZeroParticleFlux": False, # If True, ignore particle flux profile and assume zero for all radii - "surrogateForTurbExch": False, # Run turbulent exchange as surrogate? - "profiles_postprocessing_fun": None, # Function to post-process input.gacode only BEFORE passing to transport codes - "Pseudo_multipliers": [1.0]*5, # [Qe,Qi,Ge] multipliers to calculate pseudo - "ImpurityOfInterest": None, # Impurity to do flux-matching for if nZ enabled (name of first impurity instance AFTER postprocessing), e.g. 
"W" - "applyImpurityGammaTrick": True, # If True, fit model to GZ/nZ, valid on the trace limit - "UseOriginalImpurityConcentrationAsWeight": 1.0, # If not None, using UseOriginalImpurityConcentrationAsWeight/fZ_0 as scaling factor for GZ, where fZ_0 is the original impurity concentration on axis - "fImp_orig": 1.0, - "fineTargetsResolution": 20, # If not None, calculate targets with this radial resolution (defaults TargetCalc to powerstate) - "hardCodedCGYRO": None, # If not None, use this hard-coded CGYRO evaluation - "additional_params_in_surrogate": additional_params_in_surrogate, - "use_tglf_scan_trick": 0.02, # If not None, use TGLF scan trick to calculate TGLF errors with this maximum delta - "keep_full_model_folder": True, # If False, remove full model folder after evaluation, to avoid large folders (e.g. in MAESTRO runs) - "cores_per_tglf_instance": 1, # Number of cores to use per TGLF instance - } - - for key in self.PORTALSparameters.keys(): - self.potential_flags['PORTALSparameters'].append(key) + # Grab all the flags here in a way that, after changing the dictionary extenrally, I make sure it's the same flags as PORTALS expects + self.potential_flags = IOtools.deep_grab_flags_dict(self.portals_parameters) def prep( self, - fileGACODE, + mitim_state, cold_start=False, - ymax_rel=1.0, - ymin_rel=1.0, - limitsAreRelative=True, - dvs_fixed=None, - hardGradientLimits=None, - enforceFiniteTemperatureGradients=None, - define_ranges_from_profiles=None, - start_from_folder=None, - reevaluateTargets=0, seedInitial=None, askQuestions=True, - ModelOptions=None, ): - """ - Notes: - - ymax_rel (and ymin_rel) can be float (common for all radii, channels) or the dictionary directly, e.g.: - ymax_rel = { - 'te': [1.0, 0.5, 0.5, 0.5], - 'ti': [0.5, 0.5, 0.5, 0.5], - 'ne': [1.0, 0.5, 0.5, 0.5] - } - - enforceFiniteTemperatureGradients is used to be able to select ymin_rel = 2.0 for ne but ensure that te, ti is at, e.g., enforceFiniteTemperatureGradients = 0.95 - - 
start_from_folder is a folder from which to grab optimization_data and optimization_extra - (if used with reevaluateTargets>0, change targets by reevaluating with different parameters) - - seedInitial can be optionally give a seed to randomize the starting profile (useful for developing, paper writing) - """ + + # Grab exploration ranges + ymax = self.portals_parameters["solution"]["exploration_ranges"]["ymax"] + ymin = self.portals_parameters["solution"]["exploration_ranges"]["ymin"] + limits_are_relative = self.portals_parameters["solution"]["exploration_ranges"]["limits_are_relative"] + fixed_gradients = self.portals_parameters["solution"]["exploration_ranges"]["fixed_gradients"] + yminymax_atleast = self.portals_parameters["solution"]["exploration_ranges"]["yminymax_atleast"] + enforce_finite_aLT = self.portals_parameters["solution"]["exploration_ranges"]["enforce_finite_aLT"] + define_ranges_from_profiles = self.portals_parameters["solution"]["exploration_ranges"]["define_ranges_from_profiles"] + start_from_folder = self.portals_parameters["solution"]["exploration_ranges"]["start_from_folder"] + reevaluate_targets = self.portals_parameters["solution"]["exploration_ranges"]["reevaluate_targets"] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Make sure that options that are required by good behavior of PORTALS # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - key_rhos = self.check_flags() + print(">> PORTALS flags pre-check") + + # Check that I haven't added a deprecated variable that I expect some behavior from + IOtools.check_flags_mitim_namelist(self.portals_parameters, self.potential_flags, avoid = ["run", "read"], askQuestions=askQuestions) - # TO BE REMOVED IN FUTURE - if not isinstance(cold_start, bool): - raise Exception("cold_start must be a boolean") + key_rhos = "predicted_roa" if self.portals_parameters["solution"]["predicted_roa"] is not None else 
"predicted_rho" # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Initialization # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - if IOtools.isfloat(ymax_rel): - ymax_rel0 = copy.deepcopy(ymax_rel) + if IOtools.isfloat(ymax): + ymax0 = copy.deepcopy(ymax) - ymax_rel = {} - for prof in self.MODELparameters["ProfilesPredicted"]: - ymax_rel[prof] = np.array( [ymax_rel0] * len(self.MODELparameters[key_rhos]) ) + ymax = {} + for prof in self.portals_parameters["solution"]["predicted_channels"]: + ymax[prof] = np.array( [ymax0] * len(self.portals_parameters["solution"][key_rhos]) ) - if IOtools.isfloat(ymin_rel): - ymin_rel0 = copy.deepcopy(ymin_rel) + if IOtools.isfloat(ymin): + ymin0 = copy.deepcopy(ymin) - ymin_rel = {} - for prof in self.MODELparameters["ProfilesPredicted"]: - ymin_rel[prof] = np.array( [ymin_rel0] * len(self.MODELparameters[key_rhos]) ) + ymin = {} + for prof in self.portals_parameters["solution"]["predicted_channels"]: + ymin[prof] = np.array( [ymin0] * len(self.portals_parameters["solution"][key_rhos]) ) - if enforceFiniteTemperatureGradients is not None: + if enforce_finite_aLT is not None: for prof in ['te', 'ti']: - if prof in ymin_rel: - ymin_rel[prof] = np.array(ymin_rel[prof]).clip(min=None,max=enforceFiniteTemperatureGradients) + if prof in ymin: + ymin[prof] = np.array(ymin[prof]).clip(min=None,max=enforce_finite_aLT) # Initialize print(">> PORTALS initalization module (START)", typeMsg="i") PORTALSinit.initializeProblem( self, self.folder, - fileGACODE, - self.INITparameters, - ymax_rel, - ymin_rel, + mitim_state, + ymax, + ymin, start_from_folder=start_from_folder, define_ranges_from_profiles=define_ranges_from_profiles, - dvs_fixed=dvs_fixed, - limitsAreRelative=limitsAreRelative, + fixed_gradients=fixed_gradients, + limits_are_relative=limits_are_relative, cold_start=cold_start, - hardGradientLimits=hardGradientLimits, - tensor_opts = 
self.tensor_opts, + yminymax_atleast=yminymax_atleast, + tensor_options = self.tensor_options, seedInitial=seedInitial, checkForSpecies=askQuestions, - ModelOptions=ModelOptions, ) print(">> PORTALS initalization module (END)", typeMsg="i") @@ -320,9 +141,7 @@ def prep( # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if start_from_folder is not None: - self.reuseTrainingTabular( - start_from_folder, self.folder, reevaluateTargets=reevaluateTargets - ) + self.reuseTrainingTabular(start_from_folder, self.folder, reevaluate_targets=reevaluate_targets) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Ignore targets in surrogate_data.csv @@ -337,11 +156,17 @@ def prep( else: print("\t- extrapointsModels already defined, not changing") + # Make a copy of the namelist that was imported to the folder + shutil.copy(self.portals_namelist, self.folder / "portals.namelist_original.yaml") + + # Write the parameters (after script modification) to a yaml namelist for tracking purposes + IOtools.write_mitim_yaml(self.portals_parameters, self.folder / "namelist.portals.yaml") + def _define_reuse_models(self): ''' The user can define a list of strings to avoid reusing surrogates. e.g. 
- 'Tar' to avoid reusing targets + '_tar' to avoid reusing targets '_5' to avoid reusing position 5 ''' @@ -349,7 +174,7 @@ def _define_reuse_models(self): # Define avoiders if self.optimization_options['surrogate_options']['extrapointsModelsAvoidContent'] is None: - self.optimization_options['surrogate_options']['extrapointsModelsAvoidContent'] = ['Tar'] + self.optimization_options['surrogate_options']['extrapointsModelsAvoidContent'] = ['_tar'] # Define extrapointsModels for key in self.surrogate_parameters['surrogate_transformation_variables_lasttime'].keys(): @@ -376,7 +201,7 @@ def run(self, paramsfile, resultsfile): name, numPORTALS=numPORTALS, dictOFs=dictOFs, - remove_folder_upon_completion=not self.PORTALSparameters["keep_full_model_folder"], + remove_folder_upon_completion=not self.portals_parameters["solution"]["keep_full_model_folder"], ) # Write results @@ -391,14 +216,10 @@ def run(self, paramsfile, resultsfile): # Extra operations: Store data that will be useful to store and interpret in a machine were this was not run if self.optimization_extra is not None: - dictStore = IOtools.unpickle_mitim(self.optimization_extra) #TODO: This will fail in future versions of torch + dictStore = IOtools.unpickle_mitim(self.optimization_extra) #TODO: This will fail in future versions of torch dictStore[int(numPORTALS)] = {"powerstate": powerstate} - dictStore["profiles_modified"] = PROFILEStools.PROFILES_GACODE( - self.folder / "Initialization" / "input.gacode_modified" - ) - dictStore["profiles_original"] = PROFILEStools.PROFILES_GACODE( - self.folder / "Initialization" / "input.gacode_original" - ) + dictStore["profiles_modified"] = PROFILEStools.gacode_state(self.folder / "Initialization" / "input.gacode_modified") + dictStore["profiles_original"] = PROFILEStools.gacode_state( self.folder / "Initialization" / "input.gacode_original") with open(self.optimization_extra, "wb") as handle: pickle_dill.dump(dictStore, handle, protocol=4) @@ -416,17 +237,16 @@ def 
scalarized_objective(self, Y): ------------------------------------------------------------------------- Prepare transport dictionary ------------------------------------------------------------------------- - Note: var_dict['QeTurb'] must have shape (dim1...N, num_radii) + Note: var_dict['Qe_tr_turb'] must have shape (dim1...N, num_radii) """ var_dict = {} for of in ofs_ordered_names: - var, _ = of.split("_") + + var = '_'.join(of.split("_")[:-1]) if var not in var_dict: var_dict[var] = torch.Tensor().to(Y) - var_dict[var] = torch.cat( - (var_dict[var], Y[..., ofs_ordered_names == of]), dim=-1 - ) + var_dict[var] = torch.cat((var_dict[var], Y[..., ofs_ordered_names == of]), dim=-1) """ ------------------------------------------------------------------------- @@ -436,7 +256,7 @@ def scalarized_objective(self, Y): res must have shape (dim1...N) """ - of, cal, _, res = PORTALSinteraction.calculate_residuals(self.powerstate, self.PORTALSparameters,specific_vars=var_dict) + of, cal, _, res = PORTALStools.calculate_residuals(self.powerstate, self.portals_parameters,specific_vars=var_dict) return of, cal, res @@ -449,51 +269,10 @@ def analyze_results(self, plotYN=True, fn=None, cold_start=False, analysis_level self, plotYN=plotYN, fn=fn, cold_start=cold_start, analysis_level=analysis_level ) - def check_flags(self): - - print(">> PORTALS flags pre-check") - - # Check that I haven't added a deprecated variable that I expect some behavior from - for key in self.potential_flags.keys(): - for flag in self.__dict__[key]: - if flag not in self.potential_flags[key]: - print( - f"\t- {key}['{flag}'] is an unexpected variable, prone to errors or misinterpretation", - typeMsg="q", - ) - # ---------------------------------------------------------------------------------- - - if self.PORTALSparameters["fineTargetsResolution"] is not None: - if self.PORTALSparameters["TargetCalc"] != "powerstate": - print("\t- Requested fineTargetsResolution, so running powerstate target 
calculations",typeMsg="w") - self.PORTALSparameters["TargetCalc"] = "powerstate" - - if not issubclass(self.PORTALSparameters["transport_evaluator"], TRANSPORTtools.tgyro_model) and (self.PORTALSparameters["TargetCalc"] == "tgyro"): - print("\t- Requested TGYRO targets, but transport evaluator is not tgyro, so changing to powerstate",typeMsg="w") - self.PORTALSparameters["TargetCalc"] = "powerstate" - - if ("InputType" not in self.MODELparameters["Physics_options"]) or self.MODELparameters["Physics_options"]["InputType"] != 1: - print("\t- In PORTALS TGYRO evaluations, we need to use exact profiles (InputType=1)",typeMsg="i") - self.MODELparameters["Physics_options"]["InputType"] = 1 - - if ("GradientsType" not in self.MODELparameters["Physics_options"]) or self.MODELparameters["Physics_options"]["GradientsType"] != 0: - print("\t- In PORTALS TGYRO evaluations, we need to not recompute gradients (GradientsType=0)",typeMsg="i") - self.MODELparameters["Physics_options"]["GradientsType"] = 0 - - if 'TargetType' in self.MODELparameters["Physics_options"]: - raise Exception("\t- TargetType is not used in PORTALS anymore") - - if self.PORTALSparameters["TargetCalc"] == "tgyro" and self.PORTALSparameters['profiles_postprocessing_fun'] is not None: - print("\t- Requested custom modification of postprocessing function but targets from tgyro... 
are you sure?",typeMsg="q") - - key_rhos = "RoaLocations" if self.MODELparameters["RoaLocations"] is not None else "RhoLocations" - - return key_rhos - def reuseTrainingTabular( - self, folderRead, folderNew, reevaluateTargets=0, cold_startIfExists=False): + self, folderRead, folderNew, reevaluate_targets=0, cold_startIfExists=False): """ - reevaluateTargets: + reevaluate_targets: 0: No 1: Quick targets from powerstate with no transport calculation 2: Full original model (either with transport model targets or powerstate targets, but also calculate transport) @@ -519,7 +298,7 @@ def reuseTrainingTabular( typeMsg="i", ) - if reevaluateTargets > 0: + if reevaluate_targets > 0: print("- Re-evaluate targets", typeMsg="i") (folderNew / "TargetsRecalculate").mkdir(parents=True, exist_ok=True) @@ -551,11 +330,9 @@ def reuseTrainingTabular( name = f"portals_{b}_targets_ev{numPORTALS}" self_copy = copy.deepcopy(self) - if reevaluateTargets == 1: - self_copy.powerstate.TransportOptions["transport_evaluator"] = None - self_copy.powerstate.TargetOptions["ModelOptions"]["TypeTarget"] = "powerstate" - else: - self_copy.powerstate.TransportOptions["transport_evaluator"] = TRANSPORTtools.tgyro_model + if reevaluate_targets == 1: + self_copy.powerstate.transport_options["evaluator"] = None + self_copy.powerstate.target_options["options"]["targets_evolve"] = "target_evaluator_method" _, dictOFs = runModelEvaluator( self_copy, @@ -571,7 +348,7 @@ def reuseTrainingTabular( # ------------------------------------------------------------------------------------ for i in dictOFs: - if "Tar" in i: + if "_tar" in i: print(f"Changing {i} in file") optimization_data.data[i].iloc[numPORTALS] = dictOFs[i]["value"].cpu().numpy().item() @@ -601,16 +378,16 @@ def runModelEvaluator( # Prep run # --------------------------------------------------------------------------------------------------- - folder_model = FolderEvaluation / "model_complete" + folder_model = FolderEvaluation / 
"transport_simulation_folder" folder_model.mkdir(parents=True, exist_ok=True) # --------------------------------------------------------------------------------------------------- # Prepare evaluating vector X # --------------------------------------------------------------------------------------------------- - X = torch.zeros(len(powerstate.ProfilesPredicted) * (powerstate.plasma["rho"].shape[1] - 1)).to(powerstate.dfT) + X = torch.zeros(len(powerstate.predicted_channels) * (powerstate.plasma["rho"].shape[1] - 1)).to(powerstate.dfT) cont = 0 - for ikey in powerstate.ProfilesPredicted: + for ikey in powerstate.predicted_channels: for ix in range(powerstate.plasma["rho"].shape[1] - 1): X[cont] = dictDVs[f"aL{ikey}_{ix+1}"]["value"] cont += 1 @@ -624,7 +401,7 @@ def runModelEvaluator( # --------------------------------------------------------------------------------------------------- # In certain cases, I want to cold_start the model directly from the PORTALS call instead of powerstate - powerstate.TransportOptions["ModelOptions"]["cold_start"] = cold_start + powerstate.transport_options["cold_start"] = cold_start # Evaluate X (DVs) through powerstate.calculate(). 
This will populate .plasma with the results powerstate.calculate(X, nameRun=name, folder=folder_model, evaluation_number=numPORTALS) @@ -646,41 +423,31 @@ def runModelEvaluator( return powerstate, dictOFs def map_powerstate_to_portals(powerstate, dictOFs): - """ - """ - for var in powerstate.ProfilesPredicted: + for var in powerstate.predicted_channels: # Write in OFs for i in range(powerstate.plasma["rho"].shape[1] - 1): # Ignore position 0, which is rho=0 if var == "te": - var0, var1 = "Qe", "Pe" + var0, var1 = "Qe", "QeMWm2" elif var == "ti": - var0, var1 = "Qi", "Pi" + var0, var1 = "Qi", "QiMWm2" elif var == "ne": var0, var1 = "Ge", "Ce" elif var == "nZ": var0, var1 = "GZ", "CZ" elif var == "w0": - var0, var1 = "Mt", "Mt" + var0, var1 = "Mt", "MtJm2" """ TRANSPORT calculation --------------------- """ - dictOFs[f"{var0}Turb_{i+1}"]["value"] = powerstate.plasma[ - f"{var1}_tr_turb" - ][0, i+1] - dictOFs[f"{var0}Turb_{i+1}"]["error"] = powerstate.plasma[ - f"{var1}_tr_turb_stds" - ][0, i+1] + dictOFs[f"{var0}_tr_turb_{i+1}"]["value"] = powerstate.plasma[f"{var1}_tr_turb"][0, i+1] + dictOFs[f"{var0}_tr_turb_{i+1}"]["error"] = powerstate.plasma[f"{var1}_tr_turb_stds"][0, i+1] - dictOFs[f"{var0}Neo_{i+1}"]["value"] = powerstate.plasma[ - f"{var1}_tr_neo" - ][0, i+1] - dictOFs[f"{var0}Neo_{i+1}"]["error"] = powerstate.plasma[ - f"{var1}_tr_neo_stds" - ][0, i+1] + dictOFs[f"{var0}_tr_neoc_{i+1}"]["value"] = powerstate.plasma[f"{var1}_tr_neoc"][0, i+1] + dictOFs[f"{var0}_tr_neoc_{i+1}"]["error"] = powerstate.plasma[f"{var1}_tr_neoc_stds"][0, i+1] """ TARGET calculation @@ -688,25 +455,17 @@ def map_powerstate_to_portals(powerstate, dictOFs): If that radius & profile position has target, evaluate """ - dictOFs[f"{var0}Tar_{i+1}"]["value"] = powerstate.plasma[f"{var1}"][ - 0, i+1 - ] - dictOFs[f"{var0}Tar_{i+1}"]["error"] = powerstate.plasma[ - f"{var1}_stds" - ][0, i+1] + dictOFs[f"{var0}_tar_{i+1}"]["value"] = powerstate.plasma[f"{var1}"][0, i+1] + 
dictOFs[f"{var0}_tar_{i+1}"]["error"] = powerstate.plasma[f"{var1}_stds"][0, i+1] """ Turbulent Exchange ------------------ """ - if 'PexchTurb_1' in dictOFs: + if 'Qie_tr_turb_1' in dictOFs: for i in range(powerstate.plasma["rho"].shape[1] - 1): - dictOFs[f"PexchTurb_{i+1}"]["value"] = powerstate.plasma["PexchTurb"][ - 0, i+1 - ] - dictOFs[f"PexchTurb_{i+1}"]["error"] = powerstate.plasma[ - "PexchTurb_stds" - ][0, i+1] + dictOFs[f"Qie_tr_turb_{i+1}"]["value"] = powerstate.plasma["QieMWm3_tr_turb"][0, i+1] + dictOFs[f"Qie_tr_turb_{i+1}"]["error"] = powerstate.plasma["QieMWm3_tr_turb_stds"][0, i+1] return dictOFs diff --git a/src/mitim_modules/portals/PORTALStools.py b/src/mitim_modules/portals/PORTALStools.py index 310cac4e..30c8b2a9 100644 --- a/src/mitim_modules/portals/PORTALStools.py +++ b/src/mitim_modules/portals/PORTALStools.py @@ -2,29 +2,28 @@ import gpytorch import copy import numpy as np +from collections import OrderedDict from mitim_tools.opt_tools import STRATEGYtools from mitim_tools.misc_tools import PLASMAtools -from collections import OrderedDict from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed -def selectSurrogate(output, surrogate_options, CGYROrun=False): +def surrogate_selection_portals(output, surrogate_options): print(f'\t- Selecting surrogate options for "{output}" to be run') if output is not None: # If it's a target, just linear - if output[2:5] == "Tar": + if output[3:6] == "tar": surrogate_options["TypeMean"] = 1 surrogate_options["TypeKernel"] = 2 # Constant kernel - # If it's not, stndard + # If it's not, standard case for fluxes else: surrogate_options["TypeMean"] = 2 # Linear in gradients, constant in rest surrogate_options["TypeKernel"] = 1 # RBF - # surrogate_options['ExtraNoise'] = True surrogate_options["additional_constraints"] = { - 'lenghtscale_constraint': gpytorch.constraints.constraints.GreaterThan(0.01) # inputs normalized to [0,1], this is 1% lengthscale + 'lenghtscale_constraint': 
gpytorch.constraints.constraints.GreaterThan(0.05) # inputs normalized to [0,1], this is 5% lengthscale } return surrogate_options @@ -94,7 +93,7 @@ def default_portals_transformation_variables(additional_params = []): return portals_transformation_variables, portals_transformation_variables_trace -def produceNewInputs(Xorig, output, surrogate_parameters, surrogate_transformation_variables): +def input_transform_portals(Xorig, output, surrogate_parameters, surrogate_transformation_variables): """ - Xorig will be a tensor (batch1...N,dim) unnormalized (with or without gradients). @@ -125,7 +124,7 @@ def produceNewInputs(Xorig, output, surrogate_parameters, surrogate_transformati initialize it with a larger batch """ - _, num = output.split("_") + num = output.split("_")[-1] index = powerstate.indexes_simulation[int(num)] # num=1 -> pos=1, so that it takes the second value in vectors xFit = torch.Tensor().to(X) @@ -146,13 +145,11 @@ def produceNewInputs(Xorig, output, surrogate_parameters, surrogate_transformati return xFit, parameters_combined - # ---------------------------------------------------------------------- # Transformation of Outputs # ---------------------------------------------------------------------- - -def transformPORTALS(X, surrogate_parameters, output): +def output_transform_portals(X, surrogate_parameters, output): """ 1. 
Make sure all batches are squeezed into a single dimension ------------------------------------------------------------------ @@ -172,12 +169,10 @@ def transformPORTALS(X, surrogate_parameters, output): # --- Original model output is in real units, transform to GB here b/c that's how GK codes work factorGB = GBfromXnorm(X, output, powerstate) - # --- Ratio of fluxes (quasilinear) - factorRat = ratioFactor(X, surrogate_parameters, output, powerstate) # --- Specific to output factorImp = ImpurityGammaTrick(X, surrogate_parameters, output, powerstate) - compounded = factorGB * factorRat * factorImp + compounded = factorGB * factorImp """ 3. Go back to the original batching system @@ -190,7 +185,7 @@ def transformPORTALS(X, surrogate_parameters, output): return compounded -def computeTurbExchangeIndividual(PexchTurb, powerstate): +def computeTurbExchangeIndividual(QieMWm3_tr_turb, powerstate): """ Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added """ @@ -200,34 +195,34 @@ def computeTurbExchangeIndividual(PexchTurb, powerstate): ------------------------------------------------------------------ E.g.: (batch1,batch2,batch3,dimR) -> (batch1*batch2*batch3,dimR) """ - shape_orig = np.array(PexchTurb.shape) - PexchTurb = PexchTurb.view(np.prod(shape_orig[:-1]), shape_orig[-1]) + shape_orig = np.array(QieMWm3_tr_turb.shape) + QieMWm3_tr_turb = QieMWm3_tr_turb.view(np.prod(shape_orig[:-1]), shape_orig[-1]) """ 2. 
Integrate ------------------------------------------------------------------------ qExch is in MW/m^3 - powerstate.volume_integrate produces in MW/m^2 + powerstate.from_density_to_flux produces in MW/m^2 """ # Add zeros at zero - qExch = torch.cat((torch.zeros(PexchTurb.shape).to(PexchTurb)[..., :1], PexchTurb), dim=-1) + qExch = torch.cat((torch.zeros(QieMWm3_tr_turb.shape).to(QieMWm3_tr_turb)[..., :1], QieMWm3_tr_turb), dim=-1) - PexchTurb_integrated = powerstate.volume_integrate(qExch, force_dim=qExch.shape[0])[..., 1:] + QieMWm2_tr_turb = powerstate.from_density_to_flux(qExch, force_dim=qExch.shape[0])[..., 1:] """ 3. Go back to the original batching system ------------------------------------------------------------------------ E.g.: (batch1*batch2*batch3,dimR) -> (batch1,batch2,batch3,dimR) """ - PexchTurb_integrated = PexchTurb_integrated.view(tuple(shape_orig)) + QieMWm2_tr_turb = QieMWm2_tr_turb.view(tuple(shape_orig)) - return PexchTurb_integrated + return QieMWm2_tr_turb def GBfromXnorm(x, output, powerstate): # Decide, depending on the output here, which to use as normalization and at what location - varFull = output.split("_")[0] - pos = int(output.split("_")[1]) + varFull = '_'.join(output.split("_")[:-1]) + pos = int(output.split("_")[-1]) # Select GB unit if varFull[:2] == "Qe": @@ -237,9 +232,9 @@ def GBfromXnorm(x, output, powerstate): elif varFull[:2] == "Mt": quantity = "Pgb" elif varFull[:2] == "Ge": - quantity = "Ggb" if (not powerstate.useConvectiveFluxes) else "Qgb_convection" + quantity = "Qgb_convection" elif varFull[:2] == "GZ": - quantity = "Ggb" if (not powerstate.useConvectiveFluxes) else "Qgb_convection" + quantity = "Qgb_convection" elif varFull[:5] == "Pexch": quantity = "Sgb" @@ -253,9 +248,9 @@ def ImpurityGammaTrick(x, surrogate_parameters, output, powerstate): Trick to make GZ a function of a/Lnz only (flux as GammaZ_hat = GammaZ /nZ ) """ - pos = int(output.split("_")[1]) + pos = int(output.split("_")[-1]) - if ("GZ" in output) 
and surrogate_parameters["applyImpurityGammaTrick"]: + if ("GZ" in output) and surrogate_parameters["impurity_trick"]: factor = powerstate.plasma["ni"][: x.shape[0],powerstate.indexes_simulation[pos],powerstate.impurityPosition].unsqueeze(-1) else: @@ -263,58 +258,6 @@ def ImpurityGammaTrick(x, surrogate_parameters, output, powerstate): return factor - -def ratioFactor(X, surrogate_parameters, output, powerstate): - """ - This defines the vector to divide by. - - THIS IS BROKEN RIGHT NOW - """ - - v = torch.ones(tuple(X.shape[:-1]) + (1,)).to(X) - - # """ - # Apply diffusivities (not real value, just capturing dependencies, - # work on normalization, like e_J). Or maybe calculate gradients within powerstate - # Remember that for Ti I'm using ne... - # """ - # if surrogate_parameters["useDiffusivities"]: - # pos = int(output.split("_")[-1]) - # var = output.split("_")[0] - - # if var == "te": - # grad = x[:, i] * ( - # powerstate.plasma["te"][:, powerstate.indexes_simulation[pos]] - # / powerstate.plasma["a"] - # ) # keV/m - # v[:] = grad * powerstate.plasma["ne"][:, powerstate.indexes_simulation[pos]] - - # if var == "ti": - # grad = x[:, i] * ( - # powerstate.plasma["ti"][:, powerstate.indexes_simulation[pos]] - # / powerstate.plasma["a"] - # ) # keV/m - # v[:] = grad * powerstate.plasma["ne"][:, powerstate.indexes_simulation[pos]] - - # # if var == 'ne': - # # grad = x[:,i] * ( powerstate.plasma['ne'][:,pos]/powerstate.plasma['a']) # keV/m - # # v[:] = grad - - # """ - # Apply flux ratios - # For example [1,Qi,Qi] means I will fit to [Qi, Qe/Qi, Ge/Qi] - # """ - - # if surrogate_parameters["useFluxRatios"]: - # """ - # Not ready yet... since my code is not dealing with other outputs at a time so - # I don't know Qi if I'm evaluating other fluxes... 
- # """ - # pass - - return v - - def constructEvaluationProfiles(X, surrogate_parameters, recalculateTargets=False): """ Prepare powerstate for another evaluation with batches @@ -348,7 +291,7 @@ def constructEvaluationProfiles(X, surrogate_parameters, recalculateTargets=Fals # Obtain modified profiles CPs = torch.zeros((X.shape[0], num_x + 1)).to(X) - for iprof, var in enumerate(powerstate.ProfilesPredicted): + for iprof, var in enumerate(powerstate.predicted_channels): # Specific part of the input vector that deals with this profile and introduce to CP vector (that starts with 0,0) CPs[:, 1:] = X[:, (iprof * num_x) : (iprof * num_x) + num_x] @@ -360,7 +303,7 @@ def constructEvaluationProfiles(X, surrogate_parameters, recalculateTargets=Fals # Targets only if needed (for speed, GB doesn't need it) if recalculateTargets: - powerstate.TargetOptions["ModelOptions"]["TargetCalc"] = "powerstate" # For surrogate evaluation, always powerstate, logically. + powerstate.target_options["options"]["target_evaluator_method"] = "powerstate" # For surrogate evaluation, always powerstate, logically. powerstate.calculateTargets() return powerstate @@ -406,3 +349,238 @@ def stopping_criteria_portals(mitim_bo, parameters = {}): else: print("\t- No convergence yet, providing as iteration values the scalarized objective") return False, yvals + + + +def calculate_residuals(powerstate, portals_parameters, specific_vars=None): + """ + Notes + ----- + - Works with tensors + - It should be independent on how many dimensions it has, except that the last dimension is the multi-ofs + """ + + # Case where I have already constructed the dictionary (i.e. 
in scalarized objective) + if specific_vars is not None: + var_dict = specific_vars + # Prepare dictionary from powerstate (for use in Analysis) + else: + var_dict = {} + + mapper = { + "Qe_tr_turb": "QeMWm2_tr_turb", + "Qi_tr_turb": "QiMWm2_tr_turb", + "Ge_tr_turb": "Ce_tr_turb", + "GZ_tr_turb": "CZ_tr_turb", + "Mt_tr_turb": "MtJm2_tr_turb", + "Qe_tr_neoc": "QeMWm2_tr_neoc", + "Qi_tr_neoc": "QiMWm2_tr_neoc", + "Ge_tr_neoc": "Ce_tr_neoc", + "GZ_tr_neoc": "CZ_tr_neoc", + "Mt_tr_neoc": "MtJm2_tr_neoc", + "Qe_tar": "QeMWm2", + "Qi_tar": "QiMWm2", + "Ge_tar": "Ce", + "GZ_tar": "CZ", + "Mt_tar": "MtJm2", + "Qie_tr_turb": "QieMWm3_tr_turb" + } + + for ikey in mapper: + var_dict[ikey] = powerstate.plasma[mapper[ikey]][..., 1:] + if mapper[ikey] + "_stds" in powerstate.plasma: + var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][..., 1:] + else: + var_dict[ikey + "_stds"] = None + + dfT = list(var_dict.values())[0] # as a reference for sizes + + # ------------------------------------------------------------------------- + # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added + # ------------------------------------------------------------------------- + + if portals_parameters["solution"]["turbulent_exchange_as_surrogate"]: + QieMWm2_tr_turb = computeTurbExchangeIndividual(var_dict["Qie_tr_turb"], powerstate) + else: + QieMWm2_tr_turb = torch.zeros(dfT.shape).to(dfT) + + # ------------------------------------------------------------------------ + # Go through each profile that needs to be predicted, calculate components + # ------------------------------------------------------------------------ + + of, cal, res = ( + torch.Tensor().to(dfT), + torch.Tensor().to(dfT), + torch.Tensor().to(dfT), + ) + for prof in powerstate.predicted_channels: + if prof == "te": + var = "Qe" + elif prof == "ti": + var = "Qi" + elif prof == "ne": + var = "Ge" + elif prof == "nZ": + var = "GZ" + elif prof == "w0": + var = "Mt" + + """ + 
----------------------------------------------------------------------------------- + Transport (_tr_turb+_tr_neoc) + ----------------------------------------------------------------------------------- + """ + of0 = var_dict[f"{var}_tr_turb"] + var_dict[f"{var}_tr_neoc"] + + """ + ----------------------------------------------------------------------------------- + Target (Sum here the turbulent exchange power) + ----------------------------------------------------------------------------------- + """ + if var == "Qe": + cal0 = var_dict[f"{var}_tar"] + QieMWm2_tr_turb + elif var == "Qi": + cal0 = var_dict[f"{var}_tar"] - QieMWm2_tr_turb + else: + cal0 = var_dict[f"{var}_tar"] + + """ + ----------------------------------------------------------------------------------- + Ad-hoc modifications for different weighting + ----------------------------------------------------------------------------------- + """ + + if var == "Qe": + of0, cal0 = ( + of0 * portals_parameters["solution"]["scalar_multipliers"][0], + cal0 * portals_parameters["solution"]["scalar_multipliers"][0], + ) + elif var == "Qi": + of0, cal0 = ( + of0 * portals_parameters["solution"]["scalar_multipliers"][1], + cal0 * portals_parameters["solution"]["scalar_multipliers"][1], + ) + elif var == "Ge": + of0, cal0 = ( + of0 * portals_parameters["solution"]["scalar_multipliers"][2], + cal0 * portals_parameters["solution"]["scalar_multipliers"][2], + ) + elif var == "GZ": + of0, cal0 = ( + of0 * portals_parameters["solution"]["scalar_multipliers"][3], + cal0 * portals_parameters["solution"]["scalar_multipliers"][3], + ) + elif var == "MtJm2": + of0, cal0 = ( + of0 * portals_parameters["solution"]["scalar_multipliers"][4], + cal0 * portals_parameters["solution"]["scalar_multipliers"][4], + ) + + of, cal = torch.cat((of, of0), dim=-1), torch.cat((cal, cal0), dim=-1) + + # ----------- + # Composition + # ----------- + + # Source term is (TARGET - TRANSPORT) + source = cal - of + + # Residual is defined as the 
negative (bc it's maximization) normalized (1/N) norm of radial & channel residuals -> L2 + res = -1 / source.shape[-1] * torch.norm(source, p=2, dim=-1) + + return of, cal, source, res + + +def calculate_residuals_distributions(powerstate, portals_parameters): + """ + - Works with tensors + - It should be independent on how many dimensions it has, except that the last dimension is the multi-ofs + """ + + # Prepare dictionary from powerstate (for use in Analysis) + + mapper = { + "Qe_tr_turb": "QeMWm2_tr_turb", + "Qi_tr_turb": "QiMWm2_tr_turb", + "Ge_tr_turb": "Ce_tr_turb", + "GZ_tr_turb": "CZ_tr_turb", + "Mt_tr_turb": "MtJm2_tr_turb", + "Qe_tr_neoc": "QeMWm2_tr_neoc", + "Qi_tr_neoc": "QiMWm2_tr_neoc", + "Ge_tr_neoc": "Ce_tr_neoc", + "GZ_tr_neoc": "CZ_tr_neoc", + "Mt_tr_neoc": "MtJm2_tr_neoc", + "Qe_tar": "QeMWm2", + "Qi_tar": "QiMWm2", + "Ge_tar": "Ce", + "GZ_tar": "CZ", + "Mt_tar": "MtJm2", + "Qie_tr_turb": "QieMWm3_tr_turb" + } + + var_dict = {} + for ikey in mapper: + var_dict[ikey] = powerstate.plasma[mapper[ikey]][:, 1:] + if mapper[ikey] + "_stds" in powerstate.plasma: + var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][:, 1:] + else: + var_dict[ikey + "_stds"] = None + + dfT = var_dict["Qe_tr_turb"] # as a reference for sizes + + # ------------------------------------------------------------------------- + # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added + # ------------------------------------------------------------------------- + + if portals_parameters["solution"]["turbulent_exchange_as_surrogate"]: + QieMWm2_tr_turb = computeTurbExchangeIndividual(var_dict["Qie_tr_turb"], powerstate) + QieMWm2_tr_turb_stds = computeTurbExchangeIndividual(var_dict["Qie_tr_turb_stds"], powerstate) + else: + QieMWm2_tr_turb = torch.zeros(dfT.shape).to(dfT) + QieMWm2_tr_turb_stds = torch.zeros(dfT.shape).to(dfT) + + # ------------------------------------------------------------------------ + # Go through each profile that needs 
to be predicted, calculate components + # ------------------------------------------------------------------------ + + of, cal = torch.Tensor().to(dfT), torch.Tensor().to(dfT) + ofE, calE = torch.Tensor().to(dfT), torch.Tensor().to(dfT) + for prof in powerstate.predicted_channels: + if prof == "te": + var = "Qe" + elif prof == "ti": + var = "Qi" + elif prof == "ne": + var = "Ge" + elif prof == "nZ": + var = "GZ" + elif prof == "w0": + var = "Mt" + + """ + ----------------------------------------------------------------------------------- + Transport (_tr_turb+_tr_neoc) + ----------------------------------------------------------------------------------- + """ + of0 = var_dict[f"{var}_tr_turb"] + var_dict[f"{var}_tr_neoc"] + of0E = (var_dict[f"{var}_tr_turb_stds"] ** 2 + var_dict[f"{var}_tr_neoc_stds"] ** 2) ** 0.5 + + """ + ----------------------------------------------------------------------------------- + Target (Sum here the turbulent exchange power) + ----------------------------------------------------------------------------------- + """ + if var == "Qe": + cal0 = var_dict[f"{var}_tar"] + QieMWm2_tr_turb + cal0E = (var_dict[f"{var}_tar_stds"] ** 2 + QieMWm2_tr_turb_stds**2) ** 0.5 + elif var == "Qi": + cal0 = var_dict[f"{var}_tar"] - QieMWm2_tr_turb + cal0E = (var_dict[f"{var}_tar_stds"] ** 2 + QieMWm2_tr_turb_stds**2) ** 0.5 + else: + cal0 = var_dict[f"{var}_tar"] + cal0E = var_dict[f"{var}_tar_stds"] + + of, cal = torch.cat((of, of0), dim=-1), torch.cat((cal, cal0), dim=-1) + ofE, calE = torch.cat((ofE, of0E), dim=-1), torch.cat((calE, cal0E), dim=-1) + + return of, cal, ofE, calE diff --git a/src/mitim_modules/portals/scripts/read_portals.py b/src/mitim_modules/portals/scripts/read_portals.py index 01b535ad..3d669f7a 100644 --- a/src/mitim_modules/portals/scripts/read_portals.py +++ b/src/mitim_modules/portals/scripts/read_portals.py @@ -1,64 +1,74 @@ import argparse import matplotlib.pyplot as plt -from mitim_tools.misc_tools import IOtools from 
mitim_modules.portals.utils import PORTALSanalysis +from mitim_tools.misc_tools import IOtools +from mitim_tools.opt_tools import STRATEGYtools +from mitim_tools.misc_tools.utils import remote_tools from IPython import embed -""" -This script is to plot only the convergence figure, not the rest of surrogates that takes long. -It also does it on a separate figure, so easy to manage (e.g. for saving as .eps) -""" def main(): parser = argparse.ArgumentParser() - parser.add_argument("folders", type=str, nargs="*") - parser.add_argument("--remote", "-r", type=str, required=False, default=None) + + # Standard options + parser.add_argument("folders", type=str, nargs="*", + help="Paths to the folders to read.") - parser.add_argument( - "--max", type=int, required=False, default=None - ) # Define max bounds of fluxes based on this one, like 0, -1 or None(best) + # PORTALS specific options + parser.add_argument("--max", type=int, required=False, default=None) # Define max bounds of fluxes based on this one, like 0, -1 or None(best) parser.add_argument("--indeces_extra", type=int, required=False, default=[], nargs="*") - parser.add_argument( - "--all", required=False, default=False, action="store_true" - ) # Plot all fluxes? - parser.add_argument( - "--file", type=str, required=False, default=None - ) # File to save .eps - parser.add_argument( - "--complete", "-c", required=False, default=False, action="store_true" - ) + parser.add_argument("--all", required=False, default=False, action="store_true") # Plot all fluxes? + parser.add_argument("--file", type=str, required=False, default=None) # File to save .eps + parser.add_argument("--complete", "-c", required=False, default=False, action="store_true") + + # Remote options + parser.add_argument("--remote",type=str, required=False, default=None, + help="Remote machine to retrieve the folders from. 
If not provided, it will read the local folders.") + parser.add_argument("--remote_folder_parent",type=str, required=False, default=None, + help="Parent folder in the remote machine where the folders are located. If not provided, it will use --remote_folders.") + parser.add_argument("--remote_folders",type=str, nargs="*", required=False, default=None, + help="List of folders in the remote machine to retrieve. If not provided, it will use the local folder structures.") + parser.add_argument("--remote_minimal", required=False, default=False, action="store_true", + help="If set, it will only retrieve the folder structure with a few key files.") + parser.add_argument('--fix', required=False, default=False, action='store_true', + help="If set, it will fix the pkl optimization portals in the remote folders.") args = parser.parse_args() - folders = [IOtools.expandPath(folder) for folder in args.folders] + # -------------------------------------------------------------------------------------------------------------------------------------------- + # Retrieve from remote + # -------------------------------------------------------------------------------------------------------------------------------------------- - portals_total = [] - for folderWork in folders: - folderRemote_reduced = args.remote - file = args.file - indexToMaximize = args.max - indeces_extra = args.indeces_extra - plotAllFluxes = args.all - complete = args.complete + only_folder_structure_with_files = None + if args.remote_minimal: + only_folder_structure_with_files = ["Outputs/optimization_data.csv","Outputs/optimization_extra.pkl","Outputs/optimization_object.pkl","Outputs/optimization_results.out"] + + folders = remote_tools.retrieve_remote_folders(args.folders, args.remote, args.remote_folder_parent, args.remote_folders, only_folder_structure_with_files) - if not folderWork.exists(): - folderWork.mkdir(parents=True, exist_ok=True) + # 
-------------------------------------------------------------------------------------------------------------------------------------------- + # Fix pkl optimization portals in remote + # -------------------------------------------------------------------------------------------------------------------------------------------- - folderRemote = ( - f"{folderRemote_reduced}/{IOtools.reducePathLevel(folderWork)[-1]}/" - if folderRemote_reduced is not None - else None - ) + if args.fix: + for folder in folders: + STRATEGYtools.clean_state(folder) - # Read PORTALS - portals = PORTALSanalysis.PORTALSanalyzer.from_folder( - folderWork, folderRemote=folderRemote - ) + # -------------------------------------------------------------------------------------------------------------------------------------------- + # PORTALS reading + # -------------------------------------------------------------------------------------------------------------------------------------------- + + portals_total = [PORTALSanalysis.PORTALSanalyzer.from_folder(folderWork) for folderWork in folders] - portals_total.append(portals) + # -------------------------------------------------------------------------------------------------------------------------------------------- + # Actual PORTALS plotting + # -------------------------------------------------------------------------------------------------------------------------------------------- - # PLOTTING + file = args.file + indexToMaximize = args.max + indeces_extra = args.indeces_extra + plotAllFluxes = args.all + complete = args.complete if not complete: size = 8 @@ -87,9 +97,7 @@ def main(): portals_total[i].fn = fn # Plot metrics - if (not complete) or ( - isinstance(portals_total[i], PORTALSanalysis.PORTALSinitializer) - ): + if (not complete) or isinstance(portals_total[i], PORTALSanalysis.PORTALSinitializer): if isinstance(portals_total[i], PORTALSanalysis.PORTALSinitializer): fig = None elif requiresFN: @@ -116,5 +124,6 @@ def main(): 
plt.show() embed() + if __name__ == "__main__": main() diff --git a/src/mitim_modules/portals/scripts/runTGLFdrivesfromPORTALS.py b/src/mitim_modules/portals/scripts/runTGLFdrivesfromPORTALS.py deleted file mode 100644 index dffd6e9b..00000000 --- a/src/mitim_modules/portals/scripts/runTGLFdrivesfromPORTALS.py +++ /dev/null @@ -1,55 +0,0 @@ -import argparse -from mitim_tools.misc_tools import IOtools -from mitim_modules.portals.utils import PORTALSanalysis - -""" -This script is useful to understand why surrogates may fail at reproducing TGLF fluxes. -You can select the iteration to use as base case to see how TGLF behaves (if it has discontinuities) - e.g. - runTGLFdrivesfrommitim.py --folder run11/ --ev 5 --pos 0 2 --var 0.05 --wf 0.2 1.0 --num 5 - -Notes: - - wf runs scan with waveform too (slightly more expensive, as it will require 1 extra sim per run, but cheaper) -""" - -# --- Inputs - -parser = argparse.ArgumentParser() -parser.add_argument("--folder", required=True, type=str) -parser.add_argument("--ev", type=int, required=False, default=None) -parser.add_argument("--pos", type=int, required=False, default=[0], nargs="*") -parser.add_argument("--wf", type=float, required=False, default=None, nargs="*") -parser.add_argument("--var", type=float, required=False, default=0.01) # Variation in inputs (1% default) -parser.add_argument("--num",type=int, required=False, default=5) -parser.add_argument("--cold_start", "-r", required=False, default=False, action="store_true") -parser.add_argument("--ion", type=int, required=False, default=2) - - -args = parser.parse_args() -folder = IOtools.expandPath(args.folder) -ev = args.ev -pos = args.pos -wf = args.wf -var = args.var -num = args.num -cold_start = args.cold_start -ion = args.ion - -# --- Workflow - -portals = PORTALSanalysis.PORTALSanalyzer.from_folder(folder) -tglf, TGLFsettings, extraOptions = portals.extractTGLF(positions=pos, evaluation=ev, modified_profiles=True, cold_start=cold_start) - 
-tglf.runScanTurbulenceDrives( - subFolderTGLF="turb", - resolutionPoints=num, - variation=var, - variablesDrives=["RLTS_1", "RLTS_2", "RLNS_1", "XNUE", "TAUS_2", "BETAE"], - TGLFsettings=TGLFsettings, - extraOptions=extraOptions, - cold_start=cold_start, - runWaveForms=wf, - positionIon=ion, -) - -tglf.plotScanTurbulenceDrives(label="turb") diff --git a/src/mitim_modules/portals/scripts/run_portals.py b/src/mitim_modules/portals/scripts/run_portals.py new file mode 100644 index 00000000..cf007c03 --- /dev/null +++ b/src/mitim_modules/portals/scripts/run_portals.py @@ -0,0 +1,35 @@ +from pathlib import Path +import argparse +from mitim_tools.opt_tools import STRATEGYtools +from mitim_modules.portals import PORTALSmain +from mitim_tools.misc_tools import IOtools + +def main(): + + parser = argparse.ArgumentParser() + + parser.add_argument("folder", type=str, help="Simulation folder") + parser.add_argument("--namelist", type=str, required=False, default=None) # namelist.portals.yaml file, otherwise what's in the current folder + parser.add_argument("--input", type=str, required=False, default=None) # input.gacode file, otherwise what's in the current folder + parser.add_argument('--cold', required=False, default=False, action='store_true') + + args = parser.parse_args() + + folderWork = Path(args.folder) + portals_namelist = args.namelist + inputgacode = args.input + cold_start = args.cold + + # Actual PORTALS run + + portals_namelist = Path(portals_namelist) if portals_namelist is not None else IOtools.expandPath('.') / "namelist.portals.yaml" + inputgacode = Path(inputgacode) if inputgacode is not None else IOtools.expandPath('.') / "input.gacode" + + portals_fun = PORTALSmain.portals(folderWork, portals_namelist=portals_namelist) + portals_fun.prep(inputgacode) + + mitim_bo = STRATEGYtools.MITIM_BO(portals_fun, cold_start=cold_start) + mitim_bo.run() + +if __name__ == "__main__": + main() diff --git a/src/mitim_modules/portals/scripts/runTGLF.py 
b/src/mitim_modules/portals/scripts/run_tglf_todebug_portals.py similarity index 90% rename from src/mitim_modules/portals/scripts/runTGLF.py rename to src/mitim_modules/portals/scripts/run_tglf_todebug_portals.py index 1cf8ea20..f606aad7 100644 --- a/src/mitim_modules/portals/scripts/runTGLF.py +++ b/src/mitim_modules/portals/scripts/run_tglf_todebug_portals.py @@ -30,7 +30,7 @@ args = parser.parse_args() folder = IOtools.expandPath(args.folder) -ev = args.ev +ev = int(args.ev) params = args.params pos = args.pos wf = args.wf @@ -43,7 +43,7 @@ # --- Workflow portals = PORTALSanalysis.PORTALSanalyzer.from_folder(folder) -tglf, TGLFsettings, extraOptions = portals.extractTGLF(positions=pos, evaluation=ev, modified_profiles=True, cold_start=cold_start) +tglf, code_settings, extraOptions = portals.extractTGLF(positions=pos, evaluation=ev, modified_profiles=True, cold_start=cold_start) if not drives: varUpDown = np.linspace(1.0 - var, 1.0 + var, num) @@ -51,10 +51,10 @@ labels = [] for param in params: tglf.runScan( - subFolderTGLF="scan", + subfolder="scan", variable=param, varUpDown=varUpDown, - TGLFsettings=TGLFsettings, + code_settings=code_settings, extraOptions=extraOptions, cold_start=cold_start, runWaveForms=wf, @@ -69,11 +69,11 @@ else: tglf.runScanTurbulenceDrives( - subFolderTGLF="turb", + subfolder="turb", resolutionPoints=5, variation=var, variablesDrives=["RLTS_1", "RLTS_2", "RLNS_1", "XNUE", "TAUS_2", "BETAE"], - TGLFsettings=TGLFsettings, + code_settings=code_settings, extraOptions=extraOptions, cold_start=cold_start, runWaveForms=wf, diff --git a/src/mitim_modules/portals/utils/PORTALSanalysis.py b/src/mitim_modules/portals/utils/PORTALSanalysis.py index 8e33cdd8..ecf85bd5 100644 --- a/src/mitim_modules/portals/utils/PORTALSanalysis.py +++ b/src/mitim_modules/portals/utils/PORTALSanalysis.py @@ -1,13 +1,14 @@ import copy import torch +import json import numpy as np import pandas as pd import matplotlib.pyplot as plt from mitim_tools.opt_tools import 
STRATEGYtools from mitim_tools.misc_tools import IOtools, PLASMAtools, GRAPHICStools from mitim_tools.gacode_tools import TGLFtools, TGYROtools, PROFILEStools -from mitim_tools.gacode_tools.utils import PORTALSinteraction from mitim_modules.portals.utils import PORTALSplot +from mitim_modules.portals import PORTALStools from mitim_modules.powertorch import STATEtools from mitim_modules.powertorch.utils import POWERplot from mitim_tools.misc_tools.LOGtools import printMsg as print @@ -150,18 +151,15 @@ def prep_metrics(self, ilast=None): self.rhos = self.mitim_runs[0]['powerstate'].plasma['rho'][0,1:].cpu().numpy() self.roa = self.mitim_runs[0]['powerstate'].plasma['roa'][0,1:].cpu().numpy() - self.PORTALSparameters = self.opt_fun.mitim_model.optimization_object.PORTALSparameters - self.MODELparameters = self.opt_fun.mitim_model.optimization_object.MODELparameters + self.portals_parameters = self.opt_fun.mitim_model.optimization_object.portals_parameters # Useful flags - self.ProfilesPredicted = self.MODELparameters["ProfilesPredicted"] + self.predicted_channels = self.portals_parameters["solution"]["predicted_channels"] - self.runWithImpurity = self.powerstate.impurityPosition if "nZ" in self.ProfilesPredicted else None + self.runWithImpurity = self.powerstate.impurityPosition if "nZ" in self.predicted_channels else None - self.runWithRotation = "w0" in self.ProfilesPredicted - self.includeFast = self.PORTALSparameters["includeFastInQi"] - self.useConvectiveFluxes = self.PORTALSparameters["useConvectiveFluxes"] - self.forceZeroParticleFlux = self.PORTALSparameters["forceZeroParticleFlux"] + self.runWithRotation = "w0" in self.predicted_channels + self.force_zero_particle_flux = self.portals_parameters["target"]["options"]["force_zero_particle_flux"] # Profiles and tgyro results print("\t- Reading profiles and tgyros for each evaluation") @@ -171,7 +169,7 @@ def prep_metrics(self, ilast=None): self.powerstates.append(self.mitim_runs[i]["powerstate"]) # 
runWithImpurity_transport is stored after powerstate has run transport - self.runWithImpurity_transport = self.powerstates[0].impurityPosition_transport if "nZ" in self.ProfilesPredicted else None + self.runWithImpurity_transport = self.powerstates[0].impurityPosition_transport if "nZ" in self.predicted_channels else None if len(self.powerstates) <= self.ibest: print("\t- PORTALS was read after new residual was computed but before pickle was written!",typeMsg="w") @@ -180,20 +178,20 @@ def prep_metrics(self, ilast=None): self.profiles_next = None x_train_num = self.step.train_X.shape[0] - file = self.opt_fun.folder / "Execution" / f"Evaluation.{x_train_num}" / "model_complete" / "input.gacode_unmodified" + file = self.opt_fun.folder / "Execution" / f"Evaluation.{x_train_num}" / "transport_simulation_folder" / "input.gacode_unmodified" if file.exists(): print("\t\t- Reading next profile to evaluate (from folder)") - self.profiles_next = PROFILEStools.PROFILES_GACODE(file, calculateDerived=False) + self.profiles_next = PROFILEStools.gacode_state(file, derive_quantities=False) - file = self.opt_fun.folder / "Execution" / f"Evaluation.{x_train_num}" / "model_complete" / "input.gacode.new" + file = self.opt_fun.folder / "Execution" / f"Evaluation.{x_train_num}" / "transport_simulation_folder" / "input.gacode.new" if file.exists(): - self.profiles_next_new = PROFILEStools.PROFILES_GACODE( - file, calculateDerived=False + self.profiles_next_new = PROFILEStools.gacode_state( + file, derive_quantities=False ) self.profiles_next_new.printInfo(label="NEXT") else: self.profiles_next_new = self.profiles_next - self.profiles_next_new.deriveQuantities() + self.profiles_next_new.derive_quantities() else: print("\t\t- Could not read next profile to evaluate (from folder)") @@ -217,7 +215,7 @@ def prep_metrics(self, ilast=None): print(f"\t\t- Processing evaluation {i}/{len(self.powerstates)-1}") if 'Q' not in power.profiles.derived: - power.profiles.deriveQuantities() + 
power.profiles.derive_quantities() self.evaluations.append(i) self.FusionGain.append(power.profiles.derived["Q"]) @@ -228,9 +226,9 @@ def prep_metrics(self, ilast=None): # Residual definitions # ------------------------------------------------ - _, _, source, res = PORTALSinteraction.calculate_residuals( + _, _, source, res = PORTALStools.calculate_residuals( power, - self.PORTALSparameters, + self.portals_parameters, ) # Make sense of tensor "source" which are defining the entire predictive set in @@ -240,7 +238,7 @@ def prep_metrics(self, ilast=None): GZ_resR = np.zeros(self.rhos.shape[0]) Mt_resR = np.zeros(self.rhos.shape[0]) cont = 0 - for prof in self.MODELparameters["ProfilesPredicted"]: + for prof in self.portals_parameters["solution"]["predicted_channels"]: for ix in range(self.rhos.shape[0]): if prof == "te": Qe_resR[ix] = source[0, cont].abs() @@ -272,9 +270,9 @@ def prep_metrics(self, ilast=None): y2, y1_std, y2_std, - ) = PORTALSinteraction.calculate_residuals_distributions( + ) = PORTALStools.calculate_residuals_distributions( power, - self.PORTALSparameters, + self.portals_parameters, ) QR, chiR = PLASMAtools.RicciMetric( @@ -298,6 +296,7 @@ def prep_metrics(self, ilast=None): ) except: print("\t- Could not calculate Ricci metric", typeMsg="w") + embed() calculateRicci = None self.qR_Ricci, self.chiR_Ricci, self.points_Ricci = None, None, None @@ -330,14 +329,14 @@ def prep_metrics(self, ilast=None): self.resCheck = ( self.resTeM + self.resTiM + self.resneM + self.resnZM + self.resw0M - ) / len(self.MODELparameters["ProfilesPredicted"]) + ) / len(self.portals_parameters["solution"]["predicted_channels"]) # --------------------------------------------------------------------------------------------------------------------- # Jacobian # --------------------------------------------------------------------------------------------------------------------- DeltaQ1 = [] - for i in self.MODELparameters["ProfilesPredicted"]: + for i in 
self.portals_parameters["solution"]["predicted_channels"]: if i == "te": DeltaQ1.append(-self.resTe) if i == "ti": @@ -432,12 +431,14 @@ def extractProfiles(self, evaluation=None, modified_profiles=False): evaluation = self.ibest elif evaluation < 0: evaluation = self.ilast + else: + evaluation = int(evaluation) powerstate = self.mitim_runs[evaluation]["powerstate"] try: - p0 = powerstate.profiles if not modified_profiles else powerstate.model_results.profiles - except TypeError: + p0 = powerstate.profiles if not modified_profiles else powerstate.profiles_transport + except (TypeError, AttributeError): raise Exception(f"[MITIM] Could not extract profiles from evaluation {evaluation}, are you sure you have the right index?") p = copy.deepcopy(p0) @@ -463,7 +464,7 @@ def extractModels(self, step=-1): 1. Look at the dictionary keys to see which models are available: models.keys() 2. Select one model and print its information (e.g. variable labels and order): - m = models['QeTurb_1'] + m = models['Qe_tr_turb_1'] m.printInfo() 3. 
Trained points are stored as m.x, m.y, m.yvar, and you can make predictions with: x_test = m.x @@ -497,7 +498,7 @@ def extractPORTALS(self, evaluation=None, folder=None, modified_profiles=False): # Start from the profiles of that step fileGACODE = folder / "input.gacode_transferred" p = self.extractProfiles(evaluation=evaluation, modified_profiles=modified_profiles) - p.writeCurrentStatus(file=fileGACODE) + p.write_state(file=fileGACODE) # New class from mitim_modules.portals.PORTALSmain import portals @@ -505,8 +506,7 @@ def extractPORTALS(self, evaluation=None, folder=None, modified_profiles=False): portals_fun = portals(folder) # Transfer settings - portals_fun.PORTALSparameters = portals_fun_original.PORTALSparameters - portals_fun.MODELparameters = portals_fun_original.MODELparameters + portals_fun.portals_parameters = portals_fun_original.portals_parameters # PRINTING print( @@ -514,7 +514,7 @@ def extractPORTALS(self, evaluation=None, folder=None, modified_profiles=False): **************************************************************************************************** > MITIM has extracted PORTALS class to run in {IOtools.clipstr(folder)}, to proceed: 1. Modify any parameter as required - portals_fun.PORTALSparameters, portals_fun.MODELparameters, portals_fun.optimization_options + portals_fun.portals_parameters 2. Take the class portals_fun (arg #0) and prepare it with fileGACODE (arg #1) and folder (arg #2) with: portals_fun.prep(fileGACODE,folder) 3. 
Run PORTALS with: @@ -547,15 +547,15 @@ def extractTGYRO(self, folder=None, cold_start=False, evaluation=0, modified_pro folder, profilesclass_custom=profiles, cold_start=cold_start, forceIfcold_start=True ) - TGLFsettings = self.MODELparameters["transport_model"]["TGLFsettings"] - extraOptionsTGLF = self.MODELparameters["transport_model"]["extraOptionsTGLF"] + code_settings = self.portals_parameters["transport"]["options"]["code_settings"] + extraOptionsTGLF = self.portals_parameters["transport"]["options"]["extraOptionsTGLF"] PredictionSet = [ - int("te" in self.MODELparameters["ProfilesPredicted"]), - int("ti" in self.MODELparameters["ProfilesPredicted"]), - int("ne" in self.MODELparameters["ProfilesPredicted"]), + int("te" in self.portals_parameters["solution"]["predicted_channels"]), + int("ti" in self.portals_parameters["solution"]["predicted_channels"]), + int("ne" in self.portals_parameters["solution"]["predicted_channels"]), ] - return tgyro, self.rhos, PredictionSet, TGLFsettings, extraOptionsTGLF + return tgyro, self.rhos, PredictionSet, code_settings, extraOptionsTGLF def extractTGLF(self, folder=None, positions=None, evaluation=None, cold_start=False, modified_profiles=False): if evaluation is None: @@ -566,12 +566,12 @@ def extractTGLF(self, folder=None, positions=None, evaluation=None, cold_start=F """ NOTE on radial location extraction: Two possible options for the rho locations to use: - 1. self.MODELparameters["RhoLocations"] -> the ones PORTALS sent to TGYRO + 1. self.portals_parameters["solution"]["predicted_rho"] -> the ones PORTALS sent to TGYRO 2. self.rhos (came from TGYRO's t.rho[0, 1:]) -> the ones written by the TGYRO run (clipped to 7 decimal places) Because we want here to run TGLF *exactly* as TGYRO did, we use the first option. 
#TODO: This should be fixed in the future, we should never send to TGYRO more than 7 decimal places of any variable """ - rhos_considered = self.MODELparameters["RhoLocations"] + rhos_considered = self.portals_parameters["solution"]["predicted_rho"] if positions is None: rhos = rhos_considered @@ -591,15 +591,16 @@ def extractTGLF(self, folder=None, positions=None, evaluation=None, cold_start=F inputgacode = folder / "input.gacode.start" p = self.extractProfiles(evaluation=evaluation,modified_profiles=modified_profiles) - p.writeCurrentStatus(file=inputgacode) + p.write_state(file=inputgacode) tglf = TGLFtools.TGLF(rhos=rhos) - _ = tglf.prep(folder, cold_start=cold_start, inputgacode=inputgacode) + + _ = tglf.prep(p,folder,cold_start = cold_start) - TGLFsettings = self.MODELparameters["transport_model"]["TGLFsettings"] - extraOptions = self.MODELparameters["transport_model"]["extraOptionsTGLF"] + code_settings = self.portals_parameters["transport"]["options"]["tglf"]["run"]["code_settings"] + extraOptions = self.portals_parameters["transport"]["options"]["tglf"]["run"]["extraOptions"] - return tglf, TGLFsettings, extraOptions + return tglf, code_settings, extraOptions # **************************************************************************** # UTILITIES for post-analysis @@ -616,7 +617,7 @@ def runTGLFfull( ): """ This runs TGLF for all evaluations, all radii. - This is convenient if I want to re=run TGLF with different settings, e.g. different TGLFsettings, + This is convenient if I want to re=run TGLF with different settings, e.g. different code_settings, that you can provide as keyword arguments. 
""" @@ -628,18 +629,18 @@ def runTGLFfull( ranges = [self.ibest] if onlyBest else range(self.ilast + 1) for ev in ranges: - tglf, TGLFsettings, extraOptions = self.extractTGLF( + tglf, code_settings, extraOptions = self.extractTGLF( folder=folder / f"Evaluation.{ev}", evaluation=ev, cold_start=cold_start ) kwargsTGLF_this = copy.deepcopy(kwargsTGLF) - if "TGLFsettings" not in kwargsTGLF_this: - kwargsTGLF_this["TGLFsettings"] = TGLFsettings + if "code_settings" not in kwargsTGLF_this: + kwargsTGLF_this["code_settings"] = code_settings if "extraOptions" not in kwargsTGLF_this: kwargsTGLF_this["extraOptions"] = extraOptions - tglf.run(subFolderTGLF=f"tglf_{label}", cold_start=cold_start, **kwargsTGLF_this) + tglf.run(subfolder=f"tglf_{label}", cold_start=cold_start, **kwargsTGLF_this) # Read all previously run cases into a single class if tglf_object is None: @@ -723,7 +724,7 @@ def __init__(self, gpdict): self._training_outputs = {} if isinstance(gpdict, dict): for key in gpdict: - if 'Tar' in key: + if '_tar' in key: self._targets[key] = gpdict[key] else: self._models[key] = gpdict[key] @@ -951,7 +952,7 @@ def __init__(self, folder): self.profiles = [] for i in range(100): try: - prof = PROFILEStools.PROFILES_GACODE( + prof = PROFILEStools.gacode_state( self.folder / "Outputs" / "portals_profiles" / f"input.gacode.{i}" ) except FileNotFoundError: @@ -961,12 +962,12 @@ def __init__(self, folder): for i in range(100): try: p = STATEtools.read_saved_state( - self.folder / "Initialization" / "initialization_simple_relax" / f"portals_sr_{IOtools.reducePathLevel(self.folder)[1]}_ev_{i}" / "powerstate.pkl" + self.folder / "Initialization" / "initialization_simple_relax" / f"portals_sr_ev_{i}" / "powerstate.pkl" ) except FileNotFoundError: break - p.profiles.deriveQuantities() + p.profiles.derive_quantities() self.powerstates.append(p) self.fn = None @@ -990,12 +991,12 @@ def plotMetrics(self, extra_lab="", **kwargs): figG = self.fn.add_figure(label=f"{extra_lab} - 
Sequence") # ----------------------------------------------------------------- - axs, axsM = STATEtools.add_axes_powerstate_plot(figMain, num_kp=np.max([3,len(self.powerstates[-1].ProfilesPredicted)])) + axs, axsM = STATEtools.add_axes_powerstate_plot(figMain, num_kp=np.max([3,len(self.powerstates[-1].predicted_channels)])) colors = GRAPHICStools.listColors() axsGrads_extra = [] cont = 0 - for i in range(np.max([3,len(self.powerstates[-1].ProfilesPredicted)])): + for i in range(np.max([3,len(self.powerstates[-1].predicted_channels)])): axsGrads_extra.append(axs[cont]) axsGrads_extra.append(axs[cont+1]) cont += 4 @@ -1009,11 +1010,11 @@ def plotMetrics(self, extra_lab="", **kwargs): self.powerstates[i].plot(axs=axs, c=colors[i], label=f"#{i}") # Add profiles too - self.powerstates[i].profiles.plotGradients( + self.powerstates[i].profiles.plot_gradients( axsGrads_extra, color=colors[i], - plotImpurity=self.powerstates[-1].impurityPosition if 'nZ' in self.powerstates[-1].ProfilesPredicted else None, - plotRotation='w0' in self.powerstates[0].ProfilesPredicted, + plotImpurity=self.powerstates[-1].impurityPosition if 'nZ' in self.powerstates[-1].predicted_channels else None, + plotRotation='w0' in self.powerstates[0].predicted_channels, ls='-', lw=0.5, lastRho=self.powerstates[0].plasma["rho"][-1, -1].item(), @@ -1024,11 +1025,11 @@ def plotMetrics(self, extra_lab="", **kwargs): # Add next profile if len(self.profiles) > len(self.powerstates): - self.profiles[-1].plotGradients( + self.profiles[-1].plot_gradients( axsGrads_extra, color=colors[i+1], - plotImpurity=self.powerstates[-1].impurityPosition_transport if 'nZ' in self.powerstates[-1].ProfilesPredicted else None, - plotRotation='w0' in self.powerstates[0].ProfilesPredicted, + plotImpurity=self.powerstates[-1].impurityPosition_transport if 'nZ' in self.powerstates[-1].predicted_channels else None, + plotRotation='w0' in self.powerstates[0].predicted_channels, ls='-', lw=1.0, 
lastRho=self.powerstates[0].plasma["rho"][-1, -1].item(), @@ -1052,11 +1053,11 @@ def plotMetrics(self, extra_lab="", **kwargs): for i in range(2): axsGrads.append(figG.add_subplot(grid[i, j])) for i, p in enumerate(self.powerstates): - p.profiles.plotGradients( + p.profiles.plot_gradients( axsGrads, color=colors[i], - plotImpurity=p.impurityPosition if 'nZ' in p.ProfilesPredicted else None, - plotRotation='w0' in p.ProfilesPredicted, + plotImpurity=p.impurityPosition if 'nZ' in p.predicted_channels else None, + plotRotation='w0' in p.predicted_channels, lastRho=self.powerstates[0].plasma["rho"][-1, -1].item(), label=f"profile #{i}", ) @@ -1064,14 +1065,112 @@ def plotMetrics(self, extra_lab="", **kwargs): if len(self.profiles) > len(self.powerstates): prof = self.profiles[-1] - prof.plotGradients( + prof.plot_gradients( axsGrads, color=colors[i+1], - plotImpurity=p.impurityPosition_transport if 'nZ' in p.ProfilesPredicted else None, - plotRotation='w0' in p.ProfilesPredicted, + plotImpurity=p.impurityPosition_transport if 'nZ' in p.predicted_channels else None, + plotRotation='w0' in p.predicted_channels, lastRho=p.plasma["rho"][-1, -1].item(), label="next", ) axs[0].legend(prop={"size": 8}) axsGrads[0].legend(prop={"size": 8}) + + +def surrogate_file_expansion( + portals_folder, + file_new, + variables = ['aLte', 'aLti', 'aLne', 'nuei', 'tite', 'beta_e'], + output_mapper ={ + 'Qe_tr_turb': ['QeMWm2_tr_turb', 'Qgb'], + 'Qi_tr_turb': ['QiMWm2_tr_turb', 'Qgb'], + 'Ge_tr_turb': ['Ge1E20m2_tr_turb', 'Ggb'], + 'Qe_tr_neoc': ['QeMWm2_tr_neoc', 'Qgb'], + 'Qi_tr_neoc': ['QiMWm2_tr_neoc', 'Qgb'], + 'Ge_tr_neoc': ['Ge1E20m2_tr_neoc', 'Ggb'], + } + ): + ''' + This function reads a PORTALS folder and extracts the inputs and outputs using variables and output_mapper. 
+ It then writes a surrogate_data.csv file that can be used as extrapointsFile + + This is useful when you have a PORTALS simulation with [te, ti] and now you want to create a surrogate model with [te, ti, ne] + + ''' + + # ---------------------------------------------------------------------------- + # Grab powerstates and turb_files + # ---------------------------------------------------------------------------- + + portals = PORTALSanalyzer.from_folder(portals_folder) + mitim_runs = IOtools.unpickle_mitim(portals.opt_fun.mitim_model.optimization_object.optimization_extra) + + powerstates = [mitim_runs[i]['powerstate'] for i in range(0, portals.ilast+1)] + turb_files = [powerstates[0].transport_options['folder'] / 'Execution' / f'Evaluation.{i}' / 'transport_simulation_folder' / 'fluxes_turb.json' for i in range(0, portals.ilast+1)] + + turb_info = [] + for file_name in turb_files: + with open(file_name, 'r') as f: + turb_info.append(json.load(f)) + + # ---------------------------------------------------------------------------- + # Prepare dictionary with new inputs and outputs + # ---------------------------------------------------------------------------- + + df_new = [] + + for i in range(0, portals.ilast+1): + + df_helper = {} + for var in output_mapper: + df_helper[var] = { + 'y': ( mitim_runs[i]['powerstate'].plasma[output_mapper[var][0]][0,1:] / mitim_runs[i]['powerstate'].plasma[output_mapper[var][1]][0,1:] ).cpu().numpy(), + 'yvar': ( (mitim_runs[i]['powerstate'].plasma[output_mapper[var][0]+'_stds'][0,1:] / mitim_runs[i]['powerstate'].plasma[output_mapper[var][1]][0,1:])**2 ).cpu().numpy(), + 'x_names': variables, + } + for ix,x in enumerate(df_helper[var]['x_names']): + df_helper[var][f'x{ix}'] = mitim_runs[i]['powerstate'].plasma[x][0,1:].cpu().numpy() + + # Make it per radius + df_helper_new = {} + for ir in range(len(df_helper['Qe_tr_turb']['y'])): + for var in output_mapper: + new_name = var+f'_{ir+1}' + df_helper_new[new_name] = {} + 
df_helper_new[new_name]['y'] = df_helper[var]['y'][ir] + df_helper_new[new_name]['yvar'] = df_helper[var]['yvar'][ir] + df_helper_new[new_name]['x_names'] = df_helper[var]['x_names'] + for ix,x in enumerate(df_helper[var]['x_names']): + df_helper_new[new_name][f'x{ix}'] = df_helper[var][f'x{ix}'][ir] + + df_new.append(df_helper_new) + + # ---------------------------------------------------------------------------- + # Insert in new dataframe + # ---------------------------------------------------------------------------- + + # Flatten df_new into rows + rows = [] + for d in df_new: + for model, vals in d.items(): + row = { + "Model": model, + "y": vals["y"], + "yvar": vals["yvar"], + "x_names": vals["x_names"], + } + # Add all x0, x1, ... keys + for k, v in vals.items(): + if k.startswith("x"): + row[k] = v + rows.append(row) + + # Build dataframe + df_new_flat = pd.DataFrame(rows) + + # ---------------------------------------------------------------------------- + # Grab those that have not been updated (targets) + # ---------------------------------------------------------------------------- + + df_new_flat.to_csv(file_new, index=False) diff --git a/src/mitim_modules/portals/utils/PORTALScgyro.py b/src/mitim_modules/portals/utils/PORTALScgyro.py deleted file mode 100644 index d2752041..00000000 --- a/src/mitim_modules/portals/utils/PORTALScgyro.py +++ /dev/null @@ -1,839 +0,0 @@ -import shutil -import copy -import numpy as np -from IPython import embed -from mitim_tools.misc_tools import IOtools, PLASMAtools -from mitim_tools.gacode_tools import PROFILEStools, TGYROtools -from mitim_tools.misc_tools.LOGtools import printMsg as print - -""" -__________________ -To run standalone: - run ~/MITIM/mitim_opt/mitim/utils/PORTALScgyro.py ./run5/ ~/PRF/mitim_cgyro/sparc_results.txt 0,1,2,3,4 -or - run ~/MITIM/mitim_opt/mitim/utils/PORTALScgyro.py ./run5/ ~/PRF/mitim_cgyro/sparc_results.txt 0[Evaluation.X] 0[position_in_txt] -__________________ -The CGYRO file must contain 
GB units, and the gb unit is MW/m^2, 1E19m^2/s -The CGYRO file must use particle flux. Convective transformation occurs later -""" - - -def evaluateCGYRO(PORTALSparameters, folder, numPORTALS, FolderEvaluation, unmodified_profiles, radii, ProfilesPredicted): - print("\n ** CGYRO evaluation of fluxes has been requested before passing information to the STRATEGY module **",typeMsg="i",) - - if isinstance(numPORTALS, int): - numPORTALS = str(numPORTALS) - - # ------------------------------------------------------------------------------------------------ - # Harcoded - # ------------------------------------------------------------------------------------------------ - if PORTALSparameters['hardCodedCGYRO'] is not None: - """ - train_sep is the number of initial runs in it#0 results file. Now, it's usually 1 - start_num is the number of the first iteration, usually 0 - trick_harcoded_f is the name of the file until the iteration number. E.g. 'example_run/Outputs/cgyro_results/iter_rmp_75_' - - e.g.: - includeMtAndGz_hardcoded, train_sep,start_num,last_one,trick_hardcoded_f = True, 1, 0,100, 'example_run/Outputs/cgyro_results/d3d_5chan_it_' - - """ - - includeMtAndGz_hardcoded = PORTALSparameters["hardCodedCGYRO"]["includeMtAndGz_hardcoded"] - train_sep = PORTALSparameters["hardCodedCGYRO"]["train_sep"] - start_num = PORTALSparameters["hardCodedCGYRO"]["start_num"] - last_one = PORTALSparameters["hardCodedCGYRO"]["last_one"] - trick_hardcoded_f = PORTALSparameters["hardCodedCGYRO"]["trick_hardcoded_f"] - else: - includeMtAndGz_hardcoded = None - train_sep = None - start_num = None - last_one = None - trick_hardcoded_f = None - # ------------------------------------------------------------------------------------------------ - - minErrorPercent = PORTALSparameters["percentError_stable"] - Qi_criterion_stable = PORTALSparameters["Qi_criterion_stable"] - percentNeo = PORTALSparameters["percentError"][1] - useConvectiveFluxes = PORTALSparameters["useConvectiveFluxes"] - - 
try: - impurityPosition = PROFILEStools.impurity_location(PROFILEStools.PROFILES_GACODE(unmodified_profiles), PORTALSparameters["ImpurityOfInterest"]) - except ValueError: - if 'nZ' in ProfilesPredicted: - raise ValueError(f"Impurity {PORTALSparameters['ImpurityOfInterest']} not found in the profiles and needed for CGYRO evaluation") - else: - impurityPosition = 0 - print(f'\t- Impurity location not found. Using hardcoded value of {impurityPosition}') - - OriginalFimp = PORTALSparameters["fImp_orig"] - - cgyroing_file = ( - lambda file_cgyro, numPORTALS_this=0, includeMtAndGz=False: cgyroing( - FolderEvaluation, - unmodified_profiles, - numPORTALS, - minErrorPercent, - Qi_criterion_stable, - useConvectiveFluxes, - percentNeo, - radii, - OriginalFimp=OriginalFimp, - evaluationsInFile=f"{numPORTALS_this}", - impurityPosition=impurityPosition, - file=file_cgyro, - includeMtAndGz=includeMtAndGz, - ) - ) - print(f"\t- Suggested function call for mitim evaluation {numPORTALS} (lambda for cgyroing):",typeMsg="i") - cgyropath = IOtools.expandPath(folder, ensurePathValid=True) / 'Outputs' / 'cgyro_results' / f'cgyro_it_{numPORTALS}.txt' - print(f"\tcgyroing_file('{cgyropath}')") - - print('\t- Then insert "exit" and RETURN', typeMsg="i") - if (trick_hardcoded_f is None) or (int(numPORTALS) > last_one): - embed() - else: - # ------------------------------------------------------------------ - # Hard-coded stuff for quick modifications - # ------------------------------------------------------------------ - if int(numPORTALS) < train_sep: - cgyroing_file( - f"{trick_hardcoded_f}{start_num}.txt", - numPORTALS_this=numPORTALS, - includeMtAndGz=includeMtAndGz_hardcoded, - ) - else: - cgyroing_file( - f"{trick_hardcoded_f}{int(numPORTALS)-train_sep+1+start_num}.txt", - numPORTALS_this=0, - includeMtAndGz=includeMtAndGz_hardcoded, - ) - - -def cgyroing( - FolderEvaluation, - unmodified_profiles, - evaluations, - minErrorPercent, - Qi_criterion_stable, - useConvectiveFluxes, - 
percentNeo, - radii, - OriginalFimp=1.0, - file=None, - evaluationsInFile=0, - impurityPosition=3, - includeMtAndGz=False, -): - """ - Variables need to have dimensions of (evaluation,rho) - """ - - evaluations = np.array([int(i) for i in evaluations.split(",")]) - evaluationsInFile = np.array([int(i) for i in evaluationsInFile.split(",")]) - - ( - aLTe, - aLTi, - aLne, - Q_gb, - Qe, - Qi, - Ge, - GZ, - Mt, - Pexch, - QeE, - QiE, - GeE, - GZE, - MtE, - PexchE, - _, - _, - ) = readCGYROresults(file, radii, includeMtAndGz=includeMtAndGz) - - cont = 0 - for i in evaluations: - k = evaluationsInFile[cont] - cont += 1 - - print( - f"\t- Modifying {IOtools.clipstr(FolderEvaluation)} with position {k} in CGYRO results file {IOtools.clipstr(file)}" - ) - - # Get TGYRO - tgyro = TGYROtools.TGYROoutput( - FolderEvaluation, - profiles=PROFILEStools.PROFILES_GACODE(unmodified_profiles), - ) - - # Quick checker of correct file - wasThisTheCorrectRun(aLTe, aLTi, aLne, Q_gb, tgyro) - - modifyResults( - Qe[k, :], - Qi[k, :], - Ge[k, :], - GZ[k, :], - Mt[k, :], - Pexch[k, :], - QeE[k, :], - QiE[k, :], - GeE[k, :], - GZE[k, :], - MtE[k, :], - PexchE[k, :], - tgyro, - FolderEvaluation, - minErrorPercent=minErrorPercent, - useConvectiveFluxes=useConvectiveFluxes, - Qi_criterion_stable=Qi_criterion_stable, - percentNeo=percentNeo, - impurityPosition=impurityPosition, - OriginalFimp=OriginalFimp, - ) - - -def wasThisTheCorrectRun(aLTe, aLTi, aLne, Q_gb, tgyro, ErrorRaised=0.005): - print("\t- Checking that this was the correct run...") - - tgyro_new = copy.deepcopy(tgyro) - tgyro_new.aLti = tgyro_new.aLti[:, 0, :] - - variables = [ - [aLTe, tgyro_new.aLte, "aLTe"], - [aLTi, tgyro_new.aLti, "aLTi"], - [aLne, tgyro_new.aLne, "aLne"], - [Q_gb, tgyro_new.Q_GB, "Qgb"], - ] - - for var in variables: - [c, t, n] = var - - for pos in range(c.shape[0]): - for i in range(c.shape[1]): - error = np.max(abs((t[pos, i + 1] - c[pos, i]) / t[pos, i + 1])) - print( - f"\t\t* Error in {n}[{i}] was 
{error*100.0:.2f}% (TGYRO {t[pos,i+1]:.3f} vs. CGYRO {c[pos,i]:.3f})", - typeMsg="w" if error > ErrorRaised else "", - ) - - -def readlineNTH(line, full_file=False, unnormalize=True): - s = line.split() - - i = 2 - roa = float(s[i]) - i += 3 - aLne = float(s[i]) - i += 3 - aLTi = float(s[i]) - i += 3 - aLTe = float(s[i]) - i += 3 - - Qi = float(s[i]) - i += 3 - Qi_std = float(s[i]) - i += 3 - Qe = float(s[i]) - i += 3 - Qe_std = float(s[i]) - i += 3 - Ge = float(s[i]) - i += 3 - Ge_std = float(s[i]) - i += 3 - - if full_file: - GZ = float(s[i]) - i += 3 - GZ_std = float(s[i]) - i += 3 - - Mt = float(s[i]) - i += 3 - Mt_std = float(s[i]) - i += 3 - - Pexch = float(s[i]) - i += 3 - Pexch_std = float(s[i]) - i += 3 - - Q_gb = float(s[i]) - i += 3 - G_gb = float(s[i]) * 1e-1 - i += 3 # From 1E19 to 1E20 - - if full_file: - Mt_gb = float(s[i]) - i += 3 - Pexch_gb = float(s[i]) - i += 3 - - tstart = float(s[i]) - i += 3 - tend = float(s[i]) - i += 3 - - if unnormalize: - QiReal = Qi * Q_gb - QiReal_std = Qi_std * Q_gb - QeReal = Qe * Q_gb - QeReal_std = Qe_std * Q_gb - GeReal = Ge * G_gb - GeReal_std = Ge_std * G_gb - else: - QiReal = Qi - QiReal_std = Qi_std - QeReal = Qe - QeReal_std = Qe_std - GeReal = Ge - GeReal_std = Ge_std - - if full_file: - if unnormalize: - GZReal = GZ * G_gb - GZReal_std = GZ_std * G_gb - - MtReal = Mt * Mt_gb - MtReal_std = Mt_std * Mt_gb - - PexchReal = Pexch * Pexch_gb - PexchReal_std = Pexch_std * Pexch_gb - else: - GZReal = GZ - GZReal_std = GZ_std - - MtReal = Mt - MtReal_std = Mt_std - - PexchReal = Pexch - PexchReal_std = Pexch_std - - return ( - roa, - aLTe, - aLTi, - aLne, - Q_gb, - QeReal, - QiReal, - GeReal, - GZReal, - MtReal, - PexchReal, - QeReal_std, - QiReal_std, - GeReal_std, - GZReal_std, - MtReal_std, - PexchReal_std, - tstart, - tend, - ) - else: - return ( - roa, - aLTe, - aLTi, - aLne, - Q_gb, - QeReal, - QiReal, - GeReal, - 0.0, - 0.0, - 0.0, - QeReal_std, - QiReal_std, - GeReal_std, - 0.0, - 0.0, - 0.0, - tstart, - 
tend, - ) - - -def readCGYROresults(file, radii, includeMtAndGz=False, unnormalize=True): - """ - Arrays are in (batch,radii) - MW/m^2 and 1E20 - """ - - with open(file, "r") as f: - lines = f.readlines() - - rad = len(radii) - num = len(lines) // rad - - roa = np.zeros((num, rad)) - aLTe = np.zeros((num, rad)) - aLTi = np.zeros((num, rad)) - aLne = np.zeros((num, rad)) - Q_gb = np.zeros((num, rad)) - - Qe = np.zeros((num, rad)) - Qe_std = np.zeros((num, rad)) - Qi = np.zeros((num, rad)) - Qi_std = np.zeros((num, rad)) - Ge = np.zeros((num, rad)) - Ge_std = np.zeros((num, rad)) - - GZ = np.zeros((num, rad)) - GZ_std = np.zeros((num, rad)) - - Mt = np.zeros((num, rad)) - Mt_std = np.zeros((num, rad)) - - Pexch = np.zeros((num, rad)) - Pexch_std = np.zeros((num, rad)) - - tstart = np.zeros((num, rad)) - tend = np.zeros((num, rad)) - - p = {} - for r in range(len(radii)): - p[r] = 0 - for i in range(len(lines)): - - # -------------------------------------------------------- - # Line not empty - # -------------------------------------------------------- - if len(lines[i].split()) < 10: - continue - - # -------------------------------------------------------- - # Read line - # -------------------------------------------------------- - ( - roa_read, - aLTe_read, - aLTi_read, - aLne_read, - Q_gb_read, - Qe_read, - Qi_read, - Ge_read, - GZ_read, - Mt_read, - Pexch_read, - Qe_std_read, - Qi_std_read, - Ge_std_read, - GZ_std_read, - Mt_std_read, - Pexch_std_read, - tstart_read, - tend_read, - ) = readlineNTH(lines[i], full_file=includeMtAndGz, unnormalize=unnormalize) - - # -------------------------------------------------------- - # Radial location position - # -------------------------------------------------------- - threshold_radii = 1E-4 - r = np.where(np.abs(radii-roa_read) 0 - or INITparameters["removeFast"] - or INITparameters["quasineutrality"] - or INITparameters["sameDensityGradients"] - or INITparameters["recompute_ptot"] - ): - 
profiles.correct(options=INITparameters) + portals_fun.portals_parameters["solution"]["predicted_rho"] = rho + + # Good approach to ensure this consistency + profiles.correct(options={"recalculate_ptot": True}) - if portals_fun.PORTALSparameters["ImpurityOfInterest"] is not None: - position_of_impurity = PROFILEStools.impurity_location(profiles, portals_fun.PORTALSparameters["ImpurityOfInterest"]) + if portals_fun.portals_parameters["solution"]["trace_impurity"] is not None: + position_of_impurity = MITIMstate.impurity_location(profiles, portals_fun.portals_parameters["solution"]["trace_impurity"]) else: position_of_impurity = 1 - if portals_fun.PORTALSparameters["UseOriginalImpurityConcentrationAsWeight"] is not None and portals_fun.PORTALSparameters["ImpurityOfInterest"] is not None: + if portals_fun.portals_parameters["solution"]["fZ0_as_weight"] is not None and portals_fun.portals_parameters["solution"]["trace_impurity"] is not None: f0 = profiles.Species[position_of_impurity]["n0"] / profiles.profiles['ne(10^19/m^3)'][0] - portals_fun.PORTALSparameters["fImp_orig"] = f0/portals_fun.PORTALSparameters["UseOriginalImpurityConcentrationAsWeight"] - print(f'\t- Ion {portals_fun.PORTALSparameters["ImpurityOfInterest"]} has original central concentration of {f0:.2e}, using its inverse multiplied by {portals_fun.PORTALSparameters["UseOriginalImpurityConcentrationAsWeight"]} as scaling factor of GZ -> {portals_fun.PORTALSparameters["fImp_orig"]}',typeMsg="i") + portals_fun.portals_parameters["solution"]["fImp_orig"] = f0/portals_fun.portals_parameters["solution"]["fZ0_as_weight"] + print(f'\t- Ion {portals_fun.portals_parameters["solution"]["trace_impurity"]} has original central concentration of {f0:.2e}, using its inverse multiplied by {portals_fun.portals_parameters["solution"]["fZ0_as_weight"]} as scaling factor of GZ -> {portals_fun.portals_parameters["solution"]["fImp_orig"]:.2e}',typeMsg="i") else: - portals_fun.PORTALSparameters["fImp_orig"] = 1.0 + 
portals_fun.portals_parameters["solution"]["fImp_orig"] = 1.0 # Check if I will be able to calculate radiation speciesNotFound = [] for i in range(len(profiles.Species)): - data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics" / "radiation_chebyshev.csv") + data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics_models" / "radiation_chebyshev.csv") if not (data_df['Ion'].str.lower()==profiles.Species[i]["N"].lower()).any(): speciesNotFound.append(profiles.Species[i]["N"]) # Print warning or question to be careful! if len(speciesNotFound) > 0: - if portals_fun.MODELparameters["Physics_options"]["TypeTarget"] == 3: + if "qrad" in portals_fun.portals_parameters["target"]["options"]["targets_evolve"]: answerYN = print(f"\t- Species {speciesNotFound} not found in radiation database, radiation will be zero in PORTALS... is this ok for your predictions?",typeMsg="q" if checkForSpecies else "w") if checkForSpecies and (not answerYN): @@ -123,37 +117,7 @@ def initializeProblem( # Prepare and defaults - xCPs = torch.from_numpy(np.array(portals_fun.MODELparameters["RhoLocations"])).to(dfT) - - if ModelOptions is None: - ModelOptions = { - "cold_start": False, - "launchMODELviaSlurm": portals_fun.PORTALSparameters[ - "launchEvaluationsAsSlurmJobs" - ], - "MODELparameters": portals_fun.MODELparameters, - "includeFastInQi": portals_fun.PORTALSparameters["includeFastInQi"], - "TurbulentExchange": portals_fun.PORTALSparameters["surrogateForTurbExch"], - "profiles_postprocessing_fun": portals_fun.PORTALSparameters[ - "profiles_postprocessing_fun" - ], - "impurityPosition": position_of_impurity, - "useConvectiveFluxes": portals_fun.PORTALSparameters["useConvectiveFluxes"], - "UseFineGridTargets": portals_fun.PORTALSparameters["fineTargetsResolution"], - "OriginalFimp": portals_fun.PORTALSparameters["fImp_orig"], - "forceZeroParticleFlux": portals_fun.PORTALSparameters[ - "forceZeroParticleFlux" - ], - "percentError": 
portals_fun.PORTALSparameters["percentError"], - "use_tglf_scan_trick": portals_fun.PORTALSparameters["use_tglf_scan_trick"], - } - - if "extra_params" not in ModelOptions: - ModelOptions["extra_params"] = { - "PORTALSparameters": portals_fun.PORTALSparameters, - "folder": portals_fun.folder, - } - + xCPs = torch.from_numpy(np.array(portals_fun.portals_parameters["solution"]["predicted_rho"])).to(dfT) """ *************************************************************************************************** @@ -161,51 +125,48 @@ def initializeProblem( *************************************************************************************************** """ + transport_parameters = portals_fun.portals_parameters["transport"] + + # Add folder and cold_start to the simulation options + transport_options = transport_parameters | {"folder": portals_fun.folder, "cold_start": False} + target_options = portals_fun.portals_parameters["target"] + portals_fun.powerstate = STATEtools.powerstate( profiles, - EvolutionOptions={ - "ProfilePredicted": portals_fun.MODELparameters["ProfilesPredicted"], + evolution_options={ + "ProfilePredicted": portals_fun.portals_parameters["solution"]["predicted_channels"], "rhoPredicted": xCPs, - "useConvectiveFluxes": portals_fun.PORTALSparameters["useConvectiveFluxes"], "impurityPosition": position_of_impurity, - "fineTargetsResolution": portals_fun.PORTALSparameters["fineTargetsResolution"], - }, - TransportOptions={ - "transport_evaluator": portals_fun.PORTALSparameters["transport_evaluator"], - "ModelOptions": ModelOptions, + "fImp_orig": portals_fun.portals_parameters["solution"]["fImp_orig"] }, - TargetOptions={ - "targets_evaluator": portals_fun.PORTALSparameters["targets_evaluator"], - "ModelOptions": { - "TypeTarget": portals_fun.MODELparameters["Physics_options"]["TypeTarget"], - "TargetCalc": portals_fun.PORTALSparameters["TargetCalc"]}, - }, - tensor_opts = tensor_opts + transport_options=transport_options, + target_options=target_options, 
+ tensor_options=tensor_options ) # After resolution and corrections, store. - profiles.writeCurrentStatus(file=FolderInitialization / "input.gacode_modified") + profiles.write_state(file=FolderInitialization / "input.gacode_modified") # *************************************************************************************************** # *************************************************************************************************** # Store parameterization in dictCPs_base (to define later the relative variations) and modify profiles class with parameterized profiles dictCPs_base = {} - for name in portals_fun.MODELparameters["ProfilesPredicted"]: + for name in portals_fun.portals_parameters["solution"]["predicted_channels"]: dictCPs_base[name] = portals_fun.powerstate.update_var(name, var=None)[0, :] # Maybe it was provided from earlier run if start_from_folder is not None: dictCPs_base = grabPrevious(start_from_folder, dictCPs_base) - for name in portals_fun.MODELparameters["ProfilesPredicted"]: + for name in portals_fun.portals_parameters["solution"]["predicted_channels"]: _ = portals_fun.powerstate.update_var( name, var=dictCPs_base[name].unsqueeze(0) ) # Write this updated profiles class (with parameterized profiles) - _ = portals_fun.powerstate.to_gacode( + _ = portals_fun.powerstate.from_powerstate( write_input_gacode=FolderInitialization / "input.gacode", - postprocess_input_gacode=portals_fun.MODELparameters["applyCorrections"], + postprocess_input_gacode=portals_fun.portals_parameters["transport"]["applyCorrections"], ) # Original complete targets @@ -219,24 +180,18 @@ def initializeProblem( if define_ranges_from_profiles is not None: # If I want to define ranges from a different profile powerstate_extra = STATEtools.powerstate( define_ranges_from_profiles, - EvolutionOptions={ - "ProfilePredicted": portals_fun.MODELparameters["ProfilesPredicted"], + evolution_options={ + "ProfilePredicted": 
portals_fun.portals_parameters["solution"]["predicted_channels"], "rhoPredicted": xCPs, - "useConvectiveFluxes": portals_fun.PORTALSparameters["useConvectiveFluxes"], "impurityPosition": position_of_impurity, - "fineTargetsResolution": portals_fun.PORTALSparameters["fineTargetsResolution"], - }, - TargetOptions={ - "targets_evaluator": portals_fun.PORTALSparameters["targets_evaluator"], - "ModelOptions": { - "TypeTarget": portals_fun.MODELparameters["Physics_options"]["TypeTarget"], - "TargetCalc": portals_fun.PORTALSparameters["TargetCalc"]}, + "fImp_orig": portals_fun.portals_parameters["solution"]["fImp_orig"] }, - tensor_opts = tensor_opts + target_options=portals_fun.portals_parameters["target"], + tensor_options = tensor_options ) dictCPs_base_extra = {} - for name in portals_fun.MODELparameters["ProfilesPredicted"]: + for name in portals_fun.portals_parameters["solution"]["predicted_channels"]: dictCPs_base_extra[name] = powerstate_extra.update_var(name, var=None)[0, :] dictCPs_base = dictCPs_base_extra @@ -246,18 +201,18 @@ def initializeProblem( dictDVs = OrderedDict() for var in dictCPs_base: for conti, i in enumerate(np.arange(1, len(dictCPs_base[var]))): - if limitsAreRelative: + if limits_are_relative: y1 = dictCPs_base[var][i] - abs(dictCPs_base[var][i])*RelVar_y_min[var][conti] y2 = dictCPs_base[var][i] + abs(dictCPs_base[var][i])*RelVar_y_max[var][conti] else: y1 = torch.tensor(RelVar_y_min[var][conti]).to(dfT) y2 = torch.tensor(RelVar_y_max[var][conti]).to(dfT) - if hardGradientLimits is not None: - if hardGradientLimits[0] is not None: - y1 = torch.tensor(np.min([y1, hardGradientLimits[0]])) - if hardGradientLimits[1] is not None: - y2 = torch.tensor(np.max([y2, hardGradientLimits[1]])) + if yminymax_atleast is not None: + if yminymax_atleast[0] is not None: + y1 = torch.tensor(np.min([y1, yminymax_atleast[0]])) + if yminymax_atleast[1] is not None: + y2 = torch.tensor(np.max([y2, yminymax_atleast[1]])) # Check that makes sense if y2-y1 < thr: @@ 
-270,10 +225,10 @@ def initializeProblem( base_gradient = torch.rand(1)[0] * (y2 - y1) / 4 + (3 * y1 + y2) / 4 name = f"aL{var}_{i}" - if dvs_fixed is None: + if fixed_gradients is None: dictDVs[name] = [y1, base_gradient, y2] else: - dictDVs[name] = [dvs_fixed[name][0], base_gradient, dvs_fixed[name][1]] + dictDVs[name] = [fixed_gradients[name][0], base_gradient, fixed_gradients[name][1]] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Define output dictionaries @@ -292,21 +247,21 @@ def initializeProblem( elif ikey == "w0": var = "Mt" - for i in range(len(portals_fun.MODELparameters["RhoLocations"])): - ofs.append(f"{var}Turb_{i+1}") - ofs.append(f"{var}Neo_{i+1}") + for i in range(len(portals_fun.portals_parameters["solution"]["predicted_rho"])): + ofs.append(f"{var}_tr_turb_{i+1}") + ofs.append(f"{var}_tr_neoc_{i+1}") - ofs.append(f"{var}Tar_{i+1}") + ofs.append(f"{var}_tar_{i+1}") name_objectives.append(f"{var}Res_{i+1}") - if portals_fun.PORTALSparameters["surrogateForTurbExch"]: - for i in range(len(portals_fun.MODELparameters["RhoLocations"])): - ofs.append(f"PexchTurb_{i+1}") + if portals_fun.portals_parameters["solution"]["turbulent_exchange_as_surrogate"]: + for i in range(len(portals_fun.portals_parameters["solution"]["predicted_rho"])): + ofs.append(f"Qie_tr_turb_{i+1}") name_transformed_ofs = [] for of in ofs: - if ("GZ" in of) and (portals_fun.PORTALSparameters["applyImpurityGammaTrick"]): + if ("GZ" in of) and (portals_fun.portals_parameters["solution"]["impurity_trick"]): lab = f"{of} (GB MOD)" else: lab = f"{of} (GB)" @@ -335,16 +290,14 @@ def initializeProblem( # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Variables = {} - for ikey in portals_fun.PORTALSparameters["portals_transformation_variables"]: + for ikey in portals_fun.portals_parameters["solution"]["portals_transformation_variables"]: Variables[ikey] = 
prepportals_transformation_variables(portals_fun, ikey) portals_fun.surrogate_parameters = { - "transformationInputs": PORTALStools.produceNewInputs, - "transformationOutputs": PORTALStools.transformPORTALS, + "transformationInputs": PORTALStools.input_transform_portals, + "transformationOutputs": PORTALStools.output_transform_portals, "powerstate": portals_fun.powerstate, - "applyImpurityGammaTrick": portals_fun.PORTALSparameters["applyImpurityGammaTrick"], - "useFluxRatios": portals_fun.PORTALSparameters["useFluxRatios"], - "useDiffusivities": portals_fun.PORTALSparameters["useDiffusivities"], + "impurity_trick": portals_fun.portals_parameters["solution"]["impurity_trick"], "surrogate_transformation_variables_alltimes": Variables, "surrogate_transformation_variables_lasttime": copy.deepcopy(Variables[list(Variables.keys())[-1]]), "parameters_combined": {}, @@ -352,36 +305,31 @@ def initializeProblem( def prepportals_transformation_variables(portals_fun, ikey, doNotFitOnFixedValues=False): allOuts = portals_fun.optimization_options["problem_options"]["ofs"] - portals_transformation_variables = portals_fun.PORTALSparameters["portals_transformation_variables"][ikey] - portals_transformation_variables_trace = portals_fun.PORTALSparameters[ - "portals_transformation_variables_trace" - ][ikey] - additional_params_in_surrogate = portals_fun.PORTALSparameters[ - "additional_params_in_surrogate" - ] + portals_transformation_variables = portals_fun.portals_parameters["solution"]["portals_transformation_variables"][ikey] + portals_transformation_variables_trace = portals_fun.portals_parameters["solution"]["portals_transformation_variables_trace"][ikey] Variables = {} for output in allOuts: if IOtools.isfloat(output): continue - typ, num = output.split("_") - pos = int(num) + typ = '_'.join(output.split("_")[:-1]) + pos = int(output.split("_")[-1]) if typ in [ "Qe", - "QeTurb", - "QeNeo", + "Qe_tr_turb", + "Qe_tr_neoc", "Qi", - "QiTurb", - "QiNeo", + "Qi_tr_turb", + 
"Qi_tr_neoc", "Ge", - "GeTurb", - "GeNeo", - "PexchTurb", + "Ge_tr_turb", + "Ge_tr_neoc", + "Qie_tr_turb", "Mt", - "MtTurb", - "MtNeo", + "Mt_tr_turb", + "Mt_tr_neoc", ]: if doNotFitOnFixedValues: isAbsValFixed = pos == ( @@ -391,23 +339,20 @@ def prepportals_transformation_variables(portals_fun, ikey, doNotFitOnFixedValue isAbsValFixed = False Variations = { - "aLte": "te" in portals_fun.MODELparameters["ProfilesPredicted"], - "aLti": "ti" in portals_fun.MODELparameters["ProfilesPredicted"], - "aLne": "ne" in portals_fun.MODELparameters["ProfilesPredicted"], - "aLw0": "w0" in portals_fun.MODELparameters["ProfilesPredicted"], - "te": ("te" in portals_fun.MODELparameters["ProfilesPredicted"]) + "aLte": "te" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "aLti": "ti" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "aLne": "ne" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "aLw0": "w0" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "te": ("te" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), - "ti": ("ti" in portals_fun.MODELparameters["ProfilesPredicted"]) + "ti": ("ti" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), - "ne": ("ne" in portals_fun.MODELparameters["ProfilesPredicted"]) + "ne": ("ne" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), - "w0": ("w0" in portals_fun.MODELparameters["ProfilesPredicted"]) + "w0": ("w0" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), } - for kkey in additional_params_in_surrogate: - Variations[kkey] = True - Variables[output] = [] for ikey in portals_transformation_variables: useThisOne = False @@ -419,35 +364,30 @@ def prepportals_transformation_variables(portals_fun, ikey, doNotFitOnFixedValue if useThisOne: Variables[output].append(ikey) - elif typ in 
["GZ", "GZTurb", "GZNeo"]: + elif typ in ["GZ", "GZ_tr_turb", "GZ_tr_neoc"]: if doNotFitOnFixedValues: - isAbsValFixed = pos == ( - portals_fun.powerstate.plasma["rho"].shape[-1] - 1 - ) + isAbsValFixed = pos == portals_fun.powerstate.plasma["rho"].shape[-1] - 1 else: isAbsValFixed = False Variations = { - "aLte": "te" in portals_fun.MODELparameters["ProfilesPredicted"], - "aLti": "ti" in portals_fun.MODELparameters["ProfilesPredicted"], - "aLne": "ne" in portals_fun.MODELparameters["ProfilesPredicted"], - "aLw0": "w0" in portals_fun.MODELparameters["ProfilesPredicted"], - "aLnZ": "nZ" in portals_fun.MODELparameters["ProfilesPredicted"], - "te": ("te" in portals_fun.MODELparameters["ProfilesPredicted"]) + "aLte": "te" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "aLti": "ti" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "aLne": "ne" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "aLw0": "w0" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "aLnZ": "nZ" in portals_fun.portals_parameters["solution"]["predicted_channels"], + "te": ("te" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), - "ti": ("ti" in portals_fun.MODELparameters["ProfilesPredicted"]) + "ti": ("ti" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), - "ne": ("ne" in portals_fun.MODELparameters["ProfilesPredicted"]) + "ne": ("ne" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), - "w0": ("w0" in portals_fun.MODELparameters["ProfilesPredicted"]) + "w0": ("w0" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), - "nZ": ("nZ" in portals_fun.MODELparameters["ProfilesPredicted"]) + "nZ": ("nZ" in portals_fun.portals_parameters["solution"]["predicted_channels"]) and (not isAbsValFixed), } - for kkey in additional_params_in_surrogate: - 
Variations[kkey] = True - Variables[output] = [] for ikey in portals_transformation_variables_trace: useThisOne = False @@ -459,15 +399,15 @@ def prepportals_transformation_variables(portals_fun, ikey, doNotFitOnFixedValue if useThisOne: Variables[output].append(ikey) - elif typ in ["QeTar"]: - Variables[output] = ["PeGB"] - elif typ in ["QiTar"]: - Variables[output] = ["PiGB"] - elif typ in ["GeTar"]: + elif typ in ["Qe_tar"]: + Variables[output] = ["QeGB"] + elif typ in ["Qi_tar"]: + Variables[output] = ["QiGB"] + elif typ in ["Ge_tar"]: Variables[output] = ["CeGB"] - elif typ in ["GZTar"]: + elif typ in ["GZ_tar"]: Variables[output] = ["CZGB"] - elif typ in ["MtTar"]: + elif typ in ["Mt_tar"]: Variables[output] = ["MtGB"] return Variables diff --git a/src/mitim_modules/portals/utils/PORTALSoptimization.py b/src/mitim_modules/portals/utils/PORTALSoptimization.py index dd9268e7..2b36f026 100644 --- a/src/mitim_modules/portals/utils/PORTALSoptimization.py +++ b/src/mitim_modules/portals/utils/PORTALSoptimization.py @@ -1,9 +1,10 @@ import copy +from mitim_modules.powertorch.physics_models import transport_analytic import torch import shutil import random from functools import partial -from mitim_modules.powertorch.physics import TRANSPORTtools +from mitim_modules.powertorch.utils import TRANSPORTtools from mitim_tools.misc_tools import IOtools from mitim_modules.powertorch import STATEtools from mitim_tools.opt_tools.utils import BOgraphics @@ -30,7 +31,7 @@ def initialization_simple_relax(self): MainFolder.mkdir(parents=True, exist_ok=True) a, b = IOtools.reducePathLevel(self.folderExecution, level=1) - namingConvention = f"portals_sr_{b}_ev" + namingConvention = "portals_sr_ev" if self.seed is not None and self.seed != 0: random.seed(self.seed) @@ -77,10 +78,10 @@ def initialization_simple_relax(self): newname = f"{namingConvention}_{i}" # Delte destination first - if (ff / "model_complete").exists(): - IOtools.shutil_rmtree(ff / "model_complete") + if (ff / 
"transport_simulation_folder").exists(): + IOtools.shutil_rmtree(ff / "transport_simulation_folder") - shutil.copytree(MainFolder / newname / "model_complete", ff / "model_complete") #### delete first + shutil.copytree(MainFolder / newname / "transport_simulation_folder", ff / "transport_simulation_folder") #### delete first return Xopt.cpu().numpy() @@ -91,7 +92,17 @@ def initialization_simple_relax(self): """ -def flux_match_surrogate(step,profiles, plot_results=False, fn = None, file_write_csv=None, algorithm = None, solver_options = None, keep_within_bounds = True): +def flux_match_surrogate( + step, + profiles, + plot_results=False, + fn = None, + file_write_csv=None, + algorithm = None, + solver_options = None, + keep_within_bounds = True, + target_options_use = None, + ): ''' Technique to reutilize flux surrogates to predict new conditions ---------------------------------------------------------------- @@ -125,27 +136,26 @@ def flux_match_surrogate(step,profiles, plot_results=False, fn = None, file_writ # Create powerstate with new profiles # ---------------------------------------------------- - TransportOptions = copy.deepcopy(step.surrogate_parameters["powerstate"].TransportOptions) + transport_options = copy.deepcopy(step.surrogate_parameters["powerstate"].transport_options) # Define transport calculation function as a surrogate model - TransportOptions['transport_evaluator'] = TRANSPORTtools.surrogate_model - TransportOptions['ModelOptions'] = {'flux_fun': partial(step.evaluators['residual_function'],outputComponents=True)} + transport_options['evaluator'] = transport_analytic.surrogate + transport_options["options"] = {'flux_fun': partial(step.evaluators['residual_function'],outputComponents=True)} # Create powerstate with the same options as the original portals but with the new profiles powerstate = STATEtools.powerstate( profiles, - EvolutionOptions={ - "ProfilePredicted": step.surrogate_parameters["powerstate"].ProfilesPredicted, + 
evolution_options={ + "ProfilePredicted": step.surrogate_parameters["powerstate"].predicted_channels, "rhoPredicted": step.surrogate_parameters["powerstate"].plasma["rho"][0,1:], - "useConvectiveFluxes": step.surrogate_parameters["powerstate"].useConvectiveFluxes, "impurityPosition": step.surrogate_parameters["powerstate"].impurityPosition, - "fineTargetsResolution": step.surrogate_parameters["powerstate"].fineTargetsResolution, }, - TransportOptions=TransportOptions, - TargetOptions=step.surrogate_parameters["powerstate"].TargetOptions, - tensor_opts = { + transport_options=transport_options, + target_options= step.surrogate_parameters["powerstate"].target_options if target_options_use is None else target_options_use, + tensor_options = { "dtype": step.surrogate_parameters["powerstate"].dfT.dtype, - "device": step.surrogate_parameters["powerstate"].dfT.device}, + "device": step.surrogate_parameters["powerstate"].dfT.device + }, ) # Pass powerstate as part of the surrogate_parameters such that transformations now occur with the new profiles diff --git a/src/mitim_modules/portals/utils/PORTALSplot.py b/src/mitim_modules/portals/utils/PORTALSplot.py index 22ec22f2..11013155 100644 --- a/src/mitim_modules/portals/utils/PORTALSplot.py +++ b/src/mitim_modules/portals/utils/PORTALSplot.py @@ -1,11 +1,12 @@ +from mitim_tools.plasmastate_tools.utils import state_plotting import torch import copy import numpy as np import matplotlib.pyplot as plt from mitim_tools.misc_tools import GRAPHICStools -from mitim_tools.gacode_tools import PROFILEStools from mitim_modules.portals import PORTALStools from mitim_modules.powertorch import STATEtools +from mitim_tools.plasmastate_tools.utils import state_plotting from mitim_modules.powertorch.utils import POWERplot from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed @@ -39,14 +40,14 @@ def PORTALSanalyzer_plotMetrics( plt.ion() fig = plt.figure(figsize=(15, 8)) - numprofs = 
len(self.ProfilesPredicted) + numprofs = len(self.predicted_channels) grid = plt.GridSpec(nrows=8, ncols=numprofs + 1, hspace=0.3, wspace=0.35) cont = 0 # Te - if "te" in self.ProfilesPredicted: + if "te" in self.predicted_channels: axTe = fig.add_subplot(grid[:4, cont]) axTe.set_title("Electron Temperature") axTe_g = fig.add_subplot(grid[4:6, cont]) @@ -55,7 +56,7 @@ def PORTALSanalyzer_plotMetrics( else: axTe = axTe_g = axTe_f = None - if "ti" in self.ProfilesPredicted: + if "ti" in self.predicted_channels: axTi = fig.add_subplot(grid[:4, cont]) axTi.set_title("Ion Temperature") axTi_g = fig.add_subplot(grid[4:6, cont]) @@ -65,7 +66,7 @@ def PORTALSanalyzer_plotMetrics( axTi = axTi_g = axTi_f = None - if "ne" in self.ProfilesPredicted: + if "ne" in self.predicted_channels: axne = fig.add_subplot(grid[:4, cont]) axne.set_title("Electron Density") axne_g = fig.add_subplot(grid[4:6, cont]) @@ -217,23 +218,23 @@ def PORTALSanalyzer_plotMetrics( if axTe_f is not None: axTe_f.plot( rho, - power.plasma['Pe_tr_turb'].cpu().numpy() + power.plasma['Pe_tr_neo'].cpu().numpy(), + power.plasma['QeMWm2_tr_turb'].cpu().numpy() + power.plasma['QeMWm2_tr_neoc'].cpu().numpy(), "-", c=col, lw=lwt, alpha=alph, ) - axTe_f.plot(rho, power.plasma['Pe'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) + axTe_f.plot(rho, power.plasma['QeMWm2'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) if axTi_f is not None: axTi_f.plot( rho, - power.plasma['Pi_tr_turb'].cpu().numpy() + power.plasma['Pi_tr_neo'].cpu().numpy(), + power.plasma['QiMWm2_tr_turb'].cpu().numpy() + power.plasma['QiMWm2_tr_neoc'].cpu().numpy(), "-", c=col, lw=lwt, alpha=alph, ) - axTi_f.plot(rho, power.plasma['Pi'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) + axTi_f.plot(rho, power.plasma['QiMWm2'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) if axne_f is not None: @@ -241,11 +242,11 @@ def PORTALSanalyzer_plotMetrics( axne_f.plot( rho, - 
power.plasma['Ce_raw_tr_turb'].cpu().numpy()+power.plasma['Ce_raw_tr_neo'].cpu().numpy(), + power.plasma['Ge1E20m2_tr_turb'].cpu().numpy()+power.plasma['Ge1E20m2_tr_neoc'].cpu().numpy(), "-", c=col, lw=lwt, alpha=alph) axne_f.plot( rho, - power.plasma['Ce_raw'].cpu().numpy() * (1 - int(self.forceZeroParticleFlux)), + power.plasma['Ge1E20m2'].cpu().numpy() * (1 - int(self.force_zero_particle_flux)), "--", c=col, lw=lwt, @@ -254,19 +255,19 @@ def PORTALSanalyzer_plotMetrics( if axnZ_f is not None: - axnZ_f.plot(rho, power.plasma['CZ_raw_tr_turb'].cpu().numpy()+power.plasma['CZ_raw_tr_neo'].cpu().numpy(), "-", c=col, lw=lwt, alpha=alph) - axnZ_f.plot(rho, power.plasma['CZ_raw'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) + axnZ_f.plot(rho, power.plasma['GZ1E20m2_tr_turb'].cpu().numpy()+power.plasma['GZ1E20m2_tr_neoc'].cpu().numpy(), "-", c=col, lw=lwt, alpha=alph) + axnZ_f.plot(rho, power.plasma['GZ1E20m2'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) if axw0_f is not None: axw0_f.plot( rho, - power.plasma['Mt_tr_turb'].cpu().numpy() + power.plasma['Mt_tr_neo'].cpu().numpy(), + power.plasma['MtJm2_tr_turb'].cpu().numpy() + power.plasma['MtJm2_tr_neoc'].cpu().numpy(), "-", c=col, lw=lwt, alpha=alph, ) - axw0_f.plot(rho, power.plasma['Mt'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) + axw0_f.plot(rho, power.plasma['MtJm2'].cpu().numpy(), "--", c=col, lw=lwt, alpha=alph) # --------------------------------------------------------------------------------------------------------- @@ -381,7 +382,7 @@ def PORTALSanalyzer_plotMetrics( col=col, lab=lab, msFlux=msFlux, - forceZeroParticleFlux=self.forceZeroParticleFlux, + force_zero_particle_flux=self.force_zero_particle_flux, maxStore=indexToMaximize == indexUse, decor=self.ibest == indexUse, plotFlows=plotFlows and (self.ibest == indexUse), @@ -477,7 +478,7 @@ def PORTALSanalyzer_plotMetrics( ax.set_xticklabels([]) ax = axC - if "te" in self.ProfilesPredicted: + if "te" in self.predicted_channels: v = self.resTeM 
ax.plot( self.evaluations, @@ -488,7 +489,7 @@ def PORTALSanalyzer_plotMetrics( markersize=2, label=self.labelsFluxes["te"], ) - if "ti" in self.ProfilesPredicted: + if "ti" in self.predicted_channels: v = self.resTiM ax.plot( self.evaluations, @@ -499,7 +500,7 @@ def PORTALSanalyzer_plotMetrics( markersize=2, label=self.labelsFluxes["ti"], ) - if "ne" in self.ProfilesPredicted: + if "ne" in self.predicted_channels: v = self.resneM ax.plot( self.evaluations, @@ -510,7 +511,7 @@ def PORTALSanalyzer_plotMetrics( markersize=2, label=self.labelsFluxes["ne"], ) - if "nZ" in self.ProfilesPredicted: + if "nZ" in self.predicted_channels: v = self.resnZM ax.plot( self.evaluations, @@ -521,7 +522,7 @@ def PORTALSanalyzer_plotMetrics( markersize=2, label=self.labelsFluxes["nZ"], ) - if "w0" in self.ProfilesPredicted: + if "w0" in self.predicted_channels: v = self.resw0M ax.plot( self.evaluations, @@ -545,7 +546,7 @@ def PORTALSanalyzer_plotMetrics( ): if (indexUse is None) or (indexUse >= len(self.powerstates)): continue - if "te" in self.ProfilesPredicted: + if "te" in self.predicted_channels: v = self.resTeM ax.plot( [self.evaluations[indexUse]], @@ -554,7 +555,7 @@ def PORTALSanalyzer_plotMetrics( color=col, markersize=4, ) - if "ti" in self.ProfilesPredicted: + if "ti" in self.predicted_channels: v = self.resTiM ax.plot( [self.evaluations[indexUse]], @@ -563,7 +564,7 @@ def PORTALSanalyzer_plotMetrics( color=col, markersize=4, ) - if "ne" in self.ProfilesPredicted: + if "ne" in self.predicted_channels: v = self.resneM ax.plot( [self.evaluations[indexUse]], @@ -572,7 +573,7 @@ def PORTALSanalyzer_plotMetrics( color=col, markersize=4, ) - if "nZ" in self.ProfilesPredicted: + if "nZ" in self.predicted_channels: v = self.resnZM ax.plot( [self.evaluations[indexUse]], @@ -581,7 +582,7 @@ def PORTALSanalyzer_plotMetrics( color=col, markersize=4, ) - if "w0" in self.ProfilesPredicted: + if "w0" in self.predicted_channels: v = self.resw0M ax.plot( [self.evaluations[indexUse]], @@ 
-1127,7 +1128,7 @@ def PORTALSanalyzer_plotExpected( # ---- Plot - numprofs = len(self.ProfilesPredicted) + numprofs = len(self.predicted_channels) if numprofs <= 4: wspace = 0.3 @@ -1137,7 +1138,7 @@ def PORTALSanalyzer_plotExpected( grid = plt.GridSpec(nrows=4, ncols=numprofs, hspace=0.2, wspace=wspace) cont = 0 - if "te" in self.ProfilesPredicted: + if "te" in self.predicted_channels: axTe = fig.add_subplot(grid[0, cont]) axTe.set_title("Electron Temperature") axTe_g = fig.add_subplot(grid[1, cont], sharex=axTe) @@ -1146,7 +1147,7 @@ def PORTALSanalyzer_plotExpected( cont += 1 else: axTe = axTe_g = axTe_f = axTe_r = None - if "ti" in self.ProfilesPredicted: + if "ti" in self.predicted_channels: axTi = fig.add_subplot(grid[0, cont], sharex=axTe) axTi.set_title("Ion Temperature") axTi_g = fig.add_subplot(grid[1, cont], sharex=axTe) @@ -1155,7 +1156,7 @@ def PORTALSanalyzer_plotExpected( cont += 1 else: axTi = axTi_g = axTi_f = axTi_r = None - if "ne" in self.ProfilesPredicted: + if "ne" in self.predicted_channels: axne = fig.add_subplot(grid[0, cont], sharex=axTe) axne.set_title("Electron Density") axne_g = fig.add_subplot(grid[1, cont], sharex=axTe) @@ -1206,7 +1207,7 @@ def PORTALSanalyzer_plotExpected( rho = p.profiles["rho(-)"] roa = p.derived["roa"] - rhoVals = self.MODELparameters["RhoLocations"] + rhoVals = self.portals_parameters["solution"]["predicted_rho"] roaVals = np.interp(rhoVals, rho, roa) lastX = roaVals[-1] @@ -1394,7 +1395,7 @@ def PORTALSanalyzer_plotExpected( rho = self.profiles_next_new.profiles["rho(-)"] - rhoVals = self.MODELparameters["RhoLocations"] + rhoVals = self.portals_parameters["solution"]["predicted_rho"] roaVals = np.interp(rhoVals, rho, roa) p0 = self.powerstates[plotPoints[0]].profiles @@ -1826,10 +1827,10 @@ def PORTALSanalyzer_plotSummary(self, fn=None, fn_color=None): # Plot PROFILES # ------------------------------------------------------- - figs = PROFILEStools.add_figures(fn,fnlab_pre = "PROFILES - ") + figs = 
state_plotting.add_figures(fn,fnlab_pre = "PROFILES - ") if indecesPlot[0] < len(self.powerstates): - _ = PROFILEStools.plotAll( + _ = state_plotting.plotAll( [ self.powerstates[indecesPlot[1]].profiles, self.powerstates[indecesPlot[0]].profiles, @@ -1851,7 +1852,7 @@ def PORTALSanalyzer_plotSummary(self, fn=None, fn_color=None): fig4 = fn.add_figure(label="PROFILES Comparison", tab_color=fn_color) grid = plt.GridSpec( 2, - np.max([3, len(self.ProfilesPredicted)]), + np.max([3, len(self.predicted_channels)]), hspace=0.3, wspace=0.3, ) @@ -1887,14 +1888,14 @@ def PORTALSanalyzer_plotSummary(self, fn=None, fn_color=None): [0.2, 1.0, 1.0, 1.0], ) ): - profiles.plotGradients( + profiles.plot_gradients( axs4, color=colors[i], label=label, - lastRho=self.MODELparameters["RhoLocations"][-1], + lastRho=self.portals_parameters["solution"]["predicted_rho"][-1], alpha=alpha, useRoa=True, - RhoLocationsPlot=self.MODELparameters["RhoLocations"], + predicted_rhoPlot=self.portals_parameters["solution"]["predicted_rho"], plotImpurity=self.runWithImpurity, plotRotation=self.runWithRotation, autoscale=i == 3, @@ -1907,7 +1908,7 @@ def PORTALSanalyzer_plotSummary(self, fn=None, fn_color=None): # ------------------------------------------------------- fig = fn.add_figure(label="Powerstate", tab_color=fn_color) - axs, axsM = STATEtools.add_axes_powerstate_plot(fig,num_kp=len(self.ProfilesPredicted)) + axs, axsM = STATEtools.add_axes_powerstate_plot(fig,num_kp=len(self.predicted_channels)) for indeces,c in zip(indecesPlot,["g","r","m"]): if indeces is not None: @@ -1925,7 +1926,7 @@ def PORTALSanalyzer_plotRanges(self, fig=None): fig = plt.figure() pps = np.max( - [3, len(self.ProfilesPredicted)] + [3, len(self.predicted_channels)] ) # Because plotGradients require at least Te, Ti, ne grid = plt.GridSpec(2, pps, hspace=0.3, wspace=0.3) axsR = [] @@ -1955,10 +1956,10 @@ def PORTALSanalyzer_plotRanges(self, fig=None): ms = 0 p = self.mitim_runs[self.i0]["powerstate"].profiles - 
p.plotGradients( + p.plot_gradients( axsR, color="b", - lastRho=self.MODELparameters["RhoLocations"][-1], + lastRho=self.portals_parameters["solution"]["predicted_rho"][-1], ms=ms, lw=1.0, label="Initial (#0)", @@ -1972,10 +1973,10 @@ def PORTALSanalyzer_plotRanges(self, fig=None): break p = self.mitim_runs[ikey]["powerstate"].profiles - p.plotGradients( + p.plot_gradients( axsR, color="r", - lastRho=self.MODELparameters["RhoLocations"][-1], + lastRho=self.portals_parameters["solution"]["predicted_rho"][-1], ms=ms, lw=0.3, ls="-o" if self.opt_fun.mitim_model.avoidPoints is not None else "-.o", @@ -1984,10 +1985,10 @@ def PORTALSanalyzer_plotRanges(self, fig=None): ) p = self.mitim_runs[self.ibest]["powerstate"].profiles - p.plotGradients( + p.plot_gradients( axsR, color="g", - lastRho=self.MODELparameters["RhoLocations"][-1], + lastRho=self.portals_parameters["solution"]["predicted_rho"][-1], ms=ms, lw=1.0, label=f"Best (#{self.opt_fun.res.best_absolute_index})", @@ -2010,10 +2011,10 @@ def PORTALSanalyzer_plotModelComparison( if (fig is None) and (axs is None): plt.ion() - fig = plt.figure(figsize=(15, 6 if len(self.ProfilesPredicted)+int(self.PORTALSparameters["surrogateForTurbExch"]) < 4 else 10)) + fig = plt.figure(figsize=(15, 6 if len(self.predicted_channels)+int(self.portals_parameters["solution"]["turbulent_exchange_as_surrogate"]) < 4 else 10)) if axs is None: - if len(self.ProfilesPredicted)+int(self.PORTALSparameters["surrogateForTurbExch"]) < 4: + if len(self.predicted_channels)+int(self.portals_parameters["solution"]["turbulent_exchange_as_surrogate"]) < 4: axs = fig.subplots(ncols=3) else: axs = fig.subplots(ncols=3, nrows=2) @@ -2026,95 +2027,99 @@ def PORTALSanalyzer_plotModelComparison( metrics = {} # te - quantityX = "QeGB_sim_turb" if UseTGLFfull_x is None else "[TGLF]Qe" - quantityX_stds = "QeGB_sim_turb_stds" if UseTGLFfull_x is None else None - quantityY = "QeGB_sim_turb" - quantityY_stds = "QeGB_sim_turb_stds" - metrics["Qe"] = 
plotModelComparison_quantity( - self, - axs[cont], - quantityX=quantityX, - quantityX_stds=quantityX_stds, - quantityY=quantityY, - quantityY_stds=quantityY_stds, - quantity_label="$Q_e^{GB}$", - title="Electron energy flux (GB)", - includeErrors=includeErrors, - includeMetric=includeMetric, - includeLeg=True, - ) + if 'te' in self.predicted_channels: + quantityX = "QeGB_sim_turb" if UseTGLFfull_x is None else "[TGLF]Qe" + quantityX_stds = "QeGB_sim_turb_stds" if UseTGLFfull_x is None else None + quantityY = "QeGB_sim_turb" + quantityY_stds = "QeGB_sim_turb_stds" + metrics["Qe"] = plotModelComparison_quantity( + self, + axs[cont], + quantityX=quantityX, + quantityX_stds=quantityX_stds, + quantityY=quantityY, + quantityY_stds=quantityY_stds, + quantity_label="$Q_e^{GB}$", + title="Electron energy flux (GB)", + includeErrors=includeErrors, + includeMetric=includeMetric, + includeLeg=True, + ) - axs[cont].set_xscale("log") - axs[cont].set_yscale("log") + axs[cont].set_xscale("log") + axs[cont].set_yscale("log") - cont += 1 + cont += 1 # ti - quantityX = "QiGBIons_sim_turb_thr" if UseTGLFfull_x is None else "[TGLF]Qi" - quantityX_stds = "QiGBIons_sim_turb_thr_stds" if UseTGLFfull_x is None else None - quantityY = "QiGBIons_sim_turb_thr" - quantityY_stds = "QiGBIons_sim_turb_thr_stds" - metrics["Qi"] = plotModelComparison_quantity( - self, - axs[cont], - quantityX=quantityX, - quantityX_stds=quantityX_stds, - quantityY=quantityY, - quantityY_stds=quantityY_stds, - quantity_label="$Q_i^{GB}$", - title="Ion energy flux (GB)", - includeErrors=includeErrors, - includeMetric=includeMetric, - includeLeg=includeLegAll, - ) + if 'ti' in self.predicted_channels: + quantityX = "QiGBIons_sim_turb_thr" if UseTGLFfull_x is None else "[TGLF]Qi" + quantityX_stds = "QiGBIons_sim_turb_thr_stds" if UseTGLFfull_x is None else None + quantityY = "QiGBIons_sim_turb_thr" + quantityY_stds = "QiGBIons_sim_turb_thr_stds" + metrics["Qi"] = plotModelComparison_quantity( + self, + axs[cont], + 
quantityX=quantityX, + quantityX_stds=quantityX_stds, + quantityY=quantityY, + quantityY_stds=quantityY_stds, + quantity_label="$Q_i^{GB}$", + title="Ion energy flux (GB)", + includeErrors=includeErrors, + includeMetric=includeMetric, + includeLeg=includeLegAll, + ) - axs[cont].set_xscale("log") - axs[cont].set_yscale("log") + axs[cont].set_xscale("log") + axs[cont].set_yscale("log") - cont += 1 + cont += 1 # ne - quantityX = "GeGB_sim_turb" if UseTGLFfull_x is None else "[TGLF]Ge" - quantityX_stds = "GeGB_sim_turb_stds" if UseTGLFfull_x is None else None - quantityY = "GeGB_sim_turb" - quantityY_stds = "GeGB_sim_turb_stds" - metrics["Ge"] = plotModelComparison_quantity( - self, - axs[cont], - quantityX=quantityX, - quantityX_stds=quantityX_stds, - quantityY=quantityY, - quantityY_stds=quantityY_stds, - quantity_label="$\\Gamma_e^{GB}$", - title="Electron particle flux (GB)", - includeErrors=includeErrors, - includeMetric=includeMetric, - includeLeg=includeLegAll, - ) + if 'ne' in self.predicted_channels: + quantityX = "GeGB_sim_turb" if UseTGLFfull_x is None else "[TGLF]Ge" + quantityX_stds = "GeGB_sim_turb_stds" if UseTGLFfull_x is None else None + quantityY = "GeGB_sim_turb" + quantityY_stds = "GeGB_sim_turb_stds" + metrics["Ge"] = plotModelComparison_quantity( + self, + axs[cont], + quantityX=quantityX, + quantityX_stds=quantityX_stds, + quantityY=quantityY, + quantityY_stds=quantityY_stds, + quantity_label="$\\Gamma_e^{GB}$", + title="Electron particle flux (GB)", + includeErrors=includeErrors, + includeMetric=includeMetric, + includeLeg=includeLegAll, + ) - if UseTGLFfull_x is None: - val_calc = self.mitim_runs[0]["powerstate"].model_results.__dict__[quantityX][0, 1:] - else: - val_calc = np.array( - [ - self.tglf_full.results["ev0"]["TGLFout"][j].__dict__[ - quantityX.replace("[TGLF]", "") + if UseTGLFfull_x is None: + val_calc = self.mitim_runs[0]["powerstate"].model_results.__dict__[quantityX][0, 1:] + else: + val_calc = np.array( + [ + 
self.tglf_full.results["ev0"]["output"][j].__dict__[ + quantityX.replace("[TGLF]", "") + ] + for j in range(len(self.rhos)) ] - for j in range(len(self.rhos)) - ] - ) + ) - try: - thre = 10 ** round(np.log10(np.abs(val_calc).min())) - axs[cont].set_xscale("symlog", linthresh=thre) - axs[cont].set_yscale("symlog", linthresh=thre) - # axs[2].tick_params(axis="both", which="major", labelsize=8) - except OverflowError: - pass + try: + thre = 10 ** round(np.log10(np.abs(val_calc).min())) + axs[cont].set_xscale("symlog", linthresh=thre) + axs[cont].set_yscale("symlog", linthresh=thre) + # axs[2].tick_params(axis="both", which="major", labelsize=8) + except OverflowError: + pass + + cont += 1 - cont += 1 - if "nZ" in self.ProfilesPredicted: + if "nZ" in self.predicted_channels: impurity_search = self.runWithImpurity_transport @@ -2146,7 +2151,7 @@ def PORTALSanalyzer_plotModelComparison( else: val_calc = np.array( [ - self.tglf_full.results["ev0"]["TGLFout"][j].__dict__[ + self.tglf_full.results["ev0"]["output"][j].__dict__[ quantityX.replace("[TGLF]", "") ] for j in range(len(self.rhos)) @@ -2160,7 +2165,7 @@ def PORTALSanalyzer_plotModelComparison( cont += 1 - if "w0" in self.ProfilesPredicted: + if "w0" in self.predicted_channels: if UseTGLFfull_x is not None: raise Exception("Momentum plot not implemented yet") # w0 @@ -2168,7 +2173,7 @@ def PORTALSanalyzer_plotModelComparison( quantityX_stds = "MtGB_sim_turb_stds" quantityY = "MtGB_sim_turb" quantityY_stds = "MtGB_sim_turb_stds" - metrics["Mt"] = plotModelComparison_quantity( + metrics["MtJm2"] = plotModelComparison_quantity( self, axs[cont], quantityX=quantityX, @@ -2196,7 +2201,7 @@ def PORTALSanalyzer_plotModelComparison( cont += 1 - if self.PORTALSparameters["surrogateForTurbExch"]: + if self.portals_parameters["solution"]["turbulent_exchange_as_surrogate"]: if UseTGLFfull_x is not None: raise Exception("Turbulent exchange plot not implemented yet") # Sexch @@ -2286,7 +2291,7 @@ def plotModelComparison_quantity( 
if "[TGLF]" in quantityX: X.append( [ - self.tglf_full.results[f"ev{i}"]["TGLFout"][j].__dict__[ + self.tglf_full.results[f"ev{i}"]["output"][j].__dict__[ quantityX.replace("[TGLF]", "") ] for j in range(len(self.rhos)) @@ -2414,8 +2419,8 @@ def varToReal(y, mitim_model): cont = 0 Qe, Qi, Ge, GZ, Mt = [], [], [], [], [] Qe_tar, Qi_tar, Ge_tar, GZ_tar, Mt_tar = [], [], [], [], [] - for prof in mitim_model.optimization_object.MODELparameters["ProfilesPredicted"]: - for rad in mitim_model.optimization_object.MODELparameters["RhoLocations"]: + for prof in mitim_model.optimization_object.portals_parameters["solution"]["predicted_channels"]: + for rad in mitim_model.optimization_object.portals_parameters["solution"]["predicted_rho"]: if prof == "te": Qe.append(of[0, cont]) Qe_tar.append(cal[0, cont]) @@ -2488,7 +2493,7 @@ def plotVars( .plasma["roa"][0, 1:] .cpu() .cpu().numpy() - ) # mitim_model.optimization_object.MODELparameters['RhoLocations'] + ) try: Qe, Qi, Ge, GZ, Mt, Qe_tar, Qi_tar, Ge_tar, GZ_tar, Mt_tar = varToReal( @@ -2830,7 +2835,7 @@ def plotFluxComparison( axne_f, axnZ_f, axw0_f, - forceZeroParticleFlux=False, + force_zero_particle_flux=False, runWithImpurity=3, labZ="Z", includeFirst=True, @@ -2882,7 +2887,7 @@ def plotFluxComparison( if axTe_f is not None: axTe_f.plot( r[0][ixF:], - power.plasma['Pe_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pe_tr_neo'].cpu().numpy()[0][ixF:], + power.plasma['QeMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QeMWm2_tr_neoc'].cpu().numpy()[0][ixF:], "-s", c=col, lw=2, @@ -2891,10 +2896,10 @@ def plotFluxComparison( alpha=alpha, ) - sigma = power.plasma['Pe_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['Pe_tr_neo_stds'].cpu().numpy()[0][ixF:] + sigma = power.plasma['QeMWm2_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['QeMWm2_tr_neoc_stds'].cpu().numpy()[0][ixF:] - m_Qe, M_Qe = (power.plasma['Pe_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pe_tr_neo'].cpu().numpy()[0][ixF:]) - stds * sigma, ( 
- power.plasma['Pe_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pe_tr_neo'].cpu().numpy()[0][ixF:] + m_Qe, M_Qe = (power.plasma['QeMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QeMWm2_tr_neoc'].cpu().numpy()[0][ixF:]) - stds * sigma, ( + power.plasma['QeMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QeMWm2_tr_neoc'].cpu().numpy()[0][ixF:] ) + stds * sigma axTe_f.fill_between(r[0][ixF:], m_Qe, M_Qe, facecolor=col, alpha=alpha / 3) @@ -2905,7 +2910,7 @@ def plotFluxComparison( if axTi_f is not None: axTi_f.plot( r[0][ixF:], - power.plasma['Pi_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pi_tr_neo'].cpu().numpy()[0][ixF:], + power.plasma['QiMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QiMWm2_tr_neoc'].cpu().numpy()[0][ixF:], "-s", markersize=msFlux, c=col, @@ -2915,13 +2920,13 @@ def plotFluxComparison( ) sigma = ( - power.plasma['Pi_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['Pi_tr_neo_stds'].cpu().numpy()[0][ixF:] + power.plasma['QiMWm2_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['QiMWm2_tr_neoc_stds'].cpu().numpy()[0][ixF:] ) m_Qi, M_Qi = ( - power.plasma['Pi_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pi_tr_neo'].cpu().numpy()[0][ixF:] + power.plasma['QiMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QiMWm2_tr_neoc'].cpu().numpy()[0][ixF:] ) - stds * sigma, ( - power.plasma['Pi_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pi_tr_neo'].cpu().numpy()[0][ixF:] + power.plasma['QiMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QiMWm2_tr_neoc'].cpu().numpy()[0][ixF:] ) + stds * sigma axTi_f.fill_between(r[0][ixF:], m_Qi, M_Qi, facecolor=col, alpha=alpha / 3) @@ -2931,7 +2936,7 @@ def plotFluxComparison( if axne_f is not None: - Ge = power.plasma['Ce_raw_tr_turb'].cpu().numpy() + power.plasma['Ce_raw_tr_neo'].cpu().numpy() + Ge = power.plasma['Ge1E20m2_tr_turb'].cpu().numpy() + power.plasma['Ge1E20m2_tr_neoc'].cpu().numpy() axne_f.plot( r[0][ixF:], @@ -2944,7 +2949,7 @@ def plotFluxComparison( 
alpha=alpha, ) - sigma = power.plasma['Ce_raw_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['Ce_raw_tr_neo_stds'].cpu().numpy()[0][ixF:] + sigma = power.plasma['Ge1E20m2_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['Ge1E20m2_tr_neoc_stds'].cpu().numpy()[0][ixF:] m_Ge, M_Ge = Ge[0][ixF:] - stds * sigma, Ge[0][ixF:] + stds * sigma @@ -2955,7 +2960,7 @@ def plotFluxComparison( # ----------------------------------------------------------------------------------------------- if axnZ_f is not None: - GZ = power.plasma['CZ_raw_tr_turb'].cpu().numpy() + power.plasma['CZ_raw_tr_neo'].cpu().numpy() + GZ = power.plasma['GZ1E20m2_tr_turb'].cpu().numpy() + power.plasma['GZ1E20m2_tr_neoc'].cpu().numpy() axnZ_f.plot( r[0][ixF:], @@ -2968,7 +2973,7 @@ def plotFluxComparison( alpha=alpha, ) - sigma = power.plasma['CZ_raw_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['CZ_raw_tr_neo_stds'].cpu().numpy()[0][ixF:] + sigma = power.plasma['GZ1E20m2_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['GZ1E20m2_tr_neoc_stds'].cpu().numpy()[0][ixF:] m_Gi, M_Gi = ( GZ[0][ixF:] - stds * sigma, @@ -2983,7 +2988,7 @@ def plotFluxComparison( if axw0_f is not None: axw0_f.plot( r[0][ixF:], - power.plasma['Mt_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Mt_tr_neo'].cpu().numpy()[0][ixF:], + power.plasma['MtJm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['MtJm2_tr_neoc'].cpu().numpy()[0][ixF:], "-s", markersize=msFlux, c=col, @@ -2992,10 +2997,10 @@ def plotFluxComparison( alpha=alpha, ) - sigma = power.plasma['Mt_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['Mt_tr_neo_stds'].cpu().numpy()[0][ixF:] + sigma = power.plasma['MtJm2_tr_turb_stds'].cpu().numpy()[0][ixF:] + power.plasma['MtJm2_tr_neoc_stds'].cpu().numpy()[0][ixF:] - m_Mt, M_Mt = (power.plasma['Mt_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Mt_tr_neo'].cpu().numpy()[0][ixF:]) - stds * sigma, ( - power.plasma['Mt_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Mt_tr_neo'].cpu().numpy()[0][ixF:] + 
m_Mt, M_Mt = (power.plasma['MtJm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['MtJm2_tr_neoc'].cpu().numpy()[0][ixF:]) - stds * sigma, ( + power.plasma['MtJm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['MtJm2_tr_neoc'].cpu().numpy()[0][ixF:] ) + stds * sigma axw0_f.fill_between(r[0][ixF:], m_Mt, M_Mt, facecolor=col, alpha=alpha / 3) @@ -3005,11 +3010,11 @@ def plotFluxComparison( # Retrieve targets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Qe_tar = power.plasma['Pe'].cpu().numpy()[0][ixF:] - Qi_tar = power.plasma['Pi'].cpu().numpy()[0][ixF:] - Ge_tar = power.plasma['Ce_raw'].cpu().numpy()[0][ixF:] * (1-int(forceZeroParticleFlux)) - GZ_tar = power.plasma['CZ_raw'].cpu().numpy()[0][ixF:] - Mt_tar = power.plasma['Mt'].cpu().numpy()[0][ixF:] + Qe_tar = power.plasma['QeMWm2'].cpu().numpy()[0][ixF:] + Qi_tar = power.plasma['QiMWm2'].cpu().numpy()[0][ixF:] + Ge_tar = power.plasma['Ge1E20m2'].cpu().numpy()[0][ixF:] * (1-int(force_zero_particle_flux)) + GZ_tar = power.plasma['GZ1E20m2'].cpu().numpy()[0][ixF:] + Mt_tar = power.plasma['MtJm2'].cpu().numpy()[0][ixF:] # Plot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -3107,7 +3112,7 @@ def plotFluxComparison( y = tBest.derived[var] * mult if var == "ge_10E20m2": - y *= 1 - int(forceZeroParticleFlux) + y *= 1 - int(force_zero_particle_flux) ax.plot( (tBest.profiles["rho(-)"] if not useRoa else tBest.derived["roa"]), @@ -3126,7 +3131,7 @@ def plotFluxComparison( if axTe_f is not None: (l1,) = axTe_f.plot( r[0][ixF:], - power.plasma['Pe_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pe_tr_neo'].cpu().numpy()[0][ixF:], + power.plasma['QeMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QeMWm2_tr_neoc'].cpu().numpy()[0][ixF:], "-", c="k", lw=2, @@ -3134,12 +3139,12 @@ def plotFluxComparison( label="Transport", ) (l2,) = axTe_f.plot( - r[0][ixF:], power.plasma['Pe'].cpu().numpy()[0][ixF:], "--*", c="k", lw=2, markersize=0, 
label="Target" + r[0][ixF:], power.plasma['QeMWm2'].cpu().numpy()[0][ixF:], "--*", c="k", lw=2, markersize=0, label="Target" ) l3 = axTe_f.fill_between( r[0][ixF:], - (power.plasma['Pe_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pe_tr_neo'].cpu().numpy()[0][ixF:]) - stds, - (power.plasma['Pe_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['Pe_tr_neo'].cpu().numpy()[0][ixF:]) + stds, + (power.plasma['QeMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QeMWm2_tr_neoc'].cpu().numpy()[0][ixF:]) - stds, + (power.plasma['QeMWm2_tr_turb'].cpu().numpy()[0][ixF:] + power.plasma['QeMWm2_tr_neoc'].cpu().numpy()[0][ixF:]) + stds, facecolor="k", alpha=0.3, ) @@ -3249,7 +3254,7 @@ def plotFluxComparison( def produceInfoRanges( self_complete, bounds, axsR, label="", color="k", lw=0.2, alpha=0.05 ): - rhos = np.append([0], self_complete.MODELparameters["RhoLocations"]) + rhos = np.append([0], self_complete.portals_parameters["solution"]["predicted_rho"]) aLTe, aLTi, aLne, aLnZ, aLw0 = ( np.zeros((len(rhos), 2)), np.zeros((len(rhos), 2)), @@ -3270,19 +3275,19 @@ def produceInfoRanges( if f"aLw0_{i+1}" in bounds: aLw0[i + 1, :] = bounds[f"aLw0_{i+1}"] - X = torch.zeros(((len(rhos) - 1) * len(self_complete.MODELparameters["ProfilesPredicted"]), 2)) + X = torch.zeros(((len(rhos) - 1) * len(self_complete.portals_parameters["solution"]["predicted_channels"]), 2)) l = len(rhos) - 1 X[0:l, :] = torch.from_numpy(aLTe[1:, :]) X[l : 2 * l, :] = torch.from_numpy(aLTi[1:, :]) cont = 0 - if "ne" in self_complete.MODELparameters["ProfilesPredicted"]: + if "ne" in self_complete.portals_parameters["solution"]["predicted_channels"]: X[(2 + cont) * l : (3 + cont) * l, :] = torch.from_numpy(aLne[1:, :]) cont += 1 - if "nZ" in self_complete.MODELparameters["ProfilesPredicted"]: + if "nZ" in self_complete.portals_parameters["solution"]["predicted_channels"]: X[(2 + cont) * l : (3 + cont) * l, :] = torch.from_numpy(aLnZ[1:, :]) cont += 1 - if "w0" in 
self_complete.MODELparameters["ProfilesPredicted"]: + if "w0" in self_complete.portals_parameters["solution"]["predicted_channels"]: X[(2 + cont) * l : (3 + cont) * l, :] = torch.from_numpy(aLw0[1:, :]) cont += 1 @@ -3333,7 +3338,7 @@ def produceInfoRanges( ) cont = 0 - if "ne" in self_complete.MODELparameters["ProfilesPredicted"]: + if "ne" in self_complete.portals_parameters["solution"]["predicted_channels"]: GRAPHICStools.fillGraph( axsR[3 + cont + 1], powerstate.plasma["rho"][0], @@ -3356,7 +3361,7 @@ def produceInfoRanges( ) cont += 2 - if "nZ" in self_complete.MODELparameters["ProfilesPredicted"]: + if "nZ" in self_complete.portals_parameters["solution"]["predicted_channels"]: GRAPHICStools.fillGraph( axsR[3 + cont + 1], powerstate.plasma["rho"][0], @@ -3379,7 +3384,7 @@ def produceInfoRanges( ) cont += 2 - if "w0" in self_complete.MODELparameters["ProfilesPredicted"]: + if "w0" in self_complete.portals_parameters["solution"]["predicted_channels"]: GRAPHICStools.fillGraph( axsR[3 + cont + 1], powerstate.plasma["rho"][0], diff --git a/src/mitim_modules/powertorch/STATEtools.py b/src/mitim_modules/powertorch/STATEtools.py index dda5a86d..02c43345 100644 --- a/src/mitim_modules/powertorch/STATEtools.py +++ b/src/mitim_modules/powertorch/STATEtools.py @@ -2,17 +2,21 @@ import torch import datetime import shutil -from pathlib import Path +from types import MethodType import matplotlib.pyplot as plt import dill as pickle from mitim_tools.misc_tools import PLASMAtools, IOtools from mitim_tools.gacode_tools import PROFILEStools +from mitim_tools.plasmastate_tools.utils import state_plotting from mitim_modules.powertorch.utils import TRANSFORMtools, POWERplot -from mitim_tools.opt_tools.optimizers import optim -from mitim_modules.powertorch.physics import TARGETStools, CALCtools, TRANSPORTtools +from mitim_tools.opt_tools.optimizers import multivariate_tools +from mitim_modules.powertorch.utils import TARGETStools, CALCtools, TRANSPORTtools +from 
mitim_modules.powertorch.physics_models import targets_analytic from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed +from mitim_tools.misc_tools.PLASMAtools import md_u + # ------------------------------------------------------------------ # POWERSTATE Class # ------------------------------------------------------------------ @@ -20,51 +24,73 @@ class powerstate: def __init__( self, - profiles, + profiles_object, increase_profile_resol=True, - EvolutionOptions={}, - TransportOptions={ - "transport_evaluator": None, - "ModelOptions": {} - }, - TargetOptions={ - "targets_evaluator": TARGETStools.analytical_model, - "ModelOptions": { - "TypeTarget": 3, - "TargetCalc": "powerstate" - }, - }, - tensor_opts = { - "dtype": torch.double, - "device": torch.device("cpu"), - } + evolution_options=None, + transport_options=None, + target_options=None, + tensor_options=None, ): ''' Inputs: - - profiles: PROFILES_GACODE object - - EvolutionOptions: + - profiles_object: Object for gacode_state or others + - evolution_options: - rhoPredicted: radial grid (MUST NOT CONTAIN ZERO, it will be added internally) - - ProfilesPredicted: list of profiles to predict - - useConvectiveFluxes: boolean = whether to use convective fluxes instead of particle fluxes for FM + - predicted_channels: list of profiles to predict - impurityPosition: int = position of the impurity in the ions set - - fineTargetsResolution: int = resolution of the fine targets - - TransportOptions: dictionary with transport_evaluator and ModelOptions - - TargetOptions: dictionary with targets_evaluator and ModelOptions + - transport_options: dictionary with transport_evaluator and transport_evaluator_options + - target_options: dictionary with target_evaluator and target_evaluator_options ''' + if evolution_options is None: + evolution_options = {} + + if transport_options is None: + transport_options = { + "evaluator": None, + "options": {} + } + + 
transport_options.setdefault("evaluator_instance_attributes", {}) + transport_options.setdefault("cold_start", False) + + # Target options defaults -------------------- + if target_options is None: + target_options = {} + if "options" not in target_options: + target_options["options"] = {} + + target_options.setdefault("evaluator", targets_analytic.analytical_model) + target_options["options"].setdefault("target_evaluator_method", "powerstate") + target_options["options"].setdefault("targets_evolve", ["qie", "qrad", "qfus"]) + target_options["options"].setdefault("force_zero_particle_flux", False) + target_options["options"].setdefault("percent_error", 1.0) + # --------------------------------------------- + + if tensor_options is None: + tensor_options = { + "dtype": torch.double, + "device": torch.device("cpu"), + } + + # ------------------------------------------------------------------------------------- + # Check inputs + # ------------------------------------------------------------------------------------- + print('>> Creating powerstate object...') - self.TransportOptions = TransportOptions - self.TargetOptions = TargetOptions + self.transport_options = transport_options + self.target_options = target_options # Default options - self.ProfilesPredicted = EvolutionOptions.get("ProfilePredicted", ["te", "ti", "ne"]) - self.useConvectiveFluxes = EvolutionOptions.get("useConvectiveFluxes", True) - self.impurityPosition = EvolutionOptions.get("impurityPosition", 1) + self.predicted_channels = evolution_options.get("ProfilePredicted", ["te", "ti", "ne"]) + self.impurityPosition = evolution_options.get("impurityPosition", 1) self.impurityPosition_transport = copy.deepcopy(self.impurityPosition) - self.fineTargetsResolution = EvolutionOptions.get("fineTargetsResolution", None) - self.scaleIonDensities = EvolutionOptions.get("scaleIonDensities", True) - rho_vec = EvolutionOptions.get("rhoPredicted", [0.2, 0.4, 0.6, 0.8]) + self.scaleIonDensities = 
evolution_options.get("scaleIonDensities", True) + self.fImp_orig = evolution_options.get("fImp_orig", 1.0) + rho_vec = evolution_options.get("rhoPredicted", [0.2, 0.4, 0.6, 0.8]) + + self.targets_resolution = target_options["options"].get("targets_resolution", None) if rho_vec[0] == 0: raise ValueError("[MITIM] The radial grid must not contain the initial zero") @@ -78,23 +104,23 @@ def _ensure_ne_before_nz(lst): # Swap "ne" and "nZ" positions lst[ne_index], lst[nz_index] = lst[nz_index], lst[ne_index] return lst - self.ProfilesPredicted = _ensure_ne_before_nz(self.ProfilesPredicted) + self.predicted_channels = _ensure_ne_before_nz(self.predicted_channels) # Default type and device tensor - self.dfT = torch.randn((2, 2), **tensor_opts) + self.dfT = torch.randn((2, 2), **tensor_options) ''' Potential profiles to evolve (aLX) and their corresponding flux matching ------------------------------------------------------------------------ The order in the P and P_tr (and therefore the source S) - tensors will be the same as in self.ProfilesPredicted + tensors will be the same as in self.predicted_channels ''' self.profile_map = { - "te": ("Pe", "Pe_tr"), - "ti": ("Pi", "Pi_tr"), + "te": ("QeMWm2", "QeMWm2_tr"), + "ti": ("QiMWm2", "QiMWm2_tr"), "ne": ("Ce", "Ce_tr"), "nZ": ("CZ", "CZ_tr"), - "w0": ("Mt", "Mt_tr") + "w0": ("MtJm2", "MtJm2_tr") } # ------------------------------------------------------------------------------------- @@ -113,23 +139,30 @@ def _ensure_ne_before_nz(lst): ), torch.Tensor().to(self.dfT) self.labelsFM = [] - for profile in self.ProfilesPredicted: + for profile in self.predicted_channels: self.labelsFM.append([f'aL{profile}', list(self.profile_map[profile])[0], list(self.profile_map[profile])[1]]) # ------------------------------------------------------------------------------------- - # input.gacode + # Object type (e.g. 
input.gacode) # ------------------------------------------------------------------------------------- - # Use a copy because I'm deriving, it may be expensive and I don't want to carry that out outside of this class - self.profiles = copy.deepcopy(profiles) - if "derived" not in self.profiles.__dict__: - self.profiles.deriveQuantities() + if isinstance(profiles_object, PROFILEStools.gacode_state): + self.to_powerstate = TRANSFORMtools.gacode_to_powerstate + self.from_powerstate = MethodType(TRANSFORMtools.to_gacode, self) + + # Use a copy because I'm deriving, it may be expensive and I don't want to carry that out outside of this class + self.profiles = copy.deepcopy(profiles_object) + if "derived" not in self.profiles.__dict__: + self.profiles.derive_quantities() + + else: + raise ValueError("[MITIM] The input profile object is not recognized, please use gacode_state") # ------------------------------------------------------------------------------------- # Fine targets (need to do it here so that it's only once per definition of powerstate) # ------------------------------------------------------------------------------------- - if self.fineTargetsResolution is None: + if self.targets_resolution is None: self.plasma_fine, self.positions_targets = None, None else: self._fine_grid() @@ -143,7 +176,7 @@ def _ensure_ne_before_nz(lst): TRANSFORMtools.improve_resolution_profiles(self.profiles, rho_vec) # Convert to powerstate - TRANSFORMtools.gacode_to_powerstate(self, self.profiles, self.plasma["rho"]) + self.to_powerstate(self) # Convert into a batch so that always the quantities are (batch,dimX) self.batch_size = 0 @@ -154,7 +187,7 @@ def _ensure_ne_before_nz(lst): def _high_res_rho(self): rho_new = torch.linspace( - self.plasma["rho"][0], self.plasma["rho"][-1], self.fineTargetsResolution + self.plasma["rho"][0], self.plasma["rho"][-1], self.targets_resolution ).to(self.plasma["rho"]) for i in self.plasma["rho"]: if not torch.isclose( @@ -189,51 +222,12 @@ def 
_fine_grid(self): ) # Recalculate with higher resolution - TRANSFORMtools.gacode_to_powerstate(self, self.profiles, rho_new) + TRANSFORMtools.gacode_to_powerstate(self, rho_vec = rho_new) self.plasma_fine = copy.deepcopy(self.plasma) # Revert plasma back self.plasma = plasma_copy - def to_gacode( - self, - write_input_gacode=None, - position_in_powerstate_batch=0, - postprocess_input_gacode={}, - insert_highres_powers=False, - rederive_profiles=True, - ): - ''' - Notes: - - insert_highres_powers: whether to insert high resolution powers (will calculate them with powerstate targets object, not other custom ones) - ''' - print(">> Inserting powerstate into input.gacode") - - profiles = TRANSFORMtools.powerstate_to_gacode( - self, - position_in_powerstate_batch=position_in_powerstate_batch, - postprocess_input_gacode=postprocess_input_gacode, - insert_highres_powers=insert_highres_powers, - rederive=rederive_profiles, - ) - - # Write input.gacode - if write_input_gacode is not None: - write_input_gacode = Path(write_input_gacode) - print(f"\t- Writing input.gacode file: {IOtools.clipstr(write_input_gacode)}") - write_input_gacode.parent.mkdir(parents=True, exist_ok=True) - profiles.writeCurrentStatus(file=write_input_gacode) - - # If corrections modify the ions set... 
it's better to re-read, otherwise powerstate will be confused - if rederive_profiles: - TRANSFORMtools.defineIons(self, profiles, self.plasma["rho"][position_in_powerstate_batch, :], self.dfT) - # Repeat, that's how it's done earlier - self._repeat_tensors(batch_size=self.plasma["rho"].shape[0], - specific_keys=["ni","ions_set_mi","ions_set_Zi","ions_set_Dion","ions_set_Tion","ions_set_c_rad"], - positionToUnrepeat=None) - - return profiles - # ------------------------------------------------------------------ # Storing and combining # ------------------------------------------------------------------ @@ -244,7 +238,7 @@ def save(self, file): pickle.dump(self, handle, protocol=4) def combine_states(self, states, includeTransport=True): - self.TransportOptions_set = [self.TransportOptions] + self.transport_options_set = [self.transport_options] self.profiles_stored_set = self.profiles_stored for state in states: @@ -253,17 +247,17 @@ def combine_states(self, states, includeTransport=True): self.plasma[key] ) - self.TransportOptions_set.append(state.TransportOptions) + self.transport_options_set.append(state.transport_options) self.profiles_stored_set += state.profiles_stored if includeTransport: for key in ["chi_e", "chi_i"]: - self.TransportOptions["ModelOptions"][key] = torch.cat( + self.transport_options["options"][key] = torch.cat( ( - self.TransportOptions["ModelOptions"][key], - state.TransportOptions["ModelOptions"][key], + self.transport_options["options"][key], + state.transport_options["options"][key], ) - ).to(self.TransportOptions["ModelOptions"][key]) + ).to(self.transport_options["options"][key]) def copy_state(self): @@ -311,8 +305,8 @@ def calculate( self.calculateProfileFunctions() # 3. Sources and sinks (populates components and Pe,Pi,...) 
- assumedPercentError = self.TransportOptions["ModelOptions"].get("percentError", [5, 1, 0.5])[-1] - self.calculateTargets(assumedPercentError=assumedPercentError) # Calculate targets based on powerstate functions (it may be overwritten in next step, if chosen) + relative_error_assumed = self.target_options["options"]["percent_error"] + self.calculateTargets(relative_error_assumed=relative_error_assumed) # Calculate targets based on powerstate functions (it may be overwritten in next step, if chosen) # 4. Turbulent and neoclassical transport (populates components and Pe_tr,Pi_tr,...) self.calculateTransport( @@ -335,7 +329,7 @@ def modify(self, X): self.Xcurrent = X numeach = self.plasma["rho"].shape[1] - 1 - for c, i in enumerate(self.ProfilesPredicted): + for c, i in enumerate(self.predicted_channels): if X is not None: aLx_before = self.plasma[f"aL{i}"][:, 1:].clone() @@ -350,7 +344,7 @@ def modify(self, X): self.update_var(i) - def flux_match(self, algorithm="root", solver_options=None, bounds=None): + def flux_match(self, algorithm="root", solver_options=None, bounds=None, debugYN=False): self.FluxMatch_plasma_orig = copy.deepcopy(self.plasma) self.bounds_current = bounds @@ -359,9 +353,9 @@ def flux_match(self, algorithm="root", solver_options=None, bounds=None): timeBeginning = datetime.datetime.now() if algorithm == "root": - solver_fun = optim.scipy_root + solver_fun = multivariate_tools.scipy_root elif algorithm == "simple_relax": - solver_fun = optim.simple_relaxation + solver_fun = multivariate_tools.simple_relaxation else: raise ValueError(f"[MITIM] Algorithm {algorithm} not recognized") @@ -380,21 +374,21 @@ def evaluator(X, y_history=None, x_history=None, metric_history=None): if folder_main is not None: folder = IOtools.expandPath(folder_main) / f"{namingConvention}_{cont}" - if issubclass(self.TransportOptions["transport_evaluator"], TRANSPORTtools.power_transport): - (folder / "model_complete").mkdir(parents=True, exist_ok=True) + if 
issubclass(self.transport_options["evaluator"], TRANSPORTtools.power_transport): + (folder / "transport_simulation_folder").mkdir(parents=True, exist_ok=True) # *************************************************************************************************************** # Calculate # *************************************************************************************************************** - folder_run = folder / "model_complete" if folder_main is not None else IOtools.expandPath('~/scratch/') + folder_run = folder / "transport_simulation_folder" if folder_main is not None else IOtools.expandPath('~/scratch/') QTransport, QTarget, _, _ = self.calculate(X, nameRun=nameRun, folder=folder_run, evaluation_number=cont) cont += 1 # Save state so that I can check initializations if folder_main is not None: - if issubclass(self.TransportOptions["transport_evaluator"], TRANSPORTtools.power_transport): + if issubclass(self.transport_options["evaluator"], TRANSPORTtools.power_transport): self.save(folder / "powerstate.pkl") shutil.copy2(folder_run / "input.gacode", folder) @@ -404,40 +398,45 @@ def evaluator(X, y_history=None, x_history=None, metric_history=None): # Residual is the difference between the target and the transport yRes = (QTarget - QTransport).abs() + # Metric is the mean of the absolute value of the residual - yMetric = -yRes.mean(axis=-1) - # Best in batch - best_candidate = yMetric.argmax().item() - # Only pass the best candidate - yRes = yRes[best_candidate, :].detach() - yMetric = yMetric[best_candidate].detach() - Xpass = X[best_candidate, :].detach() + yMetric = -yRes.mean(axis=-1).detach() # Store values - if y_history is not None: y_history.append(yRes) - if x_history is not None: x_history.append(Xpass) - if metric_history is not None: metric_history.append(yMetric) + if y_history is not None: + y_history.append(yRes.detach()) + if x_history is not None: + x_history.append(X.detach()) + if metric_history is not None: + 
metric_history.append(yMetric) return QTransport, QTarget, yMetric # Concatenate the input gradients x0 = torch.Tensor().to(self.plasma["aLte"]) - for c, i in enumerate(self.ProfilesPredicted): + for c, i in enumerate(self.predicted_channels): x0 = torch.cat((x0, self.plasma[f"aL{i}"][:, 1:].detach()), dim=1) # Make sure is properly batched - x0 = x0.view((self.plasma["rho"].shape[0],(self.plasma["rho"].shape[1] - 1) * len(self.ProfilesPredicted),)) + x0 = x0.view((self.plasma["rho"].shape[0],(self.plasma["rho"].shape[1] - 1) * len(self.predicted_channels),)) # Optimize - _,Yopt, Xopt, metric_history = solver_fun(evaluator,x0, bounds=self.bounds_current,solver_options=solver_options) + x_best,Yopt, Xopt, metric_history = solver_fun(evaluator,x0, bounds=self.bounds_current,solver_options=solver_options) # For simplicity, return the trajectory of only the best candidate - self.FluxMatch_Yopt = Yopt - self.FluxMatch_Xopt = Xopt + + idx_flat = metric_history.argmax() + index_best = divmod(idx_flat.item(), metric_history.shape[1]) + + self.FluxMatch_Yopt, self.FluxMatch_Xopt = Yopt[:,index_best[1],:], Xopt[:,index_best[1],:] print("**********************************************************************************************") print(f"\t- Flux matching of powerstate finished, and took {IOtools.getTimeDifference(timeBeginning)}\n") + if debugYN: + self.plot() + embed() + # ------------------------------------------------------------------ # Plotting tools # ------------------------------------------------------------------ @@ -457,17 +456,17 @@ def plot(self, axs=None, axsRes=None, axsMetrics=None, figs=None, fn=None,c="r", figMain = fn.add_figure(label="PowerState", tab_color='r') # Optimization figOpt = fn.add_figure(label="Optimization", tab_color='r') - grid = plt.GridSpec(2, 1+len(self.ProfilesPredicted), hspace=0.3, wspace=0.3) + grid = plt.GridSpec(2, 1+len(self.predicted_channels), hspace=0.3, wspace=0.3) axsRes = [figOpt.add_subplot(grid[:, 0])] - for i in 
range(len(self.ProfilesPredicted)): + for i in range(len(self.predicted_channels)): for j in range(2): axsRes.append(figOpt.add_subplot(grid[j, i+1])) # Profiles - figs = PROFILEStools.add_figures(fn, tab_color='b') + figs = state_plotting.add_figures(fn, tab_color='b') - axs, axsMetrics = add_axes_powerstate_plot(figMain, num_kp = len(self.ProfilesPredicted)) + axs, axsMetrics = add_axes_powerstate_plot(figMain, num_kp = len(self.predicted_channels)) else: axsNotGiven = False @@ -563,7 +562,7 @@ def _cpu_tensors(self): if hasattr(self, 'profiles'): self.profiles.toNumpyArrays() - def update_var(self, name, var=None, specific_deparametrizer=None): + def update_var(self, name, var=None, specific_profile_constructor=None): """ This inserts gradients and updates coarse profiles @@ -577,17 +576,13 @@ def update_var(self, name, var=None, specific_deparametrizer=None): # General function to update a variable # ------------------------------------------------------------------------------------- - deparametrizers_choice = ( - self.deparametrizers_coarse - if specific_deparametrizer is None - else specific_deparametrizer - ) + profile_constructor_choice = self.profile_constructors_coarse if specific_profile_constructor is None else specific_profile_constructor def _update_plasma_var(var_key, clamp_min=None, clamp_max=None): if var is not None: self.plasma[f"aL{var_key}"][: var.shape[0], :] = var[:, :] aLT_withZero = self.plasma[f"aL{var_key}"] - _, varN = deparametrizers_choice[var_key]( + _, varN = profile_constructor_choice[var_key]( self.plasma["roa"], aLT_withZero) self.plasma[var_key] = varN.clamp(min=clamp_min, max=clamp_max) if ( (clamp_min is not None) or (clamp_max is not None) ) else varN self.plasma[f"aL{var_key}"] = torch.cat( @@ -642,7 +637,7 @@ def _update_plasma_var(var_key, clamp_min=None, clamp_max=None): # Toolset for calculation # ------------------------------------------------------------------ - def calculateProfileFunctions(self, 
calculateRotationQuantities=True, mref=2.01355): + def calculateProfileFunctions(self, calculateRotationQuantities=True, mref=md_u): """ Update the normalizations of the current state Notes: @@ -694,19 +689,19 @@ def calculateProfileFunctions(self, calculateRotationQuantities=True, mref=2.013 self.plasma["w0_n"] = self.plasma["w0"] / self.plasma["c_s"] self.plasma["aLw0_n"] = (self.plasma["aLw0"] * self.plasma["w0"] / self.plasma["c_s"]) # aLw0 * w0 = -a*dw0/dr; then aLw0_n = -dw0/dr * a/c_s - def calculateTargets(self, assumedPercentError=1.0): + def calculateTargets(self, relative_error_assumed=1.0): """ Update the targets of the current state """ - # If no targets evaluator is given or the targets will come from TGYRO, assume them as zero - if (self.TargetOptions["targets_evaluator"] is None) or (self.TargetOptions["ModelOptions"]["TargetCalc"] == "tgyro"): + # If no targets evaluator is given or the targets will come from previous calculations (from transport), assume them as zero + if (self.target_options["evaluator"] is None) or (self.target_options["options"]["target_evaluator_method"] == "tgyro"): targets = TARGETStools.power_targets(self) else: - targets = self.TargetOptions["targets_evaluator"](self) + targets = self.target_options["evaluator"](self) # [Optional] Calculate local targets and integrals on a fine grid - if self.fineTargetsResolution is not None: + if self.targets_resolution is not None: targets.fine_grid() # Evaluate local quantities @@ -716,14 +711,14 @@ def calculateTargets(self, assumedPercentError=1.0): targets.flux_integrate() # Come back to original grid - if self.fineTargetsResolution is not None: + if self.targets_resolution is not None: targets.coarse_grid() # Merge targets, calculate errors and normalize targets.postprocessing( - assumedPercentError=assumedPercentError, - useConvectiveFluxes=self.useConvectiveFluxes, - forceZeroParticleFlux=self.TransportOptions["ModelOptions"].get("forceZeroParticleFlux", False)) + 
relative_error_assumed=relative_error_assumed, + force_zero_particle_flux=self.target_options["options"]["force_zero_particle_flux"] + ) def calculateTransport( self, nameRun="test", folder="~/scratch/", evaluation_number=0): @@ -733,10 +728,14 @@ def calculateTransport( folder = IOtools.expandPath(folder) # Select transport evaluator - if self.TransportOptions["transport_evaluator"] is None: + if self.transport_options["evaluator"] is None: transport = TRANSPORTtools.power_transport( self, name=nameRun, folder=folder, evaluation_number=evaluation_number ) else: - transport = self.TransportOptions["transport_evaluator"]( self, name=nameRun, folder=folder, evaluation_number=evaluation_number ) + transport = self.transport_options["evaluator"]( self, name=nameRun, folder=folder, evaluation_number=evaluation_number ) + + # The transport class may have instanciating attributes + for key in self.transport_options["evaluator_instance_attributes"]: + setattr(transport, key, self.transport_options["evaluator_instance_attributes"][key]) # Produce profile object (for certain transport evaluators, this is necessary) transport.produce_profiles() @@ -758,27 +757,29 @@ def _concatenate_flux(plasma, profile_key, flux_key): plasma["P"] = torch.cat((plasma["P"], plasma[profile_key][:, 1:]), dim=1).to(plasma["P"].device) plasma["P_tr"] = torch.cat((plasma["P_tr"], plasma[flux_key][:, 1:]), dim=1).to(plasma["P"].device) - self.plasma["P"], self.plasma["P_tr"] = torch.Tensor().to(self.plasma["Pe"]), torch.Tensor().to(self.plasma["Pe"]) + self.plasma["P"], self.plasma["P_tr"] = torch.Tensor().to(self.plasma["QeMWm2"]), torch.Tensor().to(self.plasma["QeMWm2"]) - for profile in self.ProfilesPredicted: + for profile in self.predicted_channels: _concatenate_flux(self.plasma, *self.profile_map[profile]) self.plasma["S"] = self.plasma["P"] - self.plasma["P_tr"] self.plasma["residual"] = self.plasma["S"].abs().mean(axis=1, keepdim=True) - def volume_integrate(self, var, force_dim=None): + def 
from_density_to_flux(self, var, force_dim=None): """ If var in MW/m^3, this gives as output the MW/m^2 profile """ + + surface_used = self.plasma["volp"] # IMPORTANT Note: This is the GACODE definition, acknowledging that volp=dV/dr is not equal to the surface area if force_dim is None: - return CALCtools.integrateQuadPoly( - self.plasma["rmin"], var * self.plasma["volp"] - ) / self.plasma["volp"] + return CALCtools.volume_integration( + var, self.plasma["rmin"], self.plasma["volp"] + ) / surface_used else: - return CALCtools.integrateQuadPoly( - self.plasma["rmin"][0,:].repeat(force_dim,1), var * self.plasma["volp"][0,:].repeat(force_dim,1), - ) / self.plasma["volp"][0,:].repeat(force_dim,1) + return CALCtools.volume_integration( + var, self.plasma["rmin"][0,:].repeat(force_dim,1), self.plasma["volp"][0,:].repeat(force_dim,1) + ) / surface_used[0,:].repeat(force_dim,1) def add_axes_powerstate_plot(figMain, num_kp=3): diff --git a/src/mitim_modules/powertorch/physics/GEOMETRYtools.py b/src/mitim_modules/powertorch/physics/GEOMETRYtools.py deleted file mode 100644 index 1b09ef46..00000000 --- a/src/mitim_modules/powertorch/physics/GEOMETRYtools.py +++ /dev/null @@ -1,401 +0,0 @@ -import numpy as np -from mitim_tools.misc_tools import MATHtools -from IPython import embed - - -def calculateGeometricFactors(profiles, n_theta=1001): - - # ---------------------------------------- - # Raw parameters from the file - # in expro_util.f90, it performs those divisions to pass to geo library - # ---------------------------------------- - - r = profiles.profiles["rmin(m)"] / profiles.profiles["rmin(m)"][-1] - R = profiles.profiles["rmaj(m)"] / profiles.profiles["rmin(m)"][-1] - kappa = profiles.profiles["kappa(-)"] - delta = profiles.profiles["delta(-)"] - zeta = profiles.profiles["zeta(-)"] - zmag = profiles.profiles["zmag(m)"] / profiles.profiles["rmin(m)"][-1] - q = profiles.profiles["q(-)"] - - shape_coeffs = profiles.shape_cos + profiles.shape_sin - - # 
---------------------------------------- - # Derivatives as defined in expro_util.f90 - # ---------------------------------------- - - s_delta = r * MATHtools.deriv(r, delta) - s_kappa = r / kappa * MATHtools.deriv(r, kappa) - s_zeta = r * MATHtools.deriv(r, zeta) - dzmag = MATHtools.deriv(r, zmag) - dRmag = MATHtools.deriv(r, R) - - s_shape_coeffs = [] - for i in range(len(shape_coeffs)): - if shape_coeffs[i] is not None: - s_shape_coeffs.append(r * MATHtools.deriv(r, shape_coeffs[i])) - else: - s_shape_coeffs.append(None) - - # ---------------------------------------- - # Calculate the differencial volume at each radii - # from f2py/geo/geo.f90 in gacode source we have geo_volume_prime. - # ---------------------------------------- - - # Prepare cos_sins - cos_sin = [] - cos_sin_s = [] - for j in range(len(R)): - cos_sin0 = [] - cos_sin_s0 = [] - for k in range(len(shape_coeffs)): - if shape_coeffs[k] is not None: - cos_sin0.append(shape_coeffs[k][j]) - cos_sin_s0.append(s_shape_coeffs[k][j]) - else: - cos_sin0.append(None) - cos_sin_s0.append(None) - cos_sin.append(cos_sin0) - cos_sin_s.append(cos_sin_s0) - - ( - geo_volume_prime, - geo_surf, - geo_fluxsurfave_grad_r, - geo_fluxsurfave_bp2, - geo_fluxsurfave_bt2, - geo_bt0, - ) = volp_surf_Miller_vectorized( - R, - r, - delta, - kappa, - cos_sin, - cos_sin_s, - zeta, - zmag, - s_delta, - s_kappa, - s_zeta, - dzmag, - dRmag, - q, - n_theta=n_theta, - ) - - """ - from expro_util.f90 we have: - expro_volp(i) = geo_volume_prime*r_min**2, where r_min = expro_rmin(expro_n_exp) - expro_surf(i) = geo_surf*r_min**2 - """ - - volp = geo_volume_prime * profiles.profiles["rmin(m)"][-1] ** 2 - surf = geo_surf * profiles.profiles["rmin(m)"][-1] ** 2 - - return volp, surf, geo_fluxsurfave_grad_r, geo_fluxsurfave_bp2, geo_fluxsurfave_bt2, geo_bt0 - -def volp_surf_Miller_vectorized( - geo_rmaj_in, - geo_rmin_in, - geo_delta_in, - geo_kappa_in, - cos_sin, - cos_sin_s, - geo_zeta_in, - geo_zmag_in, - geo_s_delta_in, - 
geo_s_kappa_in, - geo_s_zeta_in, - geo_dzmag_in, - geo_drmaj_in, - geo_q_in, - n_theta=1001): - """ - Completety from f2py/geo/geo.f90 - """ - - geo_rmin_in = geo_rmin_in.clip( - 1e-10 - ) # To avoid problems at 0 (Implemented by PRF, not sure how TGYRO deals with this) - - geo_q_in = geo_q_in.clip(1e-2) # To avoid problems at 0 with some geqdsk files that are corrupted... - - - [ - geo_shape_cos0_in, - geo_shape_cos1_in, - geo_shape_cos2_in, - geo_shape_cos3_in, - geo_shape_cos4_in, - geo_shape_cos5_in, - geo_shape_cos6_in, - _, - _, - _, - geo_shape_sin3_in, - geo_shape_sin4_in, - geo_shape_sin5_in, - geo_shape_sin6_in, - ] = np.array(cos_sin).astype(float).T - - [ - geo_shape_s_cos0_in, - geo_shape_s_cos1_in, - geo_shape_s_cos2_in, - geo_shape_s_cos3_in, - geo_shape_s_cos4_in, - geo_shape_s_cos5_in, - geo_shape_s_cos6_in, - _, - _, - _, - geo_shape_s_sin3_in, - geo_shape_s_sin4_in, - geo_shape_s_sin5_in, - geo_shape_s_sin6_in, - ] = np.array(cos_sin_s).astype(float).T - - geo_signb_in = 1.0 - - geov_theta = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_bigr = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_bigr_r = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_bigr_t = np.zeros((n_theta,geo_rmin_in.shape[0])) - bigz = np.zeros((n_theta,geo_rmin_in.shape[0])) - bigz_r = np.zeros((n_theta,geo_rmin_in.shape[0])) - bigz_t = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_jac_r = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_grad_r = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_l_t = np.zeros((n_theta,geo_rmin_in.shape[0])) - r_c = np.zeros((n_theta,geo_rmin_in.shape[0])) - bigz_l = np.zeros((n_theta,geo_rmin_in.shape[0])) - bigr_l = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_l_r = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_nsin = np.zeros((n_theta,geo_rmin_in.shape[0])) - - pi_2 = 8.0 * np.arctan(1.0) - d_theta = pi_2 / (n_theta - 1) - - for i in range(n_theta): - #!----------------------------------------- - #! 
Generalized Miller-type parameterization - #!----------------------------------------- - - theta = -0.5 * pi_2 + (i - 1) * d_theta - - geov_theta[i] = theta - - x = np.arcsin(geo_delta_in) - - #! A - #! dA/dtheta - #! d^2A/dtheta^2 - a = ( - theta - + geo_shape_cos0_in - + geo_shape_cos1_in * np.cos(theta) - + geo_shape_cos2_in * np.cos(2 * theta) - + geo_shape_cos3_in * np.cos(3 * theta) - + geo_shape_cos4_in * np.cos(4 * theta) - + geo_shape_cos5_in * np.cos(5 * theta) - + geo_shape_cos6_in * np.cos(6 * theta) - + geo_shape_sin3_in * np.sin(3 * theta) - + x * np.sin(theta) - - geo_zeta_in * np.sin(2 * theta) - + geo_shape_sin3_in * np.sin(3 * theta) - + geo_shape_sin4_in * np.sin(4 * theta) - + geo_shape_sin5_in * np.sin(5 * theta) - + geo_shape_sin6_in * np.sin(6 * theta) - ) - a_t = ( - 1.0 - - geo_shape_cos1_in * np.sin(theta) - - 2 * geo_shape_cos2_in * np.sin(2 * theta) - - 3 * geo_shape_cos3_in * np.sin(3 * theta) - - 4 * geo_shape_cos4_in * np.sin(4 * theta) - - 5 * geo_shape_cos5_in * np.sin(5 * theta) - - 6 * geo_shape_cos6_in * np.sin(6 * theta) - + x * np.cos(theta) - - 2 * geo_zeta_in * np.cos(2 * theta) - + 3 * geo_shape_sin3_in * np.cos(3 * theta) - + 4 * geo_shape_sin4_in * np.cos(4 * theta) - + 5 * geo_shape_sin5_in * np.cos(5 * theta) - + 6 * geo_shape_sin6_in * np.cos(6 * theta) - ) - a_tt = ( - -geo_shape_cos1_in * np.cos(theta) - - 4 * geo_shape_cos2_in * np.cos(2 * theta) - - 9 * geo_shape_cos3_in * np.cos(3 * theta) - - 16 * geo_shape_cos4_in * np.cos(4 * theta) - - 25 * geo_shape_cos5_in * np.cos(5 * theta) - - 36 * geo_shape_cos6_in * np.cos(6 * theta) - - x * np.sin(theta) - + 4 * geo_zeta_in * np.sin(2 * theta) - - 9 * geo_shape_sin3_in * np.sin(3 * theta) - - 16 * geo_shape_sin4_in * np.sin(4 * theta) - - 25 * geo_shape_sin5_in * np.sin(5 * theta) - - 36 * geo_shape_sin6_in * np.sin(6 * theta) - ) - - #! R(theta) - #! dR/dr - #! dR/dtheta - #! 
d^2R/dtheta^2 - geov_bigr[i] = geo_rmaj_in + geo_rmin_in * np.cos(a) - geov_bigr_r[i] = ( - geo_drmaj_in - + np.cos(a) - - np.sin(a) - * ( - geo_shape_s_cos0_in - + geo_shape_s_cos1_in * np.cos(theta) - + geo_shape_s_cos2_in * np.cos(2 * theta) - + geo_shape_s_cos3_in * np.cos(3 * theta) - + geo_shape_s_cos4_in * np.cos(4 * theta) - + geo_shape_s_cos5_in * np.cos(5 * theta) - + geo_shape_s_cos6_in * np.cos(6 * theta) - + geo_s_delta_in / np.cos(x) * np.sin(theta) - - geo_s_zeta_in * np.sin(2 * theta) - + geo_shape_s_sin3_in * np.sin(3 * theta) - + geo_shape_s_sin4_in * np.sin(4 * theta) - + geo_shape_s_sin5_in * np.sin(5 * theta) - + geo_shape_s_sin6_in * np.sin(6 * theta) - ) - ) - geov_bigr_t[i] = -geo_rmin_in * a_t * np.sin(a) - bigr_tt = -geo_rmin_in * a_t**2 * np.cos(a) - geo_rmin_in * a_tt * np.sin(a) - - #!----------------------------------------------------------- - - #! A - #! dA/dtheta - #! d^2A/dtheta^2 - a = theta - a_t = 1.0 - a_tt = 0.0 - - #! Z(theta) - #! dZ/dr - #! dZ/dtheta - #! 
d^2Z/dtheta^2 - bigz[i] = geo_zmag_in + geo_kappa_in * geo_rmin_in * np.sin(a) - bigz_r[i] = geo_dzmag_in + geo_kappa_in * (1.0 + geo_s_kappa_in) * np.sin(a) - bigz_t[i] = geo_kappa_in * geo_rmin_in * np.cos(a) * a_t - bigz_tt = ( - -geo_kappa_in * geo_rmin_in * np.sin(a) * a_t**2 - + geo_kappa_in * geo_rmin_in * np.cos(a) * a_tt - ) - - g_tt = geov_bigr_t[i] ** 2 + bigz_t[i] ** 2 - - geov_jac_r[i] = geov_bigr[i] * ( - geov_bigr_r[i] * bigz_t[i] - geov_bigr_t[i] * bigz_r[i] - ) - - geov_grad_r[i] = geov_bigr[i] * np.sqrt(g_tt) / geov_jac_r[i] - - geov_l_t[i] = np.sqrt(g_tt) - - r_c[i] = geov_l_t[i] ** 3 / (geov_bigr_t[i] * bigz_tt - bigz_t[i] * bigr_tt) - - bigz_l[i] = bigz_t[i] / geov_l_t[i] - - bigr_l[i] = geov_bigr_t[i] / geov_l_t[i] - - geov_l_r[i] = bigz_l[i] * bigz_r[i] + bigr_l[i] * geov_bigr_r[i] - - geov_nsin[i] = ( - geov_bigr_r[i] * geov_bigr_t[i] + bigz_r[i] * bigz_t[i] - ) / geov_l_t[i] - - c = 0.0 - for i in range(n_theta): - c = c + geov_l_t[i] / (geov_bigr[i] * geov_grad_r[i]) - - f = geo_rmin_in / (c * d_theta / pi_2) - - c = 0.0 - for i in range(n_theta - 1): - c = c + geov_l_t[i] * geov_bigr[i] / geov_grad_r[i] - - geo_volume_prime = pi_2 * c * d_theta - - # Line 716 in geo.f90 - geo_surf = 0.0 - for i in range(n_theta - 1): - geo_surf = geo_surf + geov_l_t[i] * geov_bigr[i] - geo_surf = pi_2 * geo_surf * d_theta - - # ----- - c = 0.0 - for i in range(n_theta - 1): - c = c + geov_l_t[i] / (geov_bigr[i] * geov_grad_r[i]) - f = geo_rmin_in / (c * d_theta / pi_2) - - geov_b = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_g_theta = np.zeros((n_theta,geo_rmin_in.shape[0])) - geov_bt = np.zeros((n_theta,geo_rmin_in.shape[0])) - for i in range(n_theta): - geov_bt[i] = f / geov_bigr[i] - geov_bp = (geo_rmin_in / geo_q_in) * geov_grad_r[i] / geov_bigr[i] - - geov_b[i] = geo_signb_in * (geov_bt[i] ** 2 + geov_bp**2) ** 0.5 - geov_g_theta[i] = ( - geov_bigr[i] - * geov_b[i] - * geov_l_t[i] - / (geo_rmin_in * geo_rmaj_in * geov_grad_r[i]) - ) - - theta_0 = 
0 - dx = geov_theta[1,0] - geov_theta[0,0] - x0 = theta_0 - geov_theta[0,0] - i1 = int(x0 / dx) + 1 - i2 = i1 + 1 - x1 = (i1 - 1) * dx - z = (x0 - x1) / dx - if i2 == n_theta: - i2 -= 1 - geo_bt0 = geov_bt[i1] + (geov_bt[i2] - geov_bt[i1]) * z - - denom = 0 - for i in range(n_theta - 1): - denom = denom + geov_g_theta[i] / geov_b[i] - - geo_fluxsurfave_grad_r = 0 - for i in range(n_theta - 1): - geo_fluxsurfave_grad_r = ( - geo_fluxsurfave_grad_r - + geov_grad_r[i] * geov_g_theta[i] / geov_b[i] / denom - ) - - geo_fluxsurfave__bp2 = 0 - for i in range(n_theta - 1): - geo_fluxsurfave__bp2 = ( - geo_fluxsurfave__bp2 - + geov_bt[i] ** 2 * geov_g_theta[i] / geov_b[i] / denom - ) - - geo_fluxsurfave_bt2 = 0 - for i in range(n_theta - 1): - geo_fluxsurfave_bt2 = ( - geo_fluxsurfave_bt2 - + geov_bp ** 2 * geov_g_theta[i] / geov_b[i] / denom - ) - - return geo_volume_prime, geo_surf, geo_fluxsurfave_grad_r, geo_fluxsurfave__bp2, geo_fluxsurfave_bt2, geo_bt0 - -def xsec_area_RZ( - R, - Z -): - # calculates the cross-sectional area of the plasma for each flux surface - xsec_area = [] - for i in range(R.shape[0]): - R0 = np.max(R[i,:]) - np.min(R[i,:]) - Z0 = np.max(Z[i,:]) - np.min(Z[i,:]) - xsec_area.append(np.trapz(R[i], Z[i])) - - xsec_area = np.array(xsec_area) - - return xsec_area \ No newline at end of file diff --git a/src/mitim_modules/powertorch/physics/TRANSPORTtools.py b/src/mitim_modules/powertorch/physics/TRANSPORTtools.py deleted file mode 100644 index 8955116c..00000000 --- a/src/mitim_modules/powertorch/physics/TRANSPORTtools.py +++ /dev/null @@ -1,881 +0,0 @@ -import copy -import shutil -import torch -import numpy as np -from mitim_tools.misc_tools import PLASMAtools, IOtools -from mitim_tools.gacode_tools import TGYROtools, PROFILEStools -from mitim_modules.portals.utils import PORTALScgyro -from mitim_tools.misc_tools.LOGtools import printMsg as print -from IPython import embed - -class power_transport: - ''' - Default class for power transport models, 
change "evaluate" method to implement a new model and produce_profiles if the model requires written input.gacode written - - Notes: - - After evaluation, the self.model_results attribute will contain the results of the model, which can be used for plotting and analysis - - model results can have .plot() method that can grab kwargs or be similar to TGYRO plot - - ''' - def __init__(self, powerstate, name = "test", folder = "~/scratch/", evaluation_number = 0): - - self.name = name - self.folder = IOtools.expandPath(folder) - self.evaluation_number = evaluation_number - self.powerstate = powerstate - - # Allowed fluxes in powerstate so far - self.quantities = ['Pe', 'Pi', 'Ce', 'CZ', 'Mt'] - - # Each flux has a turbulent and neoclassical component - self.variables = [f'{i}_tr_turb' for i in self.quantities] + [f'{i}_tr_neo' for i in self.quantities] - - # Each flux component has a standard deviation - self.variables += [f'{i}_stds' for i in self.variables] - - # There is also turbulent exchange - self.variables += ['PexchTurb', 'PexchTurb_stds'] - - # And total transport flux - self.variables += [f'{i}_tr' for i in self.quantities] - - # Model results is None by default, but can be assigned in evaluate - self.model_results = None - - # Assign zeros to transport ones if not evaluated - for i in self.variables: - self.powerstate.plasma[i] = self.powerstate.plasma["te"] * 0.0 - - # There is also target components - self.variables += [f'{i}' for i in self.quantities] + [f'{i}_stds' for i in self.quantities] - - # ---------------------------------------------------------------------------------------- - # labels for plotting - # ---------------------------------------------------------------------------------------- - - self.powerstate.labelsFluxes = { - "te": "$Q_e$ ($MW/m^2$)", - "ti": "$Q_i$ ($MW/m^2$)", - "ne": ( - "$Q_{conv}$ ($MW/m^2$)" - if self.powerstate.TransportOptions["ModelOptions"].get("useConvectiveFluxes", True) - else "$\\Gamma_e$ ($10^{20}/s/m^2$)" - ), 
- "nZ": ( - "$Q_{conv}$ $\\cdot f_{Z,0}$ ($MW/m^2$)" - if self.powerstate.TransportOptions["ModelOptions"].get("useConvectiveFluxes", True) - else "$\\Gamma_Z$ $\\cdot f_{Z,0}$ ($10^{20}/s/m^2$)" - ), - "w0": "$M_T$ ($J/m^2$)", - } - - def produce_profiles(self): - # Only add self._produce_profiles() if it's needed (e.g. full TGLF), otherwise this is somewhat expensive (e.g. for flux matching) - pass - - def _produce_profiles(self,deriveQuantities=True): - - self.applyCorrections = self.powerstate.TransportOptions["ModelOptions"].get("MODELparameters", {}).get("applyCorrections", {}) - - # Write this updated profiles class (with parameterized profiles and target powers) - self.file_profs = self.folder / "input.gacode" - - powerstate_detached = self.powerstate.copy_state() - - self.powerstate.profiles = powerstate_detached.to_gacode( - write_input_gacode=self.file_profs, - postprocess_input_gacode=self.applyCorrections, - rederive_profiles = deriveQuantities, # Derive quantities so that it's ready for analysis and plotting later - insert_highres_powers = deriveQuantities, # Insert powers so that Q, Pfus and all that it's consistent when read later - ) - - self.profiles_transport = copy.deepcopy(self.powerstate.profiles) - - self._modify_profiles() - - def _modify_profiles(self): - ''' - Modify the profiles (e.g. 
lumping) before running the transport model - ''' - - # After producing the profiles, copy for future modifications - self.file_profs_unmod = self.file_profs.parent / f"{self.file_profs.name}_unmodified" - shutil.copy2(self.file_profs, self.file_profs_unmod) - - profiles_postprocessing_fun = self.powerstate.TransportOptions["ModelOptions"].get("profiles_postprocessing_fun", None) - - if profiles_postprocessing_fun is not None: - print(f"\t- Modifying input.gacode to run transport calculations based on {profiles_postprocessing_fun}",typeMsg="i") - self.profiles_transport = profiles_postprocessing_fun(self.file_profs) - - # Position of impurity ion may have changed - p_old = PROFILEStools.PROFILES_GACODE(self.file_profs_unmod) - p_new = PROFILEStools.PROFILES_GACODE(self.file_profs) - - impurity_of_interest = p_old.Species[self.powerstate.impurityPosition] - - try: - impurityPosition_new = p_new.Species.index(impurity_of_interest) - - except ValueError: - print(f"\t- Impurity {impurity_of_interest} not found in new profiles, keeping position {self.powerstate.impurityPosition}",typeMsg="w") - impurityPosition_new = self.powerstate.impurityPosition - - if impurityPosition_new != self.powerstate.impurityPosition: - print(f"\t- Impurity position has changed from {self.powerstate.impurityPosition} to {impurityPosition_new}",typeMsg="w") - self.powerstate.impurityPosition_transport = p_new.Species.index(impurity_of_interest) - - # ---------------------------------------------------------------------------------------------------- - # EVALUATE (custom part) - # ---------------------------------------------------------------------------------------------------- - def evaluate(self): - ''' - This needs to populate the following in self.powerstate.plasma - - Pe, Pe_tr, Pe_tr_turb, Pe_tr_neo -> MW/m^2 - - Pi, Pi_tr, Pi_tr_turb, Pi_tr_neo -> MW/m^2 - - Ce, Ce_tr, Ce_tr_turb, Ce_tr_neo -> MW/m^2 - * Ce_raw, Ce_raw_tr, Ce_raw_tr_turb, Ce_raw_tr_neo -> 10^20/s/m^2 - - CZ, CZ_tr, 
CZ_tr_turb, CZ_tr_neo -> MW/m^2 (but modified as needed, for example dividing by fZ0) - * CZ_raw, CZ_raw_tr, CZ_raw_tr_turb, CZ_raw_tr_neo -> 10^20/s/m^2 (NOT modified) - - Mt, Mt_tr, Mt_tr_turb, Mt_tr_neo -> J/m^2 - - PexchTurb -> MW/m^3 - and their respective standard deviations - ''' - - print(">> No transport fluxes to evaluate", typeMsg="w") - pass - -# ---------------------------------------------------------------------------------------------------- -# FULL TGYRO -# ---------------------------------------------------------------------------------------------------- - -class tgyro_model(power_transport): - def __init__(self, powerstate, **kwargs): - super().__init__(powerstate, **kwargs) - - def produce_profiles(self): - self._produce_profiles() - - def evaluate(self): - - # ------------------------------------------------------------------------------------------------------------------------ - # Model Options - # ------------------------------------------------------------------------------------------------------------------------ - - ModelOptions = self.powerstate.TransportOptions["ModelOptions"] - - MODELparameters = ModelOptions.get("MODELparameters",None) - includeFast = ModelOptions.get("includeFastInQi",False) - useConvectiveFluxes = ModelOptions.get("useConvectiveFluxes", True) - UseFineGridTargets = ModelOptions.get("UseFineGridTargets", False) - launchMODELviaSlurm = ModelOptions.get("launchMODELviaSlurm", False) - cold_start = ModelOptions.get("cold_start", False) - provideTurbulentExchange = ModelOptions.get("TurbulentExchange", False) - OriginalFimp = ModelOptions.get("OriginalFimp", 1.0) - forceZeroParticleFlux = ModelOptions.get("forceZeroParticleFlux", False) - percentError = ModelOptions.get("percentError", [5, 1, 0.5]) - use_tglf_scan_trick = ModelOptions.get("use_tglf_scan_trick", None) - cores_per_tglf_instance = ModelOptions.get("extra_params", {}).get('PORTALSparameters', {}).get("cores_per_tglf_instance", 1) - - # Grab impurity from 
powerstate ( because it may have been modified in produce_profiles() ) - impurityPosition = self.powerstate.impurityPosition_transport #ModelOptions.get("impurityPosition", 1) - - # ------------------------------------------------------------------------------------------------------------------------ - # 1. tglf_neo_original: Run TGYRO workflow - TGLF + NEO in subfolder tglf_neo_original (original as in... without stds or merging) - # ------------------------------------------------------------------------------------------------------------------------ - - RadiisToRun = [self.powerstate.plasma["rho"][0, 1:][i].item() for i in range(len(self.powerstate.plasma["rho"][0, 1:]))] - - tgyro = TGYROtools.TGYRO(cdf=dummyCDF(self.folder, self.folder)) - tgyro.prep(self.folder, profilesclass_custom=self.profiles_transport) - - if launchMODELviaSlurm: - print("\t- Launching TGYRO evaluation as a batch job") - else: - print("\t- Launching TGYRO evaluation as a terminal job") - - tgyro.run( - subFolderTGYRO="tglf_neo_original", - cold_start=cold_start, - forceIfcold_start=True, - special_radii=RadiisToRun, - iterations=0, - PredictionSet=[ - int("te" in self.powerstate.ProfilesPredicted), - int("ti" in self.powerstate.ProfilesPredicted), - int("ne" in self.powerstate.ProfilesPredicted), - ], - TGLFsettings=MODELparameters["transport_model"]["TGLFsettings"], - extraOptionsTGLF=MODELparameters["transport_model"]["extraOptionsTGLF"], - TGYRO_physics_options=MODELparameters["Physics_options"], - launchSlurm=launchMODELviaSlurm, - minutesJob=5, - forcedName=self.name, - ) - - tgyro.read(label="tglf_neo_original") - - # Copy one with evaluated targets - self.file_profs_targets = tgyro.FolderTGYRO / "input.gacode.new" - - # ------------------------------------------------------------------------------------------------------------------------ - # 2. 
tglf_neo: Write TGLF, NEO and TARGET errors in tgyro files as well - # ------------------------------------------------------------------------------------------------------------------------ - - # Copy original TGYRO folder - if (self.folder / "tglf_neo").exists(): - IOtools.shutil_rmtree(self.folder / "tglf_neo") - shutil.copytree(self.folder / "tglf_neo_original", self.folder / "tglf_neo") - - # Add errors and merge fluxes as we would do if this was a CGYRO run - curateTGYROfiles( - tgyro, - "tglf_neo_original", - RadiisToRun, - self.powerstate.ProfilesPredicted, - self.folder / "tglf_neo", - percentError, - impurityPosition=impurityPosition, - includeFast=includeFast, - provideTurbulentExchange=provideTurbulentExchange, - use_tglf_scan_trick = use_tglf_scan_trick, - cold_start=cold_start, - extra_name = self.name, - cores_per_tglf_instance=cores_per_tglf_instance - ) - - # Read again to capture errors - tgyro.read(label="tglf_neo", folder=self.folder / "tglf_neo") - - # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - # Run TGLF standalone --> In preparation for the transition - # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - - # from mitim_tools.gacode_tools import TGLFtools - # tglf = TGLFtools.TGLF(rhos=RadiisToRun) - # _ = tglf.prep( - # self.folder / 'stds', - # inputgacode=self.file_profs, - # recalculatePTOT=False, # Use what's in the input.gacode, which is what PORTALS TGYRO does - # cold_start=cold_start) - - # tglf.run( - # subFolderTGLF="tglf_neo_original", - # TGLFsettings=MODELparameters["transport_model"]["TGLFsettings"], - # cold_start=cold_start, - # forceIfcold_start=True, - # extraOptions=MODELparameters["transport_model"]["extraOptionsTGLF"], - # launchSlurm=launchMODELviaSlurm, - # slurm_setup={"cores": 4, "minutes": 1}, - # ) - - # tglf.read(label="tglf_neo_original") - - # 
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ - - # ------------------------------------------------------------------------------------------------------------------------ - # 3. tglf_neo: Populate powerstate with the TGYRO results - # ------------------------------------------------------------------------------------------------------------------------ - - # Produce right quantities (TGYRO -> powerstate.plasma) - self.powerstate = tgyro.results["tglf_neo"].TGYROmodeledVariables( - self.powerstate, - useConvectiveFluxes=useConvectiveFluxes, - includeFast=includeFast, - impurityPosition=impurityPosition, - UseFineGridTargets=UseFineGridTargets, - OriginalFimp=OriginalFimp, - forceZeroParticleFlux=forceZeroParticleFlux, - provideTurbulentExchange=provideTurbulentExchange, - provideTargets=self.powerstate.TargetOptions['ModelOptions']['TargetCalc'] == "tgyro", - ) - - # ------------------------------------------------------------------------------------------------------------------------ - # 4. 
cgyro_neo: Trick to fake a tgyro output to reflect CGYRO - # ------------------------------------------------------------------------------------------------------------------------ - - if MODELparameters['transport_model']['turbulence'] == 'CGYRO': - - print("\t- Checking whether cgyro_neo folder exists and it was written correctly via cgyro_trick...") - - correctly_run = (self.folder / "cgyro_neo").exists() - if correctly_run: - print("\t\t- Folder exists, but was cgyro_trick run?") - with open(self.folder / "cgyro_neo" / "mitim_flag", "r") as f: - correctly_run = bool(float(f.readline())) - - if correctly_run: - print("\t\t\t* Yes, it was", typeMsg="w") - else: - print("\t\t\t* No, it was not, repating process", typeMsg="i") - - # Remove cgyro_neo folder - if (self.folder / "cgyro_neo").exists(): - IOtools.shutil_rmtree(self.folder / "cgyro_neo") - - # Copy tglf_neo results - shutil.copytree(self.folder / "tglf_neo", self.folder / "cgyro_neo") - - # CGYRO writter - cgyro_trick(self,self.folder / "cgyro_neo") - - # Read TGYRO files and construct portals variables - - tgyro.read(label="cgyro_neo", folder=self.folder / "cgyro_neo") - - powerstate_orig = copy.deepcopy(self.powerstate) - - self.powerstate = tgyro.results["cgyro_neo"].TGYROmodeledVariables( - self.powerstate, - useConvectiveFluxes=useConvectiveFluxes, - includeFast=includeFast, - impurityPosition=impurityPosition, - UseFineGridTargets=UseFineGridTargets, - OriginalFimp=OriginalFimp, - forceZeroParticleFlux=forceZeroParticleFlux, - provideTurbulentExchange=provideTurbulentExchange, - provideTargets=self.powerstate.TargetOptions['ModelOptions']['TargetCalc'] == "tgyro", - ) - - print("\t- Checking model modifications:") - for r in ["Pe_tr_turb", "Pi_tr_turb", "Ce_tr_turb", "CZ_tr_turb", "Mt_tr_turb"]: #, "PexchTurb"]: #TODO: FIX - print(f"\t\t{r}(tglf) = {' '.join([f'{k:.1e} (+-{ke:.1e})' for k,ke in zip(powerstate_orig.plasma[r][0][1:],powerstate_orig.plasma[r+'_stds'][0][1:]) ])}") - 
print(f"\t\t{r}(cgyro) = {' '.join([f'{k:.1e} (+-{ke:.1e})' for k,ke in zip(self.powerstate.plasma[r][0][1:],self.powerstate.plasma[r+'_stds'][0][1:]) ])}") - - # ** - tgyro.results["use"] = tgyro.results["cgyro_neo"] - - else: - # copy profiles too! - profilesToShare(self) - - # ** - tgyro.results["use"] = tgyro.results["tglf_neo"] - - # ------------------------------------------------------------------------------------------------------------------------ - # Results class that can be used for further plotting and analysis in PORTALS - # ------------------------------------------------------------------------------------------------------------------------ - - self.model_results = copy.deepcopy(tgyro.results["use"]) # Pass the TGYRO results class that should be use for plotting and analysis - - self.model_results.extra_analysis = {} - for ikey in tgyro.results: - if ikey != "use": - self.model_results.extra_analysis[ikey] = tgyro.results[ikey] - -def tglf_scan_trick( - fluxesTGYRO, - tgyro, - label, - RadiisToRun, - ProfilesPredicted, - impurityPosition=1, includeFast=False, - delta=0.02, - cold_start=False, - check_coincidence_thr=1E-2, - extra_name="", - remove_folders_out = False, - cores_per_tglf_instance = 4 # e.g. 
4 core per radius, since this is going to launch ~ Nr=5 x (Nv=6 x Nd=2 + 1) = 65 TGLFs at once - ): - - print(f"\t- Running TGLF standalone scans ({delta = }) to determine relative errors") - - # Grab fluxes from TGYRO - Qe_tgyro, Qi_tgyro, Ge_tgyro, GZ_tgyro, Mt_tgyro, Pexch_tgyro = fluxesTGYRO - - # ------------------------------------------------------------------------------------------------------------------------ - # TGLF scans - # ------------------------------------------------------------------------------------------------------------------------ - - # Prepare scan - - tglf = tgyro.grab_tglf_objects(fromlabel=label, subfolder = 'tglf_explorations') - - variables_to_scan = [] - for i in ProfilesPredicted: - if i == 'te': variables_to_scan.append('RLTS_1') - if i == 'ti': variables_to_scan.append('RLTS_2') - if i == 'ne': variables_to_scan.append('RLNS_1') - if i == 'nZ': variables_to_scan.append(f'RLNS_{impurityPosition+2}') - if i == 'w0': variables_to_scan.append('VEXB_SHEAR') #TODO: is this correct? or VPAR_SHEAR? 
- - #TODO: Only if that parameter is changing at that location - if 'te' in ProfilesPredicted or 'ti' in ProfilesPredicted: - variables_to_scan.append('TAUS_2') - if 'te' in ProfilesPredicted or 'ne' in ProfilesPredicted: - variables_to_scan.append('XNUE') - if 'te' in ProfilesPredicted or 'ne' in ProfilesPredicted: - variables_to_scan.append('BETAE') - - relative_scan = [1-delta, 1+delta] - - name = 'turb_drives' - - tglf.rhos = RadiisToRun # To avoid the case in which TGYRO was run with an extra rho point - - # Estimate job minutes based on cases and cores (mostly IO I think at this moment, otherwise it should be independent on cases) - num_cases = len(RadiisToRun) * len(variables_to_scan) * len(relative_scan) - if cores_per_tglf_instance == 1: - minutes = 10 * (num_cases / 60) # Ad-hoc formula - else: - minutes = 1 * (num_cases / 60) # Ad-hoc formula - - # Enforce minimum minutes - minutes = max(2, minutes) - - tglf.runScanTurbulenceDrives( - subFolderTGLF = name, - variablesDrives = variables_to_scan, - varUpDown = relative_scan, - TGLFsettings = None, - ApplyCorrections = False, - add_baseline_to = 'first', - cold_start=cold_start, - forceIfcold_start=True, - slurm_setup={ - "cores": cores_per_tglf_instance, - "minutes": minutes, - }, - extra_name = f'{extra_name}_{name}', - positionIon=impurityPosition+2, - attempts_execution=2, - only_minimal_files=True, # Since I only care about fluxes here, do not retrieve all the files - ) - - # Remove folders because they are heavy to carry many throughout - if remove_folders_out: - IOtools.shutil_rmtree(tglf.FolderGACODE) - - Qe = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - Qi = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - Ge = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - GZ = np.zeros((len(RadiisToRun), len(variables_to_scan)*len(relative_scan)+1 )) - - cont = 0 - for vari in variables_to_scan: - jump = 
tglf.scans[f'{name}_{vari}']['Qe'].shape[-1] - - Qe[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qe'] - Qi[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qi'] - Ge[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Ge'] - GZ[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Gi'] - cont += jump - - # ---------------------------------------------------- - # Do a check that TGLF scans are consistent with TGYRO - Qe_err = np.abs( (Qe[:,0] - Qe_tgyro) / Qe_tgyro ) if 'te' in ProfilesPredicted else np.zeros_like(Qe[:,0]) - Qi_err = np.abs( (Qi[:,0] - Qi_tgyro) / Qi_tgyro ) if 'ti' in ProfilesPredicted else np.zeros_like(Qi[:,0]) - Ge_err = np.abs( (Ge[:,0] - Ge_tgyro) / Ge_tgyro ) if 'ne' in ProfilesPredicted else np.zeros_like(Ge[:,0]) - GZ_err = np.abs( (GZ[:,0] - GZ_tgyro) / GZ_tgyro ) if 'nZ' in ProfilesPredicted else np.zeros_like(GZ[:,0]) - - F_err = np.concatenate((Qe_err, Qi_err, Ge_err, GZ_err)) - if F_err.max() > check_coincidence_thr: - print(f"\t- TGLF scans are not consistent with TGYRO, maximum error = {F_err.max()*100:.2f}%",typeMsg="w") - if 'te' in ProfilesPredicted: - print('\t\t* Qe:',Qe_err) - if 'ti' in ProfilesPredicted: - print('\t\t* Qi:',Qi_err) - if 'ne' in ProfilesPredicted: - print('\t\t* Ge:',Ge_err) - if 'nZ' in ProfilesPredicted: - print('\t\t* GZ:',GZ_err) - else: - print(f"\t- TGLF scans are consistent with TGYRO, maximum error = {F_err.max()*100:.2f}%") - # ---------------------------------------------------- - - # Calculate the standard deviation of the scans, that's going to be the reported stds - - def calculate_mean_std(Q): - # Assumes Q is [radii, points], with [radii, 0] being the baseline - - Qm = np.mean(Q, axis=1) - Qstd = np.std(Q, axis=1) - - # Qm = Q[:,0] - # Qstd = np.std(Q, axis=1) - - # Qstd = ( Q.max(axis=1)-Q.min(axis=1) )/2 /2 # Such that the range is 2*std - # Qm = Q.min(axis=1) + Qstd*2 # Mean is at the middle of the range - - return Qm, Qstd - - Qe_point, Qe_std = calculate_mean_std(Qe) - Qi_point, Qi_std = 
calculate_mean_std(Qi) - Ge_point, Ge_std = calculate_mean_std(Ge) - GZ_point, GZ_std = calculate_mean_std(GZ) - - #TODO: Implement Mt and Pexch - Mt_point, Pexch_point = Mt_tgyro, Pexch_tgyro - Mt_std, Pexch_std = abs(Mt_point) * 0.1, abs(Pexch_point) * 0.1 - - #TODO: Careful with fast particles - - return Qe_point, Qi_point, Ge_point, GZ_point, Mt_point, Pexch_point, Qe_std, Qi_std, Ge_std, GZ_std, Mt_std, Pexch_std - - -# ------------------------------------------------------------------ -# SIMPLE Diffusion (#TODO: implement with particle flux and the raw) -# ------------------------------------------------------------------ - -class diffusion_model(power_transport): - def __init__(self, powerstate, **kwargs): - super().__init__(powerstate, **kwargs) - - # Ensure that the provided diffusivities include the zero location - self.chi_e = self.powerstate.TransportOptions["ModelOptions"]["chi_e"] - self.chi_i = self.powerstate.TransportOptions["ModelOptions"]["chi_i"] - - if self.chi_e.shape[0] < self.powerstate.plasma['rho'].shape[-1]: - self.chi_e = torch.cat((torch.zeros(1), self.chi_e)) - - if self.chi_i.shape[0] < self.powerstate.plasma['rho'].shape[-1]: - self.chi_i = torch.cat((torch.zeros(1), self.chi_i)) - - def produce_profiles(self): - pass - - def evaluate(self): - - # Make sure the chis are applied to all the points in the batch - Pe_tr = PLASMAtools.conduction( - self.powerstate.plasma["ne"], - self.powerstate.plasma["te"], - self.chi_e.repeat(self.powerstate.plasma['rho'].shape[0],1), - self.powerstate.plasma["aLte"], - self.powerstate.plasma["a"].unsqueeze(-1), - ) - Pi_tr = PLASMAtools.conduction( - self.powerstate.plasma["ni"].sum(axis=-1), - self.powerstate.plasma["ti"], - self.chi_i.repeat(self.powerstate.plasma['rho'].shape[0],1), - self.powerstate.plasma["aLti"], - self.powerstate.plasma["a"].unsqueeze(-1), - ) - - self.powerstate.plasma["Pe_tr_turb"] = Pe_tr * 2 / 3 - self.powerstate.plasma["Pi_tr_turb"] = Pi_tr * 2 / 3 - - 
self.powerstate.plasma["Pe_tr_neo"] = Pe_tr * 1 / 3 - self.powerstate.plasma["Pi_tr_neo"] = Pi_tr * 1 / 3 - - self.powerstate.plasma["Pe_tr"] = self.powerstate.plasma["Pe_tr_turb"] + self.powerstate.plasma["Pe_tr_neo"] - self.powerstate.plasma["Pi_tr"] = self.powerstate.plasma["Pi_tr_turb"] + self.powerstate.plasma["Pi_tr_neo"] - -# ------------------------------------------------------------------ -# SURROGATE -# ------------------------------------------------------------------ - -class surrogate_model(power_transport): - def __init__(self, powerstate, **kwargs): - super().__init__(powerstate, **kwargs) - - def produce_profiles(self): - pass - - def evaluate(self): - - """ - flux_fun as given in ModelOptions must produce Q and Qtargets in order of te,ti,ne - """ - - X = torch.Tensor() - for prof in self.powerstate.ProfilesPredicted: - X = torch.cat((X,self.powerstate.plasma['aL'+prof][:,1:]),axis=1) - - _, Q, _, _ = self.powerstate.TransportOptions["ModelOptions"]["flux_fun"](X) - - numeach = self.powerstate.plasma["rho"].shape[1] - 1 - - quantities = { - "te": "Pe", - "ti": "Pi", - "ne": "Ce", - "nZ": "CZ", - "w0": "Mt", - } - - for c, i in enumerate(self.powerstate.ProfilesPredicted): - self.powerstate.plasma[f"{quantities[i]}_tr"] = torch.cat((torch.tensor([[0.0]]),Q[:, numeach * c : numeach * (c + 1)]),dim=1) - -# ************************************************************************************************** -# Functions -# ************************************************************************************************** - -def curateTGYROfiles( - tgyroObject, - label, - RadiisToRun, - ProfilesPredicted, - folder, - percentError, - provideTurbulentExchange=False, - impurityPosition=1, - includeFast=False, - use_tglf_scan_trick=None, - cold_start=False, - extra_name="", - cores_per_tglf_instance = 4 - ): - - tgyro = tgyroObject.results[label] - - # Determine NEO and Target errors - relativeErrorNEO = percentError[1] / 100.0 - relativeErrorTAR = 
percentError[2] / 100.0 - - # ************************************************************************************************************************** - # TGLF - # ************************************************************************************************************************** - - # Grab fluxes - Qe = tgyro.Qe_sim_turb[0, 1:] - Qi = tgyro.QiIons_sim_turb[0, 1:] if includeFast else tgyro.QiIons_sim_turb_thr[0, 1:] - Ge = tgyro.Ge_sim_turb[0, 1:] - GZ = tgyro.Gi_sim_turb[impurityPosition, 0, 1:] - Mt = tgyro.Mt_sim_turb[0, 1:] - Pexch = tgyro.EXe_sim_turb[0, 1:] - - # Determine TGLF standard deviations - if use_tglf_scan_trick is not None: - - if provideTurbulentExchange: - print("> Turbulent exchange not implemented yet in TGLF scans", typeMsg="w") #TODO - - # -------------------------------------------------------------- - # If using the scan trick - # -------------------------------------------------------------- - - Qe, Qi, Ge, GZ, Mt, Pexch, QeE, QiE, GeE, GZE, MtE, PexchE = tglf_scan_trick( - [Qe, Qi, Ge, GZ, Mt, Pexch], - tgyroObject, - label, - RadiisToRun, - ProfilesPredicted, - impurityPosition=impurityPosition, - includeFast=includeFast, - delta = use_tglf_scan_trick, - cold_start=cold_start, - extra_name=extra_name, - cores_per_tglf_instance=cores_per_tglf_instance - ) - - min_relative_error = 0.01 # To avoid problems with gpytorch, 1% error minimum - - QeE = QeE.clip(abs(Qe)*min_relative_error) - QiE = QiE.clip(abs(Qi)*min_relative_error) - GeE = GeE.clip(abs(Ge)*min_relative_error) - GZE = GZE.clip(abs(GZ)*min_relative_error) - MtE = MtE.clip(abs(Mt)*min_relative_error) - PexchE = PexchE.clip(abs(Pexch)*min_relative_error) - - else: - - # -------------------------------------------------------------- - # If simply a percentage error provided - # -------------------------------------------------------------- - - relativeErrorTGLF = [percentError[0] / 100.0]*len(RadiisToRun) - - QeE = abs(Qe) * relativeErrorTGLF - QiE = abs(Qi) * 
relativeErrorTGLF - GeE = abs(Ge) * relativeErrorTGLF - GZE = abs(GZ) * relativeErrorTGLF - MtE = abs(Mt) * relativeErrorTGLF - PexchE = abs(Pexch) * relativeErrorTGLF - - # ************************************************************************************************************************** - # Neo - # ************************************************************************************************************************** - - QeNeo = tgyro.Qe_sim_neo[0, 1:] - if includeFast: - QiNeo = tgyro.QiIons_sim_neo[0, 1:] - else: - QiNeo = tgyro.QiIons_sim_neo_thr[0, 1:] - GeNeo = tgyro.Ge_sim_neo[0, 1:] - GZNeo = tgyro.Gi_sim_neo[impurityPosition, 0, 1:] - MtNeo = tgyro.Mt_sim_neo[0, 1:] - - QeNeoE = abs(tgyro.Qe_sim_neo[0, 1:]) * relativeErrorNEO - if includeFast: - QiNeoE = abs(tgyro.QiIons_sim_neo[0, 1:]) * relativeErrorNEO - else: - QiNeoE = abs(tgyro.QiIons_sim_neo_thr[0, 1:]) * relativeErrorNEO - GeNeoE = abs(tgyro.Ge_sim_neo[0, 1:]) * relativeErrorNEO - GZNeoE = abs(tgyro.Gi_sim_neo[impurityPosition, 0, 1:]) * relativeErrorNEO - MtNeoE = abs(tgyro.Mt_sim_neo[0, 1:]) * relativeErrorNEO - - # Merge - - PORTALScgyro.modifyFLUX( - tgyro, - folder, - Qe, - Qi, - Ge, - GZ, - Mt, - Pexch, - QeNeo=QeNeo, - QiNeo=QiNeo, - GeNeo=GeNeo, - GZNeo=GZNeo, - MtNeo=MtNeo, - impurityPosition=impurityPosition, - ) - - PORTALScgyro.modifyFLUX( - tgyro, - folder, - QeE, - QiE, - GeE, - GZE, - MtE, - PexchE, - QeNeo=QeNeoE, - QiNeo=QiNeoE, - GeNeo=GeNeoE, - GZNeo=GZNeoE, - MtNeo=MtNeoE, - impurityPosition=impurityPosition, - special_label="_stds", - ) - - # ************************************************************************************************************************** - # Targets - # ************************************************************************************************************************** - - QeTargetE = abs(tgyro.Qe_tar[0, 1:]) * relativeErrorTAR - QiTargetE = abs(tgyro.Qi_tar[0, 1:]) * relativeErrorTAR - GeTargetE = abs(tgyro.Ge_tar[0, 1:]) * 
relativeErrorTAR - GZTargetE = GeTargetE * 0.0 - MtTargetE = abs(tgyro.Mt_tar[0, 1:]) * relativeErrorTAR - - PORTALScgyro.modifyEVO( - tgyro, - folder, - QeTargetE * 0.0, - QiTargetE * 0.0, - GeTargetE * 0.0, - GZTargetE * 0.0, - MtTargetE * 0.0, - impurityPosition=impurityPosition, - positionMod=1, - special_label="_stds", - ) - PORTALScgyro.modifyEVO( - tgyro, - folder, - QeTargetE, - QiTargetE, - GeTargetE, - GZTargetE, - MtTargetE, - impurityPosition=impurityPosition, - positionMod=2, - special_label="_stds", - ) - - -def profilesToShare(self): - if "extra_params" in self.powerstate.TransportOptions["ModelOptions"] and "folder" in self.powerstate.TransportOptions["ModelOptions"]["extra_params"]: - whereFolder = IOtools.expandPath(self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["folder"] / "Outputs" / "portals_profiles") - if not whereFolder.exists(): - IOtools.askNewFolder(whereFolder) - - fil = whereFolder / f"input.gacode.{self.evaluation_number}" - shutil.copy2(self.file_profs, fil) - shutil.copy2(self.file_profs_unmod, fil.parent / f"{fil.name}_unmodified") - shutil.copy2(self.file_profs_targets, fil.parent / f"{fil.name}.new") - print(f"\t- Copied profiles to {IOtools.clipstr(fil)}") - else: - print("\t- Could not move files", typeMsg="w") - - -def cgyro_trick(self,FolderEvaluation_TGYRO): - - with open(FolderEvaluation_TGYRO / "mitim_flag", "w") as f: - f.write("0") - - # ************************************************************************************************************************** - # Print Information - # ************************************************************************************************************************** - - txt = "\nFluxes to be matched by CGYRO ( TARGETS - NEO ):" - - for var, varn in zip( - ["r/a ", "rho ", "a/LTe", "a/LTi", "a/Lne", "a/LnZ", "a/Lw0"], - ["roa", "rho", "aLte", "aLti", "aLne", "aLnZ", "aLw0"], - ): - txt += f"\n{var} = " - for j in range(self.powerstate.plasma["rho"].shape[1] - 1): - 
txt += f"{self.powerstate.plasma[varn][0,j+1]:.6f} " - - for var, varn in zip( - ["Qe (MW/m^2)", "Qi (MW/m^2)", "Ce (MW/m^2)", "CZ (MW/m^2)", "Mt (J/m^2) "], - ["Pe", "Pi", "Ce", "CZ", "Mt"], - ): - txt += f"\n{var} = " - for j in range(self.powerstate.plasma["rho"].shape[1] - 1): - txt += f"{self.powerstate.plasma[varn][0,j+1]-self.powerstate.plasma[f'{varn}_tr_neo'][0,j+1]:.4e} " - - print(txt) - - # Copy profiles so that later it is easy to grab all the input.gacodes that were evaluated - profilesToShare(self) - - # ************************************************************************************************************************** - # Evaluate CGYRO - # ************************************************************************************************************************** - - PORTALScgyro.evaluateCGYRO( - self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["PORTALSparameters"], - self.powerstate.TransportOptions["ModelOptions"]["extra_params"]["folder"], - self.evaluation_number, - FolderEvaluation_TGYRO, - self.file_profs, - self.powerstate.plasma["roa"][0,1:], - self.powerstate.ProfilesPredicted, - ) - - # ************************************************************************************************************************** - # EXTRA - # ************************************************************************************************************************** - - # Make tensors - for i in ["Pe_tr_turb", "Pi_tr_turb", "Ce_tr_turb", "CZ_tr_turb", "Mt_tr_turb"]: - try: - self.powerstate.plasma[i] = torch.from_numpy(self.powerstate.plasma[i]).to(self.powerstate.dfT).unsqueeze(0) - except: - pass - - # Write a flag indicating this was performed, to avoid an issue that... 
the script crashes when it has copied tglf_neo, without cgyro_trick modification - with open(FolderEvaluation_TGYRO / "mitim_flag", "w") as f: - f.write("1") - -def dummyCDF(GeneralFolder, FolderEvaluation): - """ - This routine creates path to a dummy CDF file in FolderEvaluation, with the name "simulation_evaluation.CDF" - - GeneralFolder, e.g. ~/runs_portals/run10/ - FolderEvaluation, e.g. ~/runs_portals/run10000/Execution/Evaluation.0/model_complete/ - """ - - # ------- Name construction for scratch folders in parallel ---------------- - - GeneralFolder = IOtools.expandPath(GeneralFolder, ensurePathValid=True) - - a, subname = IOtools.reducePathLevel(GeneralFolder, level=1, isItFile=False) - - FolderEvaluation = IOtools.expandPath(FolderEvaluation) - - name = FolderEvaluation.name.split(".")[-1] # 0 (evaluation #) - - if name == "": - name = "0" - - cdf = FolderEvaluation / f"{subname}_ev{name}.CDF" - - return cdf diff --git a/src/mitim_modules/powertorch/physics_models/parameterizers.py b/src/mitim_modules/powertorch/physics_models/parameterizers.py new file mode 100644 index 00000000..56a4dfa6 --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/parameterizers.py @@ -0,0 +1,175 @@ +import copy +import torch +import numpy as np +from mitim_modules.powertorch.utils import CALCtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +# <> Function to interpolate a curve <> +from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function + +def piecewise_linear( + x_coord, + y_coord_raw, + x_coarse_tensor, + parameterize_in_aLx=True, + multiplier_quantity=1.0, + ): + """ + Notes: + - x_coarse_tensor must be torch + """ + + # ********************************************************************************************************** + # Define the integrator and derivator functions (based on whether I want to parameterize in aLx or in gradX) + # 
********************************************************************************************************** + + if parameterize_in_aLx: + # 1/Lx = -1/X*dX/dr + integrator_function, derivator_function = ( + CALCtools.integration_Lx, + CALCtools.derivation_into_Lx, + ) + else: + # -dX/dr + integrator_function, derivator_function = ( + CALCtools.integration_dxdr, + CALCtools.derivation_into_dxdr, + ) + + y_coord = torch.from_numpy(y_coord_raw).to(x_coarse_tensor) * multiplier_quantity + + ygrad_coord = derivator_function( torch.from_numpy(x_coord).to(x_coarse_tensor), y_coord ) + + # ********************************************************************************************************** + # Get control points + # ********************************************************************************************************** + + x_coarse = x_coarse_tensor[1:].cpu().numpy() + + """ + Define region to get control points from + ------------------------------------------------------------ + Trick: Addition of extra point + This is important because if I don't, when I combine the trailing edge and the new + modified profile, there's going to be a discontinuity in the gradient. + """ + + ir_end = np.argmin(np.abs(x_coord - x_coarse[-1])) + + if ir_end < len(x_coord) - 1: + ir = ir_end + 2 # To prevent that TGYRO does a 2nd order derivative + x_coarse = np.append(x_coarse, [x_coord[ir]]) + else: + ir = ir_end + + # Definition of trailing edge. 
Any point after, and including, the extra point + x_trail = torch.from_numpy(x_coord[ir:]).to(x_coarse_tensor) + y_trail = y_coord[ir:] + x_notrail = torch.from_numpy(x_coord[: ir + 1]).to(x_coarse_tensor) + + # Produce control points, including a zero at the beginning + aLy_coarse = [[0.0, 0.0]] + for cont, i in enumerate(x_coarse): + yValue = ygrad_coord[np.argmin(np.abs(x_coord - i))] + aLy_coarse.append([i, yValue.cpu().item()]) + + aLy_coarse = torch.from_numpy(np.array(aLy_coarse)).to(ygrad_coord) + + # Since the last one is an extra point very close, I'm making it the same + aLy_coarse[-1, 1] = aLy_coarse[-2, 1] + + # Boundary condition at point moved by gridPointsAllowed + y_bc = torch.from_numpy(interpolation_function([x_coarse[-1]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) + + # Boundary condition at point (ACTUAL THAT I WANT to keep fixed, i.e. rho=0.8) + y_bc_real = torch.from_numpy(interpolation_function([x_coarse[-2]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) + + # ********************************************************************************************************** + # Define profile_constructor functions + # ********************************************************************************************************** + + def profile_constructor_coarse(x, y, multiplier=multiplier_quantity): + """ + Construct curve in a coarse grid + ---------------------------------------------------------------------------------------------------- + This constructs a curve in any grid, with any batch given in y=y. + Useful for surrogate evaluations. Fast in a coarse grid. For HF evaluations, + I need to do in a finer grid so that it is consistent with TGYRO. + x, y must be (batch, radii), y_bc must be (1) + """ + return x, integrator_function(x, y, y_bc_real) / multiplier + + def profile_constructor_middle(x, y, multiplier=multiplier_quantity): + """ + Deparametrizes a finer profile based on the values in the coarse.
+ Reason why something like this is not used for the full profile is because derivative of this will not be as original, + which is needed to match TGYRO + """ + yCPs = CALCtools.Interp1d_torch()(aLy_coarse[:, 0][:-1].repeat((y.shape[0], 1)), y, x) + return x, integrator_function(x, yCPs, y_bc_real) / multiplier + + def profile_constructor_fine(x, y, multiplier=multiplier_quantity): + """ + Notes: + - x is a 1D array, but y can be a 2D array for a batch of individuals: (batch,x) + - I am assuming it is 1/LT for parameterization, but gives T + """ + + y = torch.atleast_2d(y) + x = x[0, :] if x.dim() == 2 else x + + # Add the extra trick point + x = torch.cat((x, aLy_coarse[-1][0].repeat((1)))) + y = torch.cat((y, aLy_coarse[-1][-1].repeat((y.shape[0], 1))), dim=1) + + # Model curve (basically, what happens in between points) + yBS = CALCtools.Interp1d_torch()(x.repeat(y.shape[0], 1), y, x_notrail.repeat(y.shape[0], 1)) + + """ + --------------------------------------------------------------------------------------------------------- + Trick 1: smoothAroundCoarsing + TGYRO will use a 2nd order scheme to obtain gradients out of the profile, so a piecewise linear + will simply not give the right derivatives. + Here, this rough trick is to modify the points in gradient space around the coarse grid with the + same value of gradient, so in principle it doesn't matter the order of the derivative. + """ + num_around = 1 + for i in range(x.shape[0] - 2): + ir = torch.argmin(torch.abs(x[i + 1] - x_notrail)) + for k in range(-num_around, num_around + 1, 1): + yBS[:, ir + k] = yBS[:, ir] + # -------------------------------------------------------------------------------------------------------- + + yBS = integrator_function(x_notrail.repeat(yBS.shape[0], 1), yBS.clone(), y_bc) + + """ + Trick 2: Correct y_bc + The y_bc for the profile integration started at gridPointsAllowed, but that's not the real + y_bc. 
I want the temperature fixed at my first point that I actually care for. + Here, I multiply the profile to get that. + Multiplication works because: + 1/LT = 1/T * dT/dr + 1/LT' = 1/(T*m) * d(T*m)/dr = 1/T * dT/dr = 1/LT + Same logarithmic gradient, but with the right boundary condition + + """ + ir = torch.argmin(torch.abs(x_notrail - x[-2])) + yBS = yBS * torch.transpose((y_bc_real / yBS[:, ir]).repeat(yBS.shape[1], 1), 0, 1) + + # Add trailing edge + y_trailnew = copy.deepcopy(y_trail).repeat(yBS.shape[0], 1) + + x_notrail_t = torch.cat((x_notrail[:-1], x_trail), dim=0) + yBS = torch.cat((yBS[:, :-1], y_trailnew), dim=1) + + return x_notrail_t, yBS / multiplier + + # ********************************************************************************************************** + + return ( + aLy_coarse, + profile_constructor_fine, + profile_constructor_coarse, + profile_constructor_middle, + ) \ No newline at end of file diff --git a/src/mitim_modules/powertorch/physics/radiation_chebyshev.csv b/src/mitim_modules/powertorch/physics_models/radiation_chebyshev.csv similarity index 100% rename from src/mitim_modules/powertorch/physics/radiation_chebyshev.csv rename to src/mitim_modules/powertorch/physics_models/radiation_chebyshev.csv diff --git a/src/mitim_modules/powertorch/physics/TARGETStools.py b/src/mitim_modules/powertorch/physics_models/targets_analytic.py similarity index 51% rename from src/mitim_modules/powertorch/physics/TARGETStools.py rename to src/mitim_modules/powertorch/physics_models/targets_analytic.py index a13bd90c..45408678 100644 --- a/src/mitim_modules/powertorch/physics/TARGETStools.py +++ b/src/mitim_modules/powertorch/physics_models/targets_analytic.py @@ -1,201 +1,9 @@ import torch from mitim_tools.misc_tools import PLASMAtools +from mitim_modules.powertorch.utils import TARGETStools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed -# ------------------------------------------------------------------ -# 
Main classes -# ------------------------------------------------------------------ - -class power_targets: - ''' - Default class for power target models, change "evaluate" method to implement a new model - ''' - - def evaluate(self): - print("No model implemented for power targets", typeMsg="w") - - def __init__(self,powerstate): - self.powerstate = powerstate - - # Make sub-targets equal to zero - variables_to_zero = ["qfuse", "qfusi", "qie", "qrad", "qrad_bremms", "qrad_line", "qrad_sync"] - for i in variables_to_zero: - self.powerstate.plasma[i] = self.powerstate.plasma["te"] * 0.0 - - # ---------------------------------------------------- - # Fixed Targets (targets without a model) - # ---------------------------------------------------- - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 1: - self.Pe_orig, self.Pi_orig = ( - self.powerstate.plasma["Pe_orig_fusradexch"], - self.powerstate.plasma["Pi_orig_fusradexch"], - ) # Original integrated from input.gacode - elif self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 2: - self.Pe_orig, self.Pi_orig = ( - self.powerstate.plasma["Pe_orig_fusrad"], - self.powerstate.plasma["Pi_orig_fusrad"], - ) - elif self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: - self.Pe_orig, self.Pi_orig = self.powerstate.plasma["te"] * 0.0, self.powerstate.plasma["te"] * 0.0 - - # For the moment, I don't have a model for these, so I just grab the original from input.gacode - self.CextraE = self.powerstate.plasma["Gaux_e"] # 1E20/s/m^2 - self.CextraZ = self.powerstate.plasma["Gaux_Z"] # 1E20/s/m^2 - self.Mextra = self.powerstate.plasma["Maux"] # J/m^2 - - def fine_grid(self): - - """ - Make all quantities needed on the fine resolution - ------------------------------------------------- - In the powerstate creation, the plasma variables are stored in two different resolutions, one for the coarse grid and one for the fine grid, - if the option is activated. 
- - Here, at calculation stage I use some precalculated quantities in the fine grid and then integrate the gradients into that resolution - - Note that the set ['te','ti','ne','nZ','w0','ni'] will automatically be substituted during the update_var() that comes next, so - it's ok that I lose the torch leaf here. However, I must do this copy here because if any of those variables are not updated in - update_var() then it would fail. But first store them for later use. - """ - - self.plasma_original = {} - - # Bring to fine grid - variables_to_fine = ["B_unit", "B_ref", "volp", "rmin", "roa", "rho", "ni"] - for variable in variables_to_fine: - self.plasma_original[variable] = self.powerstate.plasma[variable].clone() - self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] - - # Bring also the gradients and kinetic variables - for variable in self.powerstate.profile_map.keys(): - - # Kinetic variables (te,ti,ne,nZ,w0,ni) - self.plasma_original[variable] = self.powerstate.plasma[variable].clone() - self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] - - # Bring also the gradients that are part of the torch trees, so that the derivative is not lost - self.plasma_original[f'aL{variable}'] = self.powerstate.plasma[f'aL{variable}'].clone() - - # ---------------------------------------------------- - # Integrate through fine de-parameterization - # ---------------------------------------------------- - for i in self.powerstate.ProfilesPredicted: - _ = self.powerstate.update_var(i,specific_deparametrizer=self.powerstate.deparametrizers_coarse_middle) - - def flux_integrate(self): - """ - ************************************************************************************************** - Calculate integral of all targets, and then sum aux. - Reason why I do it this convoluted way is to make it faster in mitim, not to run integrateQuadPoly all the time. 
- Run once for all the batch and also for electrons and ions - (in MW/m^2) - ************************************************************************************************** - """ - - qe = self.powerstate.plasma["te"]*0.0 - qi = self.powerstate.plasma["te"]*0.0 - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] >= 2: - qe += -self.powerstate.plasma["qie"] - qi += self.powerstate.plasma["qie"] - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: - qe += self.powerstate.plasma["qfuse"] - self.powerstate.plasma["qrad"] - qi += self.powerstate.plasma["qfusi"] - - q = torch.cat((qe, qi)).to(qe) - self.P = self.powerstate.volume_integrate(q, force_dim=q.shape[0]) - - def coarse_grid(self): - - # ************************************************************************************************** - # Come back to original grid for targets - # ************************************************************************************************** - - # Interpolate results from fine to coarse (i.e. 
whole point is that it is better than integrate interpolated values) - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] >= 2: - for i in ["qie"]: - self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] - - if self.powerstate.TargetOptions['ModelOptions']['TypeTarget'] == 3: - for i in [ - "qfuse", - "qfusi", - "qrad", - "qrad_bremms", - "qrad_line", - "qrad_sync", - ]: - self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] - - self.P = self.P[:, self.powerstate.positions_targets] - - # Recover variables calculated prior to the fine-targets method - for i in self.plasma_original: - self.powerstate.plasma[i] = self.plasma_original[i] - - def postprocessing(self, useConvectiveFluxes=False, forceZeroParticleFlux=False, assumedPercentError=1.0): - - # ************************************************************************************************** - # Plug-in Targets - # ************************************************************************************************** - - self.powerstate.plasma["Pe"] = ( - self.powerstate.plasma["Paux_e"] + self.P[: self.P.shape[0]//2, :] + self.Pe_orig - ) # MW/m^2 - self.powerstate.plasma["Pi"] = ( - self.powerstate.plasma["Paux_i"] + self.P[self.P.shape[0]//2 :, :] + self.Pi_orig - ) # MW/m^2 - self.powerstate.plasma["Ce_raw"] = self.CextraE - self.powerstate.plasma["CZ_raw"] = self.CextraZ - self.powerstate.plasma["Mt"] = self.Mextra - - # Merge convective fluxes - - if useConvectiveFluxes: - self.powerstate.plasma["Ce"] = PLASMAtools.convective_flux( - self.powerstate.plasma["te"], self.powerstate.plasma["Ce_raw"] - ) # MW/m^2 - self.powerstate.plasma["CZ"] = PLASMAtools.convective_flux( - self.powerstate.plasma["te"], self.powerstate.plasma["CZ_raw"] - ) # MW/m^2 - else: - self.powerstate.plasma["Ce"] = self.powerstate.plasma["Ce_raw"] - self.powerstate.plasma["CZ"] = self.powerstate.plasma["CZ_raw"] - - if forceZeroParticleFlux: - 
self.powerstate.plasma["Ce"] = self.powerstate.plasma["Ce"] * 0 - self.powerstate.plasma["Ce_raw"] = self.powerstate.plasma["Ce_raw"] * 0 - - # ************************************************************************************************** - # Error - # ************************************************************************************************** - - variables_to_error = ["Pe", "Pi", "Ce", "CZ", "Mt", "Ce_raw", "CZ_raw"] - - for i in variables_to_error: - self.powerstate.plasma[i + "_stds"] = abs(self.powerstate.plasma[i]) * assumedPercentError / 100 - - """ - ************************************************************************************************** - GB Normalized - ************************************************************************************************** - Note: This is useful for mitim surrogate variables of targets - """ - - gb_mapping = { - "Pe": "Qgb", - "Pi": "Qgb", - "Ce": "Qgb" if useConvectiveFluxes else "Ggb", - "CZ": "Qgb" if useConvectiveFluxes else "Ggb", - "Mt": "Pgb", - } - - for i in gb_mapping.keys(): - self.powerstate.plasma[f"{i}GB"] = self.powerstate.plasma[i] / self.powerstate.plasma[gb_mapping[i]] - # ---------------------------------------------------------------------------------------------------- # Full analytical models taken from TGYRO # ---------------------------------------------------------------------------------------------------- @@ -214,17 +22,19 @@ def postprocessing(self, useConvectiveFluxes=False, forceZeroParticleFlux=False, c4, c5, c6, c7 = 4.60643e-3, 1.3500e-2, -1.06750e-4, 1.36600e-5 bg, er = 34.3827, 1.124656e6 -class analytical_model(power_targets): +class analytical_model(TARGETStools.power_targets): def __init__(self,powerstate, **kwargs): super().__init__(powerstate, **kwargs) def evaluate(self): - if self.powerstate.TargetOptions["ModelOptions"]["TypeTarget"] >= 2: + if "qie" in self.powerstate.target_options["options"]["targets_evolve"]: self._evaluate_energy_exchange() - if 
self.powerstate.TargetOptions["ModelOptions"]["TypeTarget"] == 3: + if "qfus" in self.powerstate.target_options["options"]["targets_evolve"]: self._evaluate_alpha_heating() + + if "qrad" in self.powerstate.target_options["options"]["targets_evolve"]: self._evaluate_radiation() def _evaluate_energy_exchange(self): @@ -288,9 +98,7 @@ def _evaluate_alpha_heating(self): for i in range(self.powerstate.plasma["ni"].shape[2]): c_a += (self.powerstate.plasma["ni"][..., i] / self.powerstate.plasma["ne"]) * self.powerstate.plasma["ions_set_Zi"][:,i].unsqueeze(-1) ** 2 * (Aalpha / self.powerstate.plasma["ions_set_mi"][:,i].unsqueeze(-1)) - W_crit = (self.powerstate.plasma["te"] * 1e3) * (4 * (Ae / Aalpha) ** 0.5 / (3 * pi**0.5 * c_a)) ** ( - -2.0 / 3.0 - ) # in eV + W_crit = (self.powerstate.plasma["te"] * 1e3) * (4 * (Ae / Aalpha) ** 0.5 / (3 * pi**0.5 * c_a)) ** (-2.0 / 3.0) # in eV frac_ai = sivukhin(Ealpha / W_crit) # This solves Eq 17 of Stix diff --git a/src/mitim_modules/powertorch/physics_models/transport_analytic.py b/src/mitim_modules/powertorch/physics_models/transport_analytic.py new file mode 100644 index 00000000..cedce490 --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/transport_analytic.py @@ -0,0 +1,90 @@ +import torch +from mitim_tools.misc_tools import PLASMAtools +from mitim_modules.powertorch.utils import TRANSPORTtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +# ------------------------------------------------------------------ +# SIMPLE Diffusion (#TODO: implement with particle flux and the raw) +# ------------------------------------------------------------------ + +class diffusion_model(TRANSPORTtools.power_transport): + def __init__(self, powerstate, **kwargs): + super().__init__(powerstate, **kwargs) + + # Ensure that the provided diffusivities include the zero location + self.chi_e = self.powerstate.transport_options["options"]["chi_e"] + self.chi_i = 
self.powerstate.transport_options["options"]["chi_i"] + + if self.chi_e.shape[0] < self.powerstate.plasma['rho'].shape[-1]: + self.chi_e = torch.cat((torch.zeros(1), self.chi_e)) + + if self.chi_i.shape[0] < self.powerstate.plasma['rho'].shape[-1]: + self.chi_i = torch.cat((torch.zeros(1), self.chi_i)) + + def produce_profiles(self): + pass + + def evaluate(self): + + # Make sure the chis are applied to all the points in the batch + Pe_tr = PLASMAtools.conduction( + self.powerstate.plasma["ne"], + self.powerstate.plasma["te"], + self.chi_e.repeat(self.powerstate.plasma['rho'].shape[0],1), + self.powerstate.plasma["aLte"], + self.powerstate.plasma["a"].unsqueeze(-1), + ) + Pi_tr = PLASMAtools.conduction( + self.powerstate.plasma["ni"].sum(axis=-1), + self.powerstate.plasma["ti"], + self.chi_i.repeat(self.powerstate.plasma['rho'].shape[0],1), + self.powerstate.plasma["aLti"], + self.powerstate.plasma["a"].unsqueeze(-1), + ) + + self.QeMWm2_tr_turb = Pe_tr * 2 / 3 + self.QiMWm2_tr_turb = Pi_tr * 2 / 3 + + self.QeMWm2_tr_neoc = Pe_tr * 1 / 3 + self.QiMWm2_tr_neoc = Pi_tr * 1 / 3 + + self.QeMWm2_tr = self.QeMWm2_tr_turb + self.QeMWm2_tr_neoc + self.QiMWm2_tr = self.QiMWm2_tr_turb + self.QiMWm2_tr_neoc + +# ------------------------------------------------------------------ +# SURROGATE +# ------------------------------------------------------------------ + +class surrogate(TRANSPORTtools.power_transport): + def __init__(self, powerstate, **kwargs): + super().__init__(powerstate, **kwargs) + + def produce_profiles(self): + pass + + def evaluate(self): + + """ + flux_fun as given in transport_evaluator_options must produce Q and Qtargets in order of te,ti,ne + """ + + X = torch.Tensor() + for prof in self.powerstate.predicted_channels: + X = torch.cat((X,self.powerstate.plasma['aL'+prof][:,1:]),axis=1) + + _, Q, _, _ = self.powerstate.transport_options["options"]["flux_fun"](X) + + numeach = self.powerstate.plasma["rho"].shape[1] - 1 + + quantities = { + "te": "QeMWm2", + 
"ti": "QiMWm2", + "ne": "Ce", + "nZ": "CZ", + "w0": "MtJm2", + } + + for c, i in enumerate(self.powerstate.predicted_channels): + self.powerstate.plasma[f"{quantities[i]}_tr"] = torch.cat((torch.tensor([[0.0]]),Q[:, numeach * c : numeach * (c + 1)]),dim=1) + diff --git a/src/mitim_modules/powertorch/physics_models/transport_cgyro.py b/src/mitim_modules/powertorch/physics_models/transport_cgyro.py new file mode 100644 index 00000000..a9065d1c --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/transport_cgyro.py @@ -0,0 +1,254 @@ +import json +import numpy as np +from mitim_tools.gacode_tools import CGYROtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class gyrokinetic_model: + + def _evaluate_gyrokinetic_model(self, code = 'cgyro', gk_object = None): + # ------------------------------------------------------------------------------------------------------------------------ + # Grab options + # ------------------------------------------------------------------------------------------------------------------------ + + simulation_options = self.transport_evaluator_options[code] + cold_start = self.cold_start + + rho_locations = [self.powerstate.plasma["rho"][0, 1:][i].item() for i in range(len(self.powerstate.plasma["rho"][0, 1:]))] + run_type = simulation_options["run"]["run_type"] + + # ------------------------------------------------------------------------------------------------------------------------ + # Prepare object + # ------------------------------------------------------------------------------------------------------------------------ + + gk_object = gk_object(rhos=rho_locations) + + _ = gk_object.prep( + self.powerstate.profiles_transport, + self.folder, + ) + + subfolder_name = f"base_{code}" + + _ = gk_object.run( + subfolder_name, + cold_start=cold_start, + forceIfcold_start=True, + **simulation_options["run"] + ) + + if run_type in ['normal', 'submit']: + + if run_type in ['submit']: + 
gk_object.check(every_n_minutes=10) + gk_object.fetch() + + gk_object.read( + label=subfolder_name, + **simulation_options["read"] + ) + + # ------------------------------------------------------------------------------------------------------------------------ + # Pass the information to what power_transport expects + # ------------------------------------------------------------------------------------------------------------------------ + + self.QeGB_turb = np.array([gk_object.results[subfolder_name]['output'][i].Qe_mean for i in range(len(rho_locations))]) + self.QeGB_turb_stds = np.array([gk_object.results[subfolder_name]['output'][i].Qe_std for i in range(len(rho_locations))]) + + self.QiGB_turb = np.array([gk_object.results[subfolder_name]['output'][i].Qi_mean for i in range(len(rho_locations))]) + self.QiGB_turb_stds = np.array([gk_object.results[subfolder_name]['output'][i].Qi_std for i in range(len(rho_locations))]) + + self.GeGB_turb = np.array([gk_object.results[subfolder_name]['output'][i].Ge_mean for i in range(len(rho_locations))]) + self.GeGB_turb_stds = np.array([gk_object.results[subfolder_name]['output'][i].Ge_std for i in range(len(rho_locations))]) + + self.GZGB_turb = self.QeGB_turb*0.0 #TODO + self.GZGB_turb_stds = self.QeGB_turb*0.0 #TODO + + self.MtGB_turb = self.QeGB_turb*0.0 #TODO + self.MtGB_turb_stds = self.QeGB_turb*0.0 #TODO + + self.QieGB_turb = self.QeGB_turb*0.0 #TODO + self.QieGB_turb_stds = self.QeGB_turb*0.0 #TODO + + elif run_type == 'prep': + + # Prevent writing the json file from variables, as we will wait for the user to run CGYRO externally and provide the json themselves + self._write_json_from_variables_turb = False + + # Wait until the user has placed the json file in the right folder + + self.powerstate.profiles_transport.write_state(self.folder / subfolder_name / "input.gacode") + + pre_checks(self) + + file_path = self.folder / 'fluxes_turb.json' + + attempts = 0 + all_good = post_checks(self) if file_path.exists() 
else False + while (file_path.exists() is False) or (not all_good): + if attempts > 0: + print(f"\n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", typeMsg='i') + print(f"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", typeMsg='i') + print(f" MITIM could not find the file... looping back", typeMsg='i') + print(f"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", typeMsg='i') + print(f"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", typeMsg='i') + logic_to_wait(self.folder, self.folder / subfolder_name) + attempts += 1 + + if file_path.exists(): + all_good = post_checks(self) + + if 'Qi_stable_criterion' in simulation_options: + self._stable_correction(simulation_options) + + def _stable_correction(self, simulation_options): + + Qi_stable_criterion = simulation_options["Qi_stable_criterion"] + Qi_stable_percent_error = simulation_options["Qi_stable_percent_error"] + + # Check if Qi in MW/m2 < Qi_stable_criterion + QiMWm2 = self.QiGB_turb * self.powerstate.plasma['Qgb'][0,1:].cpu().numpy() + QiGB_target = self.powerstate.plasma['QiGB'][0,1:].cpu().numpy() + + radii_stable = QiMWm2 < Qi_stable_criterion + + for i in range(len(radii_stable)): + if radii_stable[i]: + print(f"\t- Qi considered stable at radius #{i}, ({QiMWm2[i]:.2f} < {Qi_stable_criterion:.2f})", typeMsg='i') + Qi_std = QiGB_target[i] * Qi_stable_percent_error / 100 + print(f"\t\t- Assigning {Qi_stable_percent_error:.1f}% from target as standard deviation: {Qi_std:.2f} instead of {self.QiGB_turb_stds[i]}", typeMsg='i') + self.QiGB_turb_stds[i] = Qi_std + + +class cgyro_model(gyrokinetic_model): + + def evaluate_turbulence(self): + + if self.transport_evaluator_options["cgyro"].get("run_base_tglf", True): + # Run base TGLF, to keep track of discrepancies! 
--------------------------------------------- + simulation_options_tglf = self.transport_evaluator_options["tglf"] + simulation_options_tglf["use_scan_trick_for_stds"] = None + self._evaluate_tglf(pass_info = False) + # -------------------------------------------------------------------------------------------- + + self._evaluate_gyrokinetic_model(code = 'cgyro', gk_object = CGYROtools.CGYRO) + + +def pre_checks(self): + + plasma = self.powerstate.plasma + + txt = "\nFluxes to be matched by turbulence ( Target - Neoclassical ):" + + # Print gradients + for var, varn in zip( + ["r/a ", "rho ", "a/LTe", "a/LTi", "a/Lne", "a/LnZ", "a/Lw0"], + ["roa", "rho", "aLte", "aLti", "aLne", "aLnZ", "aLw0"], + ): + txt += f"\n{var} = " + for j in range(plasma["rho"].shape[1] - 1): + txt += f"{plasma[varn][0,j+1]:.6f} " + + # Print target fluxes + for var, varn in zip( + ["Qe (GB)", "Qi (GB)", "Ge (GB)", "GZ (GB)", "Mt (GB)"], + ["QeGB", "QiGB", "GeGB", "GZGB", "MtGB"], + ): + txt += f"\n{var} = " + for j in range(plasma["rho"].shape[1] - 1): + txt += f"{plasma[varn][0,j+1]-self.__dict__[f'{varn}_neoc'][j]:.4e} " + + print(txt) + +def logic_to_wait(folder, subfolder): + print(f"\n**** Simulation inputs prepared. 
Please, run it from the simulation setup in folder:\n", typeMsg='i') + print(f"\t {subfolder}\n", typeMsg='i') + print(f"**** When finished, the fluxes_turb.json file should be placed in:\n", typeMsg='i') + print(f"\t {folder}/fluxes_turb.json\n", typeMsg='i') + while not print(f"**** When you have done that, please say yes", typeMsg='q'): + pass + +def post_checks(self, rtol = 1e-3): + + with open(self.folder / 'fluxes_turb.json', 'r') as f: + json_dict = json.load(f) + + additional_info_from_json = json_dict.get('additional_info', {}) + + all_good = True + + if len(additional_info_from_json) == 0: + print(f"\t- No additional info found in fluxes_turb.json to be compared with", typeMsg='i') + + else: + print(f"\t- Additional info found in fluxes_turb.json:", typeMsg='i') + for k, v in additional_info_from_json.items(): + vP = self.powerstate.plasma[k].cpu().numpy()[0,1:] + print(f"\t {k} from JSON : {[round(i,4) for i in v]}", typeMsg='i') + print(f"\t {k} from POWERSTATE: {[round(i,4) for i in vP]}", typeMsg='i') + + if not np.allclose(v, vP, rtol=rtol): + all_good = print(f"{k} does not match with a relative tolerance of {rtol*100.0:.2f}%:", typeMsg='q') + + return all_good + +def write_json_CGYRO(roa, fluxes_mean, fluxes_stds, additional_info = None, file = 'fluxes_turb.json'): + ''' + ********************* + Helper to write JSON + ********************* + roa + Must be an array: [0.25, 0.35, ...] + fluxes_mean + Must be a dictionary with the fields and arrays: + 'QeMWm2': [0.1, 0.2, ...], + 'QiMWm2': ..., + 'Ge1E20m2': ..., + 'GZ1E20m2': ..., + 'MtJm2': ..., + 'QieMWm3': .. + or, alternatively (or complementary), in GB units: + 'QeGB': [0.1, 0.2, ...], + 'QiGB': ..., + 'GeGB': ..., + 'GZGB': ..., + 'MtGB': ..., + 'QieGB': .. 
+ fluxes_stds + Exact same structure as fluxes_mean + additional_info + A dictionary with any additional information to include in the JSON and compare to powerstate, + for example (and recommended): + 'aLte': [0.2, 0.5, ...], + 'aLti': [0.3, 0.6, ...], + 'aLne': [0.3, 0.6, ...], + 'Qgb': [0.4, 0.7, ...], + 'rho': [0.2, 0.5, ...], + ''' + + if additional_info is None: + additional_info = {} + + with open(file, 'w') as f: + + additional_info_extended = additional_info | {'roa': roa.tolist() if not isinstance(roa, list) else roa} + + json_dict = { + 'fluxes_mean': fluxes_mean, + 'fluxes_stds': fluxes_stds, + 'additional_info': additional_info_extended + } + + def convert_numpy(obj): + if isinstance(obj, dict): + return {k: convert_numpy(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_numpy(v) for v in obj] + elif isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, (np.generic,)): + return obj.item() + else: + return obj + + json.dump(convert_numpy(json_dict), f, indent=4) diff --git a/src/mitim_modules/powertorch/physics_models/transport_gx.py b/src/mitim_modules/powertorch/physics_models/transport_gx.py new file mode 100644 index 00000000..fb9d6a80 --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/transport_gx.py @@ -0,0 +1,8 @@ +from mitim_modules.powertorch.physics_models.transport_cgyro import gyrokinetic_model +from mitim_tools.simulation_tools.physics import GXtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class gx_model(gyrokinetic_model): + def evaluate_turbulence(self): + self._evaluate_gyrokinetic_model(code = 'gx', gk_object = GXtools.GX) diff --git a/src/mitim_modules/powertorch/physics_models/transport_neo.py b/src/mitim_modules/powertorch/physics_models/transport_neo.py new file mode 100644 index 00000000..9411f92b --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/transport_neo.py @@ -0,0 +1,68 @@ +import numpy as np +from 
mitim_tools.gacode_tools import NEOtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class neo_model: + + def evaluate_neoclassical(self): + + # ------------------------------------------------------------------------------------------------------------------------ + # Grab options + # ------------------------------------------------------------------------------------------------------------------------ + + simulation_options = self.transport_evaluator_options["neo"] + cold_start = self.cold_start + + percent_error = simulation_options["percent_error"] + impurityPosition = self.powerstate.impurityPosition_transport + + # ------------------------------------------------------------------------------------------------------------------------ + # Run + # ------------------------------------------------------------------------------------------------------------------------ + + rho_locations = [self.powerstate.plasma["rho"][0, 1:][i].item() for i in range(len(self.powerstate.plasma["rho"][0, 1:]))] + + neo = NEOtools.NEO(rhos=rho_locations) + + _ = neo.prep( + self.powerstate.profiles_transport, + self.folder, + cold_start = cold_start, + ) + + neo.run( + 'base_neo', + cold_start=cold_start, + forceIfcold_start=True, + **simulation_options["run"] + ) + + neo.read( + label='base', + **simulation_options["read"]) + + Qe = np.array([neo.results['base']['output'][i].Qe for i in range(len(rho_locations))]) + Qi = np.array([neo.results['base']['output'][i].Qi for i in range(len(rho_locations))]) + Ge = np.array([neo.results['base']['output'][i].Ge for i in range(len(rho_locations))]) + GZ = np.array([neo.results['base']['output'][i].GiAll[impurityPosition-1] for i in range(len(rho_locations))]) + Mt = np.array([neo.results['base']['output'][i].Mt for i in range(len(rho_locations))]) + + # ------------------------------------------------------------------------------------------------------------------------ + # Pass the 
information to what power_transport expects + # ------------------------------------------------------------------------------------------------------------------------ + + self.QeGB_neoc = Qe + self.QiGB_neoc = Qi + self.GeGB_neoc = Ge + self.GZGB_neoc = GZ + self.MtGB_neoc = Mt + + # Uncertainties is just a percent of the value + self.QeGB_neoc_stds = abs(Qe) * percent_error/100.0 + self.QiGB_neoc_stds = abs(Qi) * percent_error/100.0 + self.GeGB_neoc_stds = abs(Ge) * percent_error/100.0 + self.GZGB_neoc_stds = abs(GZ) * percent_error/100.0 + self.MtGB_neoc_stds = abs(Mt) * percent_error/100.0 + + return neo diff --git a/src/mitim_modules/powertorch/physics_models/transport_tglf.py b/src/mitim_modules/powertorch/physics_models/transport_tglf.py new file mode 100644 index 00000000..cf0c14cf --- /dev/null +++ b/src/mitim_modules/powertorch/physics_models/transport_tglf.py @@ -0,0 +1,471 @@ +from pathlib import Path +import numpy as np +import pandas as pd +from mitim_tools.misc_tools import IOtools +from mitim_tools.gacode_tools import TGLFtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class tglf_model: + + def evaluate_turbulence(self): + self._evaluate_tglf() + + # Have it separate such that I can call it from the CGYRO class but without the decorator + def _evaluate_tglf(self, pass_info = True): + + # ------------------------------------------------------------------------------------------------------------------------ + # Grab options + # ------------------------------------------------------------------------------------------------------------------------ + + simulation_options = self.transport_evaluator_options["tglf"] + cold_start = self.cold_start + + Qi_includes_fast = simulation_options["Qi_includes_fast"] + use_tglf_scan_trick = simulation_options["use_scan_trick_for_stds"] + reuse_scan_ball_file = self.powerstate.transport_options['folder'] / 'Outputs' / 'tglf_ball.npz' if 
simulation_options.get("reuse_scan_ball", False) else None + cores_per_tglf_instance = simulation_options["cores_per_tglf_instance"] + keep_tglf_files = simulation_options["keep_files"] + percent_error = simulation_options["percent_error"] + + # Grab impurity from powerstate ( because it may have been modified in produce_profiles() ) + impurityPosition = self.powerstate.impurityPosition_transport + + # ------------------------------------------------------------------------------------------------------------------------ + # Prepare TGLF object + # ------------------------------------------------------------------------------------------------------------------------ + + rho_locations = [self.powerstate.plasma["rho"][0, 1:][i].item() for i in range(len(self.powerstate.plasma["rho"][0, 1:]))] + + tglf = TGLFtools.TGLF(rhos=rho_locations) + + _ = tglf.prep( + self.powerstate.profiles_transport, + self.folder, + cold_start = cold_start, + ) + + # ------------------------------------------------------------------------------------------------------------------------ + # Run TGLF (base) + # ------------------------------------------------------------------------------------------------------------------------ + + tglf.run( + 'base_tglf', + ApplyCorrections=False, + cold_start= cold_start, + forceIfcold_start=True, + extra_name= self.name, + slurm_setup={ + "cores": cores_per_tglf_instance, + "minutes": 2, + }, + attempts_execution=2, + only_minimal_files=keep_tglf_files in ['none'], + **simulation_options["run"] + ) + + tglf.read( + label='base', + require_all_files=False, + **simulation_options["read"]) + + # Grab values + Qe = np.array([tglf.results['base']['output'][i].Qe for i in range(len(rho_locations))]) + Qi = np.array([tglf.results['base']['output'][i].Qi for i in range(len(rho_locations))]) + Ge = np.array([tglf.results['base']['output'][i].Ge for i in range(len(rho_locations))]) + GZ = np.array([tglf.results['base']['output'][i].GiAll[impurityPosition] for i 
in range(len(rho_locations))]) + Mt = np.array([tglf.results['base']['output'][i].Mt for i in range(len(rho_locations))]) + S = np.array([tglf.results['base']['output'][i].Se for i in range(len(rho_locations))]) + + if Qi_includes_fast: + + Qifast = [tglf.results['base']['output'][i].Qifast for i in range(len(rho_locations))] + + if Qifast.sum() != 0.0: + print(f"\t- Qi includes fast ions, adding their contribution") + Qi += Qifast + + Flux_base = np.array([Qe, Qi, Ge, GZ, Mt, S]) + + # ------------------------------------------------------------------------------------------------------------------------ + # Evaluate TGLF uncertainty + # ------------------------------------------------------------------------------------------------------------------------ + + if use_tglf_scan_trick is None: + + # ******************************************************************* + # Just apply an ad-hoc percent error to the results + # ******************************************************************* + + Flux_mean = Flux_base + Flux_std = abs(Flux_mean)*percent_error/100.0 + + else: + + # ******************************************************************* + # Run TGLF with scans to estimate the uncertainty + # ******************************************************************* + + Flux_mean, Flux_std = _run_tglf_uncertainty_model( + tglf, + rho_locations, + self.powerstate.predicted_channels, + Flux_base = Flux_base, + impurityPosition=impurityPosition, + delta = use_tglf_scan_trick, + cold_start=cold_start, + extra_name=self.name, + cores_per_tglf_instance=cores_per_tglf_instance, + Qi_includes_fast=Qi_includes_fast, + only_minimal_files=keep_tglf_files in ['none', 'base'], + reuse_scan_ball_file=reuse_scan_ball_file, + **simulation_options["run"] + ) + + self._raise_warnings(tglf, rho_locations, Qi_includes_fast) + + # ------------------------------------------------------------------------------------------------------------------------ + # Pass the information to what 
power_transport expects + # ------------------------------------------------------------------------------------------------------------------------ + + if pass_info: + + self.QeGB_turb = Flux_mean[0] + self.QeGB_turb_stds = Flux_std[0] + + self.QiGB_turb = Flux_mean[1] + self.QiGB_turb_stds = Flux_std[1] + + self.GeGB_turb = Flux_mean[2] + self.GeGB_turb_stds = Flux_std[2] + + self.GZGB_turb = Flux_mean[3] + self.GZGB_turb_stds = Flux_std[3] + + self.MtGB_turb = Flux_mean[4] + self.MtGB_turb_stds = Flux_std[4] + + self.QieGB_turb = Flux_mean[5] + self.QieGB_turb_stds = Flux_std[5] + + return tglf + + def _raise_warnings(self, tglf, rho_locations, Qi_includes_fast): + + for i in range(len(tglf.profiles.Species)): + gacode_type = tglf.profiles.Species[i]['S'] + for rho in rho_locations: + try: + tglf_type = tglf.inputs_files[rho].ions_info[i+2]['type'] + except KeyError: + print(f"\t\t\t* Could not determine ion type from TGLF inputs because ion {i+2} was not there for {rho =}, skipping consistency check", typeMsg="w") + continue + + if gacode_type[:5] != tglf_type[:5]: + print(f"\t- For location {rho=:.2f}, ion specie #{i+1} ({tglf.profiles.Species[i]['N']}) is considered '{gacode_type}' by gacode but '{tglf_type}' by TGLF. 
Make sure this is consistent with your use case", typeMsg="w") + + if tglf_type == 'fast': + + if Qi_includes_fast: + print(f"\t\t\t* The fast ion considered by TGLF was summed into the Qi", typeMsg="i") + else: + print(f"\t\t\t* The fast ion considered by TGLF was NOT summed into the Qi", typeMsg="i") + + else: + print(f"\t\t\t* The thermal ion considered by TGLF was summed into the Qi", typeMsg="i") + +def _run_tglf_uncertainty_model( + tglf, + rho_locations, + predicted_channels, + Flux_base = None, + code_settings=None, + extraOptions=None, + impurityPosition=1, + delta=0.02, + minimum_abs_gradient=0.005, # This is 0.5% of aLx=1.0, to avoid extremely small scans when, for example, having aLn ~ 0.0 + cold_start=False, + extra_name="", + remove_folders_out = False, + cores_per_tglf_instance = 4, # e.g. 4 core per radius, since this is going to launch ~ Nr=5 x (Nv=6 x Nd=2 + 1) = 65 TGLFs at once + Qi_includes_fast=False, + only_minimal_files=True, # Since I only care about fluxes here, do not retrieve all the files + reuse_scan_ball_file=None, # If not None, it will reuse previous evaluations within the delta ball (to capture combinations) + ): + + print(f"\t- Running TGLF standalone scans ({delta = }) to determine relative errors") + + # Prepare scan + variables_to_scan = [] + for i in predicted_channels: + if i == 'te': variables_to_scan.append('RLTS_1') + if i == 'ti': variables_to_scan.append('RLTS_2') + if i == 'ne': variables_to_scan.append('RLNS_1') + if i == 'nZ': variables_to_scan.append(f'RLNS_{impurityPosition+2}') + if i == 'w0': variables_to_scan.append('VEXB_SHEAR') #TODO: is this correct? or VPAR_SHEAR? 
+ + #TODO: Only if that parameter is changing at that location + if 'te' in predicted_channels or 'ti' in predicted_channels: + variables_to_scan.append('TAUS_2') + if 'te' in predicted_channels or 'ne' in predicted_channels: + variables_to_scan.append('XNUE') + if 'te' in predicted_channels or 'ne' in predicted_channels: + variables_to_scan.append('BETAE') + + relative_scan = [1-delta, 1+delta] + + # Enforce at least "minimum_abs_gradient" in gradient, to avoid zero gradient situations + minimum_delta_abs = {} + for ikey in variables_to_scan: + if 'RL' in ikey: + minimum_delta_abs[ikey] = minimum_abs_gradient + + name = 'turb_drives' + + tglf.rhos = rho_locations # To avoid the case in which TGYRO was run with an extra rho point + + # Estimate job minutes based on cases and cores (mostly IO I think at this moment, otherwise it should be independent on cases) + num_cases = len(rho_locations) * len(variables_to_scan) * len(relative_scan) + if cores_per_tglf_instance == 1: + minutes = 10 * (num_cases / 60) # Ad-hoc formula + else: + minutes = 1 * (num_cases / 60) # Ad-hoc formula + + # Enforce minimum minutes + minutes = max(2, minutes) + + tglf.runScanTurbulenceDrives( + subfolder = name, + variablesDrives = variables_to_scan, + varUpDown = relative_scan, + minimum_delta_abs = minimum_delta_abs, + code_settings = code_settings, + extraOptions = extraOptions, + ApplyCorrections = False, + add_baseline_to = 'none', + cold_start=cold_start, + forceIfcold_start=True, + slurm_setup={ + "cores": cores_per_tglf_instance, + "minutes": minutes, + }, + extra_name = f'{extra_name}_{name}', + positionIon=impurityPosition+2, + attempts_execution=2, + only_minimal_files=only_minimal_files, + ) + + # Remove folders because they are heavy to carry many throughout + if remove_folders_out: + IOtools.shutil_rmtree(tglf.FolderGACODE) + + Qe = np.zeros((len(rho_locations), len(variables_to_scan)*len(relative_scan) )) + Qi = np.zeros((len(rho_locations), 
len(variables_to_scan)*len(relative_scan) )) + Ge = np.zeros((len(rho_locations), len(variables_to_scan)*len(relative_scan) )) + GZ = np.zeros((len(rho_locations), len(variables_to_scan)*len(relative_scan) )) + Mt = np.zeros((len(rho_locations), len(variables_to_scan)*len(relative_scan) )) + S = np.zeros((len(rho_locations), len(variables_to_scan)*len(relative_scan) )) + + cont = 0 + for vari in variables_to_scan: + jump = tglf.scans[f'{name}_{vari}']['Qe'].shape[-1] + + # Outputs + Qe[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qe_gb'] + Qi[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Qi_gb'] + (0 if not Qi_includes_fast else tglf.scans[f'{name}_{vari}']['Qifast_gb']) + Ge[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Ge_gb'] + GZ[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Gi_gb'] + Mt[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['Mt_gb'] + S[:,cont:cont+jump] = tglf.scans[f'{name}_{vari}']['S_gb'] + + cont += jump + + if Qi_includes_fast: + print(f"\t- Qi includes fast ions, adding their contribution") + + # Add the base that was calculated earlier + if Flux_base is not None: + Qe = np.append(np.atleast_2d(Flux_base[0]).T, Qe, axis=1) + Qi = np.append(np.atleast_2d(Flux_base[1]).T, Qi, axis=1) + Ge = np.append(np.atleast_2d(Flux_base[2]).T, Ge, axis=1) + GZ = np.append(np.atleast_2d(Flux_base[3]).T, GZ, axis=1) + Mt = np.append(np.atleast_2d(Flux_base[4]).T, Mt, axis=1) + S = np.append(np.atleast_2d(Flux_base[5]).T, S, axis=1) + + if reuse_scan_ball_file is not None: + Qe, Qi, Ge, GZ, Mt, S = _ball_workflow(reuse_scan_ball_file, variables_to_scan, rho_locations, tglf, impurityPosition, Qi_includes_fast, Qe, Qi, Ge, GZ, Mt, S, delta_ball=delta) + + # Calculate the standard deviation of the scans, that's going to be the reported stds + + def calculate_mean_std(Q): + # Assumes Q is [radii, points], with [radii, 0] being the baseline + + Qm = np.nanmean(Q, axis=1) + Qstd = np.nanstd(Q, axis=1) + + # Qm = Q[:,0] + # Qstd = np.std(Q, axis=1) + 
+ # Qstd = ( Q.max(axis=1)-Q.min(axis=1) )/2 /2 # Such that the range is 2*std + # Qm = Q.min(axis=1) + Qstd*2 # Mean is at the middle of the range + + return Qm, Qstd + + Qe_point, Qe_std = calculate_mean_std(Qe) + Qi_point, Qi_std = calculate_mean_std(Qi) + Ge_point, Ge_std = calculate_mean_std(Ge) + GZ_point, GZ_std = calculate_mean_std(GZ) + Mt_point, Mt_std = calculate_mean_std(Mt) + S_point, S_std = calculate_mean_std(S) + + #TODO: Careful with fast particles + Flux_mean = [Qe_point, Qi_point, Ge_point, GZ_point, Mt_point, S_point] + Flux_std = [Qe_std, Qi_std, Ge_std, GZ_std, Mt_std, S_std] + + return Flux_mean, Flux_std + + +def _ball_workflow(file, variables_to_scan, rho_locations, tglf, impurityPosition, Qi_includes_fast, Qe_orig, Qi_orig, Ge_orig, GZ_orig, Mt_orig, S_orig, delta_ball=0.02): + ''' + Workflow to reuse previous TGLF evaluations within a delta ball to capture combinations + around the current base case. + ''' + + # Grab all inputs and outputs of the current run + input_params_keys = variables_to_scan + input_params = np.zeros((len(rho_locations), len(input_params_keys), len(tglf.results))) + + input_params_base = np.zeros((len(rho_locations), len(input_params_keys))) + + output_params_keys = ['Qe', 'Qi', 'Ge', 'Gi', 'Mt', 'S'] + output_params = np.zeros((len(rho_locations), len(output_params_keys), len(tglf.results))) + + for i, key in enumerate(tglf.results.keys()): + for irho in range(len(rho_locations)): + + # Grab all inputs in array with shape (Nr, Ninputs, Ncases) + for ikey in range(len(input_params_keys)): + input_params[irho, ikey, i] = tglf.results[key]['parsed'][irho][input_params_keys[ikey]] + + # Grab base inputs in array with shape (Nr, Ninputs) + if key == 'base': + for ikey in range(len(input_params_keys)): + input_params_base[irho, ikey] = tglf.results[key]['parsed'][irho][input_params_keys[ikey]] + + # Grab all outputs in array with shape (Nr, Noutputs, Ncases) + output_params[irho, 0, i] = 
tglf.results[key]['output'][irho].Qe + output_params[irho, 1, i] = tglf.results[key]['output'][irho].Qi + (0 if not Qi_includes_fast else tglf.results[key]['output'][irho].Qifast) + output_params[irho, 2, i] = tglf.results[key]['output'][irho].Ge + output_params[irho, 3, i] = tglf.results[key]['output'][irho].GiAll[impurityPosition] + output_params[irho, 4, i] = tglf.results[key]['output'][irho].Mt + output_params[irho, 5, i] = tglf.results[key]['output'][irho].Se + + # -------------------------------------------------------------------------------------------------------- + # Read previous ball and append + # -------------------------------------------------------------------------------------------------------- + + if Path(file).exists(): + + print(f"\t- Reusing previous TGLF scan evaluations within the delta ball to capture combinations") + + # Grab ball contents + with np.load(file) as data: + rho_ball = data['rho'] + input_ball = data['input_params'] + output_ball = data['output_params'] + + precision_check = 1E-5 # I needed to add a small number to avoid numerical issues because TGLF input files have limited precision + + # Get the indeces of the points within the delta ball (condition in which all inputs are within the delta of the base case for that specific radius) + indices_to_grab = {} + for irho in range(len(rho_locations)): + indices_to_grab[irho] = [] + inputs_base = input_params_base[irho, :] + for icase in range(input_ball.shape[-1]): + inputs_case = input_ball[irho, :, icase] + + # Check if all inputs are within the delta ball (but not exactly equal, in case the ball has been run at the wrong time) + is_this_within_ball = True + for ikey in range(len(input_params_keys)): + val_current = inputs_base[ikey] + val_ball = inputs_case[ikey] + + # I need to have all inputs within the delta ball + is_this_within_ball = is_this_within_ball and ( abs(val_current-val_ball) <= abs(val_current*delta_ball) + precision_check ) + + if is_this_within_ball: + 
indices_to_grab[irho].append(icase) + + print(f"\t\t- Out of {input_ball.shape[-1]} points in file, found {len(indices_to_grab[irho])} at location {irho} within the delta ball ({delta_ball*100}%)", typeMsg="i" if len(indices_to_grab[irho]) > 0 else "") + + # Make an output_ball_select array equivalent to output_ball but only with the points within the delta ball (rest make them NaN) + output_ball_select = np.full_like(output_ball, np.nan) + for irho in range(len(rho_locations)): + for icase in indices_to_grab[irho]: + output_ball_select[irho, :, icase] = output_ball[irho, :, icase] + + # Append those points to the current run (these will always have shape (Nr, Ncases+original) but those cases that were not in the ball will be NaN) + # The reason to do it this way is that I want to keep it as a uniform shape to be able to calculate stds later, and I would risk otherwise having different shapes per radius + Qe = np.append(Qe_orig, output_ball_select[:, 0, :], axis=1) + Qi = np.append(Qi_orig, output_ball_select[:, 1, :], axis=1) + Ge = np.append(Ge_orig, output_ball_select[:, 2, :], axis=1) + GZ = np.append(GZ_orig, output_ball_select[:, 3, :], axis=1) + Mt = np.append(Mt_orig, output_ball_select[:, 4, :], axis=1) + S = np.append(S_orig, output_ball_select[:, 5, :], axis=1) + print(f"\t\t>>> Flux arrays have shape {Qe.shape} after appending ball points (NaNs are added to those locations and cases that did not fall within delta ball)") + + # Remove repeat points (for example when transitioning from simple relaxation initialization to full optimization) + def remove_duplicate_cases(*arrays): + """Remove duplicate cases (columns) from arrays of shape (rho_size, cases_size) + + Returns: + tuple: (unique_arrays, duplicate_arrays) where each is a tuple of arrays + """ + + # Stack all arrays to create a combined signature for each case + combined = np.vstack(arrays) # Shape: (total_channels, cases_size) + + # Find unique cases, handling NaN values properly (Use pandas for 
robust duplicate detection with NaN support) + df = pd.DataFrame(combined.T) # Transpose so each row is a case + unique_indices = df.drop_duplicates().index.values + nan_indices = np.where(np.all(np.isnan(combined), axis=0))[0] + unique_notnan_indeces = [idx for idx in unique_indices if idx not in nan_indices] + all_indices = np.arange(combined.shape[1]) + duplicate_indices = np.setdiff1d(all_indices, unique_notnan_indeces) + + print(f"\t\t* Removed {len(duplicate_indices)} duplicate / all-nan cases, keeping {len(unique_notnan_indeces)} unique cases", typeMsg="i") + + # Return arrays with unique cases and duplicate cases + unique_arrays = tuple(arr[:, unique_notnan_indeces] for arr in arrays) + duplicate_arrays = tuple(arr[:, duplicate_indices] for arr in arrays) + + return unique_arrays, duplicate_arrays + + unique_results, duplicate_results = remove_duplicate_cases(Qe, Qi, Ge, GZ, Mt, S) + + Qe, Qi, Ge, GZ, Mt, S = unique_results + + print(f"\t\t>>> Flux arrays have shape {Qe.shape} after finding unique points") + + else: + + rho_ball = np.array([]) + input_ball = np.array([]) + output_ball = np.array([]) + + Qe, Qi, Ge, GZ, Mt, S = Qe_orig, Qi_orig, Ge_orig, GZ_orig, Mt_orig, S_orig + + # -------------------------------------------------------------------------------------------------------- + # Save new ball + # -------------------------------------------------------------------------------------------------------- + + # Append to the values read from previous ball + if rho_ball.shape[0] != 0: + input_params = np.append(input_ball, input_params, axis=2) + output_params = np.append(output_ball, output_params, axis=2) + + # Save the new ball + np.savez(file, rho=rho_locations, input_params=input_params, output_params=output_params) + print(f"\t- Saved updated ball with {input_params.shape[-1]} points to {IOtools.clipstr(file)}", typeMsg="i") + + return Qe, Qi, Ge, GZ, Mt, S diff --git a/src/mitim_modules/powertorch/scripts/calculateTargets.py 
b/src/mitim_modules/powertorch/scripts/calculateTargets.py index 9b478dc0..d82cd4ce 100644 --- a/src/mitim_modules/powertorch/scripts/calculateTargets.py +++ b/src/mitim_modules/powertorch/scripts/calculateTargets.py @@ -1,5 +1,5 @@ """ -calculateTargets.py input.gacode 1 +calculateTargets.py input.gacode run1 """ import sys @@ -7,89 +7,47 @@ from mitim_tools.misc_tools import IOtools from mitim_tools.gacode_tools import PROFILEStools from mitim_modules.powertorch import STATEtools -from mitim_modules.powertorch.physics import TRANSPORTtools,TARGETStools +from mitim_modules.powertorch.physics_models import targets_analytic from IPython import embed def calculator( input_gacode, - typeCalculation=2, - TypeTarget=3, + targets_evolve=["qie", "qrad", "qfus"], folder="~/scratch/", cold_start=True, - rho_vec=np.linspace(0.1, 0.9, 9), + file_name = "input.gacode.new.powerstate", + rho_vec=np.linspace(0.01, 0.94, 50), profProvided=False, - fineTargetsResolution = None, + targets_resolution = None, ): - profiles = ( - input_gacode if profProvided else PROFILEStools.PROFILES_GACODE(input_gacode) - ) + profiles = input_gacode if profProvided else PROFILEStools.gacode_state(input_gacode) - # Calculate using TGYRO - if typeCalculation == 1: - p = STATEtools.powerstate( - profiles, - EvolutionOptions={ - "rhoPredicted": rho_vec, - 'fineTargetsResolution': fineTargetsResolution, - }, - TargetOptions={ - "targets_evaluator": TARGETStools.analytical_model, - "ModelOptions": { - "TypeTarget": TypeTarget, - "TargetCalc": "tgyro"}, - }, - TransportOptions={ - "transport_evaluator": TRANSPORTtools.tgyro_model, - "ModelOptions": { - "cold_start": cold_start, - "launchSlurm": True, - "MODELparameters": { - "Physics_options": { - "TypeTarget": 3, - "TurbulentExchange": 0, - "PtotType": 1, - "GradientsType": 0, - "InputType": 1, - }, - "ProfilesPredicted": ["te", "ti", "ne"], - "RhoLocations": rho_vec, - "applyCorrections": { - "Tfast_ratio": False, - "Ti_thermals": True, - "ni_thermals": 
True, - "recompute_ptot": False, - }, - "transport_model": {"turbulence": 'TGLF',"TGLFsettings": 5, "extraOptionsTGLF": {}}, - }, - "includeFastInQi": False, + p = STATEtools.powerstate( + profiles, + evolution_options={ + "rhoPredicted": rho_vec, + }, + target_options={ + "evaluator": targets_analytic.analytical_model, + "options": { + "targets_evolve": targets_evolve, + "target_evaluator_method": "powerstate", + "targets_resolution": targets_resolution }, - }, - ) - - # Calculate using powerstate - elif typeCalculation == 2: - p = STATEtools.powerstate( - profiles, - EvolutionOptions={ - "rhoPredicted": rho_vec, - 'fineTargetsResolution': fineTargetsResolution, - }, - TargetOptions={ - "targets_evaluator": TARGETStools.analytical_model, - "ModelOptions": { - "TypeTarget": TypeTarget, - "TargetCalc": "powerstate"}, - }, - TransportOptions={ - "transport_evaluator": None, - "ModelOptions": {} - }, - ) + }, + transport_options={ + "evaluator": None, + "options": {} + }, + ) # Determine performance nameRun="test" folder=IOtools.expandPath(folder) + if not folder.exists(): + folder.mkdir(parents=True) + # ************************************ # Calculate state # ************************************ @@ -101,44 +59,39 @@ def calculator( # ************************************ p.plasma["Pfus"] = ( - p.volume_integrate( + p.from_density_to_flux( (p.plasma["qfuse"] + p.plasma["qfusi"]) * 5.0 ) * p.plasma["volp"] )[..., -1] p.plasma["Prad"] = ( - p.volume_integrate(p.plasma["qrad"]) * p.plasma["volp"] + p.from_density_to_flux(p.plasma["qrad"]) * p.plasma["volp"] )[..., -1] - p.profiles.deriveQuantities() + p.profiles.derive_quantities() - p.to_gacode( - write_input_gacode=folder / "input.gacode.new.powerstate", + p.from_powerstate( + write_input_gacode=folder / file_name, position_in_powerstate_batch=0, postprocess_input_gacode={ "Tfast_ratio": False, "Ti_thermals": False, "ni_thermals": False, - "recompute_ptot": False, - "ensureMachNumber": None, + "recalculate_ptot": 
False, + "force_mach": None, }, insert_highres_powers=True, rederive_profiles=False, ) - p.plasma["Pin"] = ( - (p.plasma["Paux_e"] + p.plasma["Paux_i"]) * p.plasma["volp"] - )[..., -1] - p.plasma["Q"] = p.plasma["Pfus"] / p.plasma["Pin"] + p.plasma["Q"] = p.profiles.derived["Q"] + p.plasma['Prad'] = p.profiles.derived['Prad'] # ************************************ # Print Info # ************************************ - print( - f"Q = {p.plasma['Q'].item():.2f} (Pfus = {p.plasma['Pfus'].item():.2f}MW, Pin = {p.plasma['Pin'].item():.2f}MW)" - ) - + print(f"Q = {p.plasma['Q'].item():.2f}") print(f"Prad = {p.plasma['Prad'].item():.2f}MW") return p @@ -146,6 +99,6 @@ def calculator( if __name__ == "__main__": input_gacode = IOtools.expandPath(sys.argv[1]) - typeCalculation = int(sys.argv[2]) + folder = IOtools.expandPath(sys.argv[2]) - calculator(input_gacode, typeCalculation=typeCalculation) + calculator(input_gacode, folder=folder) diff --git a/src/mitim_modules/powertorch/scripts/compareRadialResolution.py b/src/mitim_modules/powertorch/scripts/compareRadialResolution.py index db600e89..bdf140d9 100644 --- a/src/mitim_modules/powertorch/scripts/compareRadialResolution.py +++ b/src/mitim_modules/powertorch/scripts/compareRadialResolution.py @@ -28,21 +28,21 @@ inputgacode = IOtools.expandPath(args.file) rho = np.array([float(i) for i in args.rhos]) -profiles = PROFILEStools.PROFILES_GACODE(inputgacode) +profiles = PROFILEStools.gacode_state(inputgacode) markersize_coarse = 6 markersize_fine = 3 ls = "o-" -sC = STATEtools.powerstate(profiles,EvolutionOptions={"rhoPredicted": rho},) +sC = STATEtools.powerstate(profiles,evolution_options={"rhoPredicted": rho},) sC.calculateProfileFunctions() sC.calculateTargets() # Full state rho = np.linspace(rho[0], rho[-1], args.res) -sF = STATEtools.powerstate(profiles,EvolutionOptions={"rhoPredicted": rho}) +sF = STATEtools.powerstate(profiles,evolution_options={"rhoPredicted": rho}) sF.calculateProfileFunctions() 
sF.calculateTargets() @@ -151,7 +151,7 @@ ax.legend() ax = axs[0, 1] -varsS = ["Pe", "Pi"] +varsS = ["QeMWm2", "QiMWm2"] s, lab = sF, "Fine " for var in varsS: diff --git a/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py b/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py index 41155bab..011cc2d2 100644 --- a/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py +++ b/src/mitim_modules/powertorch/scripts/compareWithTGYRO.py @@ -22,15 +22,15 @@ # TGYRO t = TGYROtools.TGYROoutput(folderTGYRO) -t.profiles.deriveQuantities() +t.profiles.derive_quantities() t.useFineGridTargets() # STATE -s = STATEtools.powerstate(t.profiles, EvolutionOptions={"rhoPredicted": t.rho[0,1:]}) +s = STATEtools.powerstate(t.profiles, evolution_options={"rhoPredicted": t.rho[0,1:]}) s.calculateProfileFunctions() -# s.TargetOptions['ModelOptions']['TypeTarget'] = 1 +# s.target_options['options']['targets_evolve'] = 1 s.calculateTargets() # @@ -99,7 +99,7 @@ label="TGYRO " + label, markersize=markersize, ) - P = s.volume_integrate(stateQuantity, dim=2) * s.plasma["volp"] + P = s.from_density_to_flux(stateQuantity, dim=2) * s.plasma["volp"] ax.plot( s.plasma["rho"][0], P[0], @@ -132,7 +132,7 @@ label="TGYRO " + label, markersize=markersize, ) - P = s.volume_integrate(stateQuantity, dim=2) * s.plasma["volp"] + P = s.from_density_to_flux(stateQuantity, dim=2) * s.plasma["volp"] ax.plot( s.plasma["rho"][0], P[0], @@ -154,13 +154,13 @@ ax = axs[0, 2] ax.plot(t.rho[0], t.Qe_tar[0], "s-", lw=0.5, label="TGYRO Pe", markersize=markersize) -P = s.plasma["Pe"] +P = s.plasma["QeMWm2"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pe", markersize=markersize) MaxError = np.nanmax(np.abs(t.Qe_tarMW[0] - P[0].cpu().numpy()) / t.Qe_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") ax.plot(t.rho[0], t.Qi_tar[0], "s-", lw=0.5, label="TGYRO Pi", markersize=markersize) -P = s.plasma["Pi"] +P = s.plasma["QiMWm2"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pi", 
markersize=markersize) MaxError = np.nanmax(np.abs(t.Qi_tarMW[0] - P[0].cpu().numpy()) / t.Qi_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") @@ -178,13 +178,13 @@ ax = axs[1, 2] ax.plot(t.rho[0], t.Qe_tarMW[0], "s-", lw=0.5, label="TGYRO Pe", markersize=markersize) -P = s.plasma["Pe"] * s.plasma["volp"] +P = s.plasma["QeMWm2"] * s.plasma["volp"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pe", markersize=markersize) MaxError = np.nanmax(np.abs(t.Qe_tarMW[0] - P[0].cpu().numpy()) / t.Qe_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") ax.plot(t.rho[0], t.Qi_tarMW[0], "s-", lw=0.5, label="TGYRO Pi", markersize=markersize) -P = s.plasma["Pi"] * s.plasma["volp"] +P = s.plasma["QiMWm2"] * s.plasma["volp"] ax.plot(s.plasma["rho"][0], P[0], ls, lw=0.5, label="STATE Pi", markersize=markersize) MaxError = np.nanmax(np.abs(t.Qi_tarMW[0] - P[0].cpu().numpy()) / t.Qi_tarMW[0] * 100.0) print(f"{MaxError = :.3f} %") diff --git a/src/mitim_modules/powertorch/physics/CALCtools.py b/src/mitim_modules/powertorch/utils/CALCtools.py similarity index 80% rename from src/mitim_modules/powertorch/physics/CALCtools.py rename to src/mitim_modules/powertorch/utils/CALCtools.py index 93b0b58c..6dc0d8ea 100644 --- a/src/mitim_modules/powertorch/physics/CALCtools.py +++ b/src/mitim_modules/powertorch/utils/CALCtools.py @@ -1,20 +1,27 @@ import torch import numpy as np +import contextlib from mitim_tools.misc_tools import MATHtools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed # ******************************************************************************************************************** -# Normalized logaritmic gradient calculations +# Gradient calculations # ******************************************************************************************************************** -def integrateGradient(x, z, z0_bound): +def integration_Lx(x, z, f_bound): """ - inputs as - (batch,dim) - From tgyro_profile_functions.f90 - x is r - z is 
1/LT = =-1/T*dT/dr + Integrates the gradient scale length into the profile + (adapted from tgyro_profile_functions.f90) + Inputs as + (batch,dim) + + x is r + z is 1/LT = =-1/T*dT/dr + f_bound is T @ at boundary condition (last point of the given profile) + + Notes: + - If x is r/a, then z is a/LT """ @@ -23,31 +30,25 @@ def integrateGradient(x, z, z0_bound): f1 = b / torch.cumprod(b, 1) * torch.prod(b, 1, keepdims=True) # Add the extra point of bounday condition - f = torch.cat((f1, torch.ones(z.shape[0], 1).to(f1)), dim=1) * z0_bound + f = torch.cat((f1, torch.ones(z.shape[0], 1).to(f1)), dim=1) * f_bound return f - -def produceGradient(r, p): +def derivation_into_Lx(r, p): """ Produces -1/p * dp/dr - or if r is roa: a/Lp + (adapted from expro_util.f90, bound_deriv) + + Notes: + - if r is r/a: a/Lp """ - # This is the same as it happens in expro_util.f90, bound_deriv z = MATHtools.deriv(r, -torch.log(p), array=False) - # # COMMENTED because this should happen at the coarse grid - # z = tgyro_math_zfind(r,p,z=z) - - return z # .nan_to_num(0.0) # Added this so that, when evaluating things like rotation shear, it doesn't blow - -# ******************************************************************************************************************** -# Linear gradient calculations -# ******************************************************************************************************************** + return z -def integrateGradient_lin(x, z, z0_bound): +def integration_dxdr(x, z, z0_bound): """ (batch,dim) From tgyro_profile_functions.f90 @@ -66,7 +67,7 @@ def integrateGradient_lin(x, z, z0_bound): return f -def produceGradient_lin(r, p): +def derivation_into_dxdr(r, p): """ Produces -dp/dr """ @@ -76,72 +77,49 @@ def produceGradient_lin(r, p): return z +# ******************************************************************************************************************** +# Volume calculations +# 
******************************************************************************************************************** +def _to_2d(x, xp): + """Ensure shape (batch, N) for either NumPy or Torch.""" + if xp is np: + return np.atleast_2d(x) + else: # torch + return x.unsqueeze(0) if x.ndim == 1 else x -def integrateQuadPoly(r, s, p=None): +def volume_integration(p, r, volp): """ - (batch,dim) - - Computes int(s*dr), so if s is s*dV/dr, then int(s*dV), which is the full integral - - From tgyro_volume_int.f90 - r - minor raidus - s - s*volp - - (Modified to avoid if statements and for loops) - + Compute the volume integral ∫ p · dV with dV/dr = volp. + + Parameters + ---------- + p, r, volp : 1-D or 2-D NumPy arrays (shape: (N,) or (M, N)) + • If they are 1-D, each represents a single radial profile. + • If they are 2-D -> (batch,dim_radius) + + Returns + ------- + out : ndarray + • 1-D array if the inputs were 1-D + • 2-D array (same leading dimension as the inputs) otherwise """ - if p is None: - p = torch.zeros((r.shape[0], r.shape[1])).to(r) - - # First point - - x1, x2, x3 = r[..., 0], r[..., 1], r[..., 2] - f1, f2, f3 = s[..., 0], s[..., 1], s[..., 2] - - p[..., 1] = (x2 - x1) * ( - (3 * x3 - x2 - 2 * x1) * f1 / 6 / (x3 - x1) - + (3 * x3 - 2 * x2 - x1) * f2 / 6 / (x3 - x2) - - (x2 - x1) ** 2 * f3 / 6 / (x3 - x1) / (x3 - x2) - ) - - # Next points - x1, x2, x3 = r[..., :-2], r[..., 1:-1], r[..., 2:] - f1, f2, f3 = s[..., :-2], s[..., 1:-1], s[..., 2:] - - p[..., 2:] = ( - (x3 - x2) - / (x3 - x1) - / 6 - * ( - (2 * x3 + x2 - 3 * x1) * f3 - + (x3 + 2 * x2 - 3 * x1) * f2 * (x3 - x1) / (x2 - x1) - - (x3 - x2) ** 2 * f1 / (x2 - x1) - ) - ) - - try: - p = torch.cumsum(p, 1) - except: - p = np.cumsum(p, 1) + # Decide backend from *p* only + xp = torch if isinstance(p, torch.Tensor) else np - return p + # Remember whether the caller passed 1-D profiles + one_dim = (p.ndim == 1) and (r.ndim == 1) and (volp.ndim == 1) + # Promote to 2-D for vectorised processing + r_2d = 
_to_2d(r, xp) + pdVdr_2d = _to_2d(p * volp, xp) -def integrateFS(P, r, volp): - """ - Based on the idea that volp = dV/dr, whatever r is - - Ptot = int_V P*dV = int_r P*V'*dr - - """ - - I = integrateQuadPoly( - np.atleast_2d(r), np.atleast_2d(P * volp), p=np.zeros((1, P.shape[0])) - )[0, :] + # Integrate row-wise (using your original routine) + result_2d = MATHtools.integrateQuadPoly(r_2d, pdVdr_2d) - return I + # Collapse back to original rank if necessary + return result_2d[0] if one_dim else result_2d """ @@ -150,11 +128,7 @@ def integrateFS(P, r, volp): ---------------------------------------------------------------------------------------------------------------- """ -import torch -import contextlib - - -class Interp1d(torch.autograd.Function): +class Interp1d_torch(torch.autograd.Function): def __call__(self, x, y, xnew, out=None): return self.forward(x, y, xnew, out) diff --git a/src/mitim_modules/powertorch/utils/POWERplot.py b/src/mitim_modules/powertorch/utils/POWERplot.py index 51bbe449..009dc9a3 100644 --- a/src/mitim_modules/powertorch/utils/POWERplot.py +++ b/src/mitim_modules/powertorch/utils/POWERplot.py @@ -1,7 +1,8 @@ -from mitim_tools.gacode_tools import PROFILEStools +from mitim_tools.plasmastate_tools.utils import state_plotting from mitim_tools.misc_tools import GRAPHICStools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed +from mitim_tools.plasmastate_tools.utils import state_plotting def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, compare_to_state=None, c_orig = "b"): @@ -12,10 +13,10 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co if figs is not None: # Insert profiles with the latest powerstate - profiles_new = self.to_gacode(insert_highres_powers=True) + profiles_new = self.from_powerstate(insert_highres_powers=True) # Plot the inserted profiles together with the original ones - _ = PROFILEStools.plotAll([self.profiles, 
profiles_new], figs=figs) + _ = state_plotting.plotAll([self.profiles, profiles_new], figs=figs) # ----------------------------------------------------------------------------------------------------------- # ---- Plot plasma state @@ -23,59 +24,47 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co set_plots = [ ] - if "te" in self.ProfilesPredicted: + if "te" in self.predicted_channels: set_plots.append( - [ 'te', 'aLte', 'Pe_tr', 'Pe', + [ 'te', 'aLte', 'QeMWm2_tr', 'QeMWm2', 'Electron Temperature','$T_e$ (keV)','$a/LT_e$','$Q_e$ (GB)','$Q_e$ ($MW/m^2$)', 1.0,"Qgb"]) - if "ti" in self.ProfilesPredicted: + if "ti" in self.predicted_channels: set_plots.append( - [ 'ti', 'aLti', 'Pi_tr', 'Pi', + [ 'ti', 'aLti', 'QiMWm2_tr', 'QiMWm2', 'Ion Temperature','$T_i$ (keV)','$a/LT_i$','$Q_i$ (GB)','$Q_i$ ($MW/m^2$)', 1.0,"Qgb"]) - if "ne" in self.ProfilesPredicted: + if "ne" in self.predicted_channels: # If this model provides the raw particle flux, go for it - if 'Ce_raw_tr' in self.plasma: + if 'Ge1E20m2_tr' in self.plasma: set_plots.append( - [ 'ne', 'aLne', 'Ce_raw_tr', 'Ce_raw', + [ 'ne', 'aLne', 'Ge1E20m2_tr', 'Ge1E20m2', 'Electron Density','$n_e$ ($10^{20}m^{-3}$)','$a/Ln_e$','$\\Gamma_e$ (GB)','$\\Gamma_e$ ($10^{20}m^{-3}/s$)', 1E-1,"Ggb"]) else: - if self.useConvectiveFluxes: - set_plots.append( - [ 'ne', 'aLne', 'Ce_tr', 'Ce', - 'Electron Density','$n_e$ ($10^{20}m^{-3}$)','$a/Ln_e$','$Q_{conv,e}$ (GB)','$Q_{conv,e}$ ($MW/m^2$)', - 1E-1,"Qgb"]) - else: - set_plots.append( - [ 'ne', 'aLne', 'Ce_tr', 'Ce', - 'Electron Density','$n_e$ ($10^{20}m^{-3}$)','$a/Ln_e$','$\\Gamma_e$ (GB)','$\\Gamma_e$ ($10^{20}m^{-3}/s$)', - 1E-1,"Ggb"]) - - if "nZ" in self.ProfilesPredicted: + set_plots.append( + [ 'ne', 'aLne', 'Ce_tr', 'Ce', + 'Electron Density','$n_e$ ($10^{20}m^{-3}$)','$a/Ln_e$','$Q_{conv,e}$ (GB)','$Q_{conv,e}$ ($MW/m^2$)', + 1E-1,"Qgb"]) + + if "nZ" in self.predicted_channels: # If this model provides the raw particle flux, go for it 
- if 'CZ_raw_tr' in self.plasma: + if 'GZ1E20m2_tr' in self.plasma: set_plots.append( - [ 'nZ', 'aLnZ', 'CZ_raw_tr', 'CZ_raw', + [ 'nZ', 'aLnZ', 'GZ1E20m2_tr', 'GZ1E20m2', 'Impurity Density','$n_Z$ ($10^{20}m^{-3}$)','$a/Ln_Z$','$\\Gamma_Z$ (GB)','$\\Gamma_Z$ ($10^{20}m^{-3}/s$)', 1E-1,"Ggb"]) else: - if self.useConvectiveFluxes: - set_plots.append( - [ 'nZ', 'aLnZ', 'CZ_tr', 'CZ', - 'Impurity Density','$n_Z$ ($10^{20}m^{-3}$)','$a/Ln_Z$','$\\widehat{Q}_{conv,Z}$ (GB)','$\\widehat{Q}_{conv,Z}$ ($MW/m^2$)', - 1E-1,"Qgb"]) - else: - set_plots.append( - [ 'nZ', 'aLnZ', 'CZ_tr', 'CZ', - 'Impurity Density','$n_Z$ ($10^{20}m^{-3}$)','$a/Ln_Z$','$\\Gamma_Z$ (GB)','$\\Gamma_Z$ ($10^{20}m^{-3}/s$)', - 1E-1,"Ggb"]) - - if "w0" in self.ProfilesPredicted: + set_plots.append( + [ 'nZ', 'aLnZ', 'CZ_tr', 'CZ', + 'Impurity Density','$n_Z$ ($10^{20}m^{-3}$)','$a/Ln_Z$','$\\widehat{Q}_{conv,Z}$ (GB)','$\\widehat{Q}_{conv,Z}$ ($MW/m^2$)', + 1E-1,"Qgb"]) + + if "w0" in self.predicted_channels: set_plots.append( - [ 'w0', 'aLw0', 'Mt_tr', 'Mt', + [ 'w0', 'aLw0', 'MtJm2_tr', 'MtJm2', 'Rotation','$\\omega_0$ ($krad/s$)','$-d\\omega_0/dr$ ($krad/s/cm$)','$\\Pi$ (GB)','$\\Pi$ ($J/m^2$)', 1E-3,"Pgb"]) @@ -115,7 +104,7 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co colors = GRAPHICStools.listColors() cont = 0 - for i in range(len(self.ProfilesPredicted)): + for i in range(len(self.predicted_channels)): # Plot gradient evolution ax = axsRes[1+cont] @@ -123,14 +112,14 @@ def plot(self, axs, axsRes, figs=None, c="r", label="powerstate",batch_num=0, co position_in_batch = i * ( self.plasma['rho'].shape[-1] -1 ) + j - ax.plot(self.FluxMatch_Xopt[:,position_in_batch], "-o", color=colors[j], lw=1.0, label = f"r/a = {self.plasma['roa'][batch_num,j]:.2f}",markersize=0.5) + ax.plot(self.FluxMatch_Xopt[:,position_in_batch], "-o", color=colors[j], lw=1.0, label = f"r/a = {self.plasma['roa'][batch_num,j+1]:.2f}",markersize=0.5) if self.bounds_current is not None: for 
u in [0,1]: ax.axhline(y=self.bounds_current[u,position_in_batch], color=colors[j], linestyle='-.', lw=0.2) ax.set_ylabel(self.labelsFM[i][0]) - if i == len(self.ProfilesPredicted)-1: + if i == len(self.predicted_channels)-1: GRAPHICStools.addLegendApart(ax, ratio=1.0,extraPad=0.05, size=9) # Plot residual evolution @@ -240,7 +229,7 @@ def plot_metrics_powerstates(axsM, powerstates, profiles=None, profiles_color='b x , y = [], [] for h in range(len(powerstates)): x.append(h) - Pfus = powerstates[h].volume_integrate( + Pfus = powerstates[h].from_density_to_flux( (powerstates[h].plasma["qfuse"] + powerstates[h].plasma["qfusi"]) * 5.0 ) * powerstates[h].plasma["volp"] y.append(Pfus[..., -1].item()) diff --git a/src/mitim_modules/powertorch/utils/TARGETStools.py b/src/mitim_modules/powertorch/utils/TARGETStools.py new file mode 100644 index 00000000..8d426864 --- /dev/null +++ b/src/mitim_modules/powertorch/utils/TARGETStools.py @@ -0,0 +1,161 @@ +import torch +from mitim_tools.misc_tools import PLASMAtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class power_targets: + ''' + Default class for power target models, change "evaluate" method to implement a new model + ''' + + def evaluate(self): + print("No model implemented for power targets", typeMsg="w") + + def __init__(self,powerstate): + self.powerstate = powerstate + + # Make sub-targets equal to zero + variables_to_zero = ["qfuse", "qfusi", "qie", "qrad", "qrad_bremms", "qrad_line", "qrad_sync"] + for i in variables_to_zero: + self.powerstate.plasma[i] = self.powerstate.plasma["te"] * 0.0 + + def fine_grid(self): + + """ + Make all quantities needed on the fine resolution + ------------------------------------------------- + In the powerstate creation, the plasma variables are stored in two different resolutions, one for the coarse grid and one for the fine grid, + if the option is activated. 
+ + Here, at calculation stage I use some precalculated quantities in the fine grid and then integrate the gradients into that resolution + + Note that the set ['te','ti','ne','nZ','w0','ni'] will automatically be substituted during the update_var() that comes next, so + it's ok that I lose the torch leaf here. However, I must do this copy here because if any of those variables are not updated in + update_var() then it would fail. But first store them for later use. + """ + + self.plasma_original = {} + + # Bring to fine grid + variables_to_fine = ["B_unit", "B_ref", "volp", "rmin", "roa", "rho", "ni"] + for variable in variables_to_fine: + self.plasma_original[variable] = self.powerstate.plasma[variable].clone() + self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] + + # Bring also the gradients and kinetic variables + for variable in self.powerstate.profile_map.keys(): + + # Kinetic variables (te,ti,ne,nZ,w0,ni) + self.plasma_original[variable] = self.powerstate.plasma[variable].clone() + self.powerstate.plasma[variable] = self.powerstate.plasma_fine[variable] + + # Bring also the gradients that are part of the torch trees, so that the derivative is not lost + self.plasma_original[f'aL{variable}'] = self.powerstate.plasma[f'aL{variable}'].clone() + + # ---------------------------------------------------- + # Integrate through fine profile constructors + # ---------------------------------------------------- + for i in self.powerstate.predicted_channels: + _ = self.powerstate.update_var(i,specific_profile_constructor=self.powerstate.profile_constructors_coarse_middle) + + def flux_integrate(self): + """ + ************************************************************************************************** + Calculate integral of all targets, and then sum aux. + Reason why I do it this convoluted way is to make it faster in mitim, not to run the volume integral all the time. 
+ Run once for all the batch and also for electrons and ions + (in MW/m^2) + ************************************************************************************************** + """ + + qe = self.powerstate.plasma["te"]*0.0 + qi = self.powerstate.plasma["te"]*0.0 + + if "qie" in self.powerstate.target_options['options']['targets_evolve']: + qe += -self.powerstate.plasma["qie"] + qi += self.powerstate.plasma["qie"] + + if "qfus" in self.powerstate.target_options['options']['targets_evolve']: + qe += self.powerstate.plasma["qfuse"] + qi += self.powerstate.plasma["qfusi"] + + if "qrad" in self.powerstate.target_options['options']['targets_evolve']: + qe -= self.powerstate.plasma["qrad"] + + q = torch.cat((qe, qi)).to(qe) + self.P = self.powerstate.from_density_to_flux(q, force_dim=q.shape[0]) + + def coarse_grid(self): + + # ************************************************************************************************** + # Come back to original grid for targets + # ************************************************************************************************** + + # Interpolate results from fine to coarse (i.e. 
whole point is that it is better than integrate interpolated values) + if "qie" in self.powerstate.target_options['options']['targets_evolve']: + for i in ["qie"]: + self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] + + if "qfus" in self.powerstate.target_options['options']['targets_evolve']: + for i in [ + "qfuse", + "qfusi", + ]: + self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] + + if "qrad" in self.powerstate.target_options['options']['targets_evolve']: + for i in [ + "qrad", + "qrad_bremms", + "qrad_line", + "qrad_sync", + ]: + self.powerstate.plasma[i] = self.powerstate.plasma[i][:, self.powerstate.positions_targets] + + + + self.P = self.P[:, self.powerstate.positions_targets] + + # Recover variables calculated prior to the fine-targets method + for i in self.plasma_original: + self.powerstate.plasma[i] = self.plasma_original[i] + + def postprocessing(self, force_zero_particle_flux=False, relative_error_assumed=1.0): + + # ************************************************************************************************** + # Plug-in targets that were fixed + # ************************************************************************************************** + + self.powerstate.plasma["QeMWm2"] = self.powerstate.plasma["QeMWm2_fixedtargets"] + self.P[: self.P.shape[0]//2, :] # MW/m^2 + self.powerstate.plasma["QiMWm2"] = self.powerstate.plasma["QiMWm2_fixedtargets"] + self.P[self.P.shape[0]//2 :, :] # MW/m^2 + self.powerstate.plasma["Ge1E20m2"] = self.powerstate.plasma["Ge_fixedtargets"] # 1E20/s/m^2 + self.powerstate.plasma["GZ1E20m2"] = self.powerstate.plasma["GZ_fixedtargets"] # 1E20/s/m^2 + self.powerstate.plasma["MtJm2"] = self.powerstate.plasma["MtJm2_fixedtargets"] # J/m^2 + + if force_zero_particle_flux: + self.powerstate.plasma["Ge1E20m2"] = self.powerstate.plasma["Ge1E20m2"] * 0 + + # Convective fluxes + self.powerstate.plasma["Ce"] = 
PLASMAtools.convective_flux(self.powerstate.plasma["te"], self.powerstate.plasma["Ge1E20m2"]) # MW/m^2 + self.powerstate.plasma["CZ"] = PLASMAtools.convective_flux(self.powerstate.plasma["te"], self.powerstate.plasma["GZ1E20m2"]) # MW/m^2 + + # ************************************************************************************************** + # Error + # ************************************************************************************************** + + variables_to_error = ["QeMWm2", "QiMWm2", "Ce", "CZ", "MtJm2", "Ge1E20m2", "GZ1E20m2"] + + for i in variables_to_error: + self.powerstate.plasma[i + "_stds"] = abs(self.powerstate.plasma[i]) * relative_error_assumed / 100 + + # ************************************************************************************************** + # GB Normalized (Note: This is useful for mitim surrogate variables of targets) + # ************************************************************************************************** + + self.powerstate.plasma["QeGB"] = self.powerstate.plasma["QeMWm2"] / self.powerstate.plasma["Qgb"] + self.powerstate.plasma["QiGB"] = self.powerstate.plasma["QiMWm2"] / self.powerstate.plasma["Qgb"] + self.powerstate.plasma["GeGB"] = self.powerstate.plasma["Ge1E20m2"] / self.powerstate.plasma["Ggb"] + self.powerstate.plasma["GZGB"] = self.powerstate.plasma["GZ1E20m2"] / self.powerstate.plasma["Ggb"] + self.powerstate.plasma["CeGB"] = self.powerstate.plasma["Ce"] / self.powerstate.plasma["Qgb"] + self.powerstate.plasma["CZGB"] = self.powerstate.plasma["CZ"] / self.powerstate.plasma["Qgb"] + self.powerstate.plasma["MtGB"] = self.powerstate.plasma["MtJm2"] / self.powerstate.plasma["Pgb"] diff --git a/src/mitim_modules/powertorch/utils/TRANSFORMtools.py b/src/mitim_modules/powertorch/utils/TRANSFORMtools.py index 405add08..18327bc7 100644 --- a/src/mitim_modules/powertorch/utils/TRANSFORMtools.py +++ b/src/mitim_modules/powertorch/utils/TRANSFORMtools.py @@ -1,11 +1,11 @@ import copy import torch +from pathlib 
import Path import numpy as np import pandas as pd -from mitim_modules.powertorch.physics import CALCtools -from mitim_tools.misc_tools import LOGtools -from mitim_tools.gacode_tools import PROFILEStools -from mitim_modules.powertorch.physics import TARGETStools +from mitim_tools.misc_tools import LOGtools, IOtools +from mitim_tools.plasmastate_tools.utils import state_plotting +from mitim_modules.powertorch.physics_models import targets_analytic, parameterizers from mitim_tools.misc_tools.LOGtools import printMsg as print from mitim_tools import __mitimroot__ from IPython import embed @@ -13,7 +13,7 @@ # <> Function to interpolate a curve <> from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function -def gacode_to_powerstate(self, input_gacode, rho_vec): +def gacode_to_powerstate(self, rho_vec=None): """ This function converts from the fine input.gacode grid to a powertorch object and grid. Notes: @@ -30,6 +30,10 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): print("\t- Producing powerstate object from input.gacode") + input_gacode = self.profiles + if rho_vec is None: + rho_vec = self.plasma["rho"] + # ********************************************************************************************* # Radial grid # ********************************************************************************************* @@ -65,7 +69,7 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): ["rho", "rho(-)", None, True, False], ["roa", "roa", None, True, True], ["Rmajoa", "Rmajoa", None, True, True], - ["volp", "volp_miller", None, True, True], + ["volp", "volp_geo", None, True, True], ["rmin", "rmin(m)", None, True, False], ["te", "te(keV)", None, True, False], ["ti", "ti(keV)", 0, True, False], @@ -105,39 +109,37 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): ).to(rho_vec) # ********************************************************************************************* - quantities_to_interpolate_and_volp = [ - ["Paux_e", 
"qe_aux_MWmiller"], - ["Paux_i", "qi_aux_MWmiller"], - ["Gaux_e", "ge_10E20miller"], - ["Maux", "mt_Jmiller"], - ] - - for key in quantities_to_interpolate_and_volp: - - # ********************************************************************************************* - # Extract the quantity via interpolation and tensorization - # ********************************************************************************************* - self.plasma[key[0]] = torch.from_numpy( - interpolation_function(rho_vec.cpu(), rho_use, input_gacode.derived[key[1]]) - ).to(rho_vec) / self.plasma["volp"] - # ********************************************************************************************* - - self.plasma["Gaux_Z"] = self.plasma["Gaux_e"] * 0.0 + # ********************************************************************************************* + # Fixed targets + # ********************************************************************************************* quantitites = {} - quantitites["Pe_orig_fusrad"] = input_gacode.derived["qe_fus_MWmiller"] - input_gacode.derived["qrad_MWmiller"] - quantitites["Pi_orig_fusrad"] = input_gacode.derived["qi_fus_MWmiller"] - quantitites["Pe_orig_fusradexch"] = quantitites["Pe_orig_fusrad"] - input_gacode.derived["qe_exc_MWmiller"] - quantitites["Pi_orig_fusradexch"] = quantitites["Pi_orig_fusrad"] + input_gacode.derived["qe_exc_MWmiller"] + quantitites["QeMWm2_fixedtargets"] = input_gacode.derived["qe_aux_MW"] + quantitites["QiMWm2_fixedtargets"] = input_gacode.derived["qi_aux_MW"] + quantitites["Ge_fixedtargets"] = input_gacode.derived["ge_10E20"] + quantitites["GZ_fixedtargets"] = input_gacode.derived["ge_10E20"] * 0.0 + quantitites["MtJm2_fixedtargets"] = input_gacode.derived["mt_Jmiller"] + + if 'qfus' not in self.target_options["options"]["targets_evolve"]: + # Fusion fixed + quantitites["QeMWm2_fixedtargets"] += input_gacode.derived["qe_fus_MW"] + quantitites["QiMWm2_fixedtargets"] += input_gacode.derived["qi_fus_MW"] + + if 'qrad' not in 
self.target_options["options"]["targets_evolve"]: + # Fusion fixed + quantitites["QeMWm2_fixedtargets"] -= input_gacode.derived["qrad_MW"] + + if 'qie' not in self.target_options["options"]["targets_evolve"]: + # Exchange fixed if 1 + quantitites["QeMWm2_fixedtargets"] -= input_gacode.derived["qe_exc_MW"] + quantitites["QiMWm2_fixedtargets"] += input_gacode.derived["qe_exc_MW"] for key in quantitites: # ********************************************************************************************* # Extract the quantity via interpolation and tensorization # ********************************************************************************************* - self.plasma[key] = torch.from_numpy( - interpolation_function(rho_vec.cpu(), rho_use, quantitites[key]) - ).to(rho_vec) / self.plasma["volp"] + self.plasma[key] = torch.from_numpy(interpolation_function(rho_vec.cpu(), rho_use, quantitites[key])).to(rho_vec) / self.plasma["volp"] # ********************************************************************************************* # ********************************************************************************************* @@ -159,9 +161,10 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): self.plasma["kradcm"] = 1e-5 / self.plasma["a"] # ********************************************************************************************* - # Define deparametrizer functions for the varying profiles and gradients from here + # Define profile_constructor functions for the varying profiles and gradients from here # ********************************************************************************************* + # [quantiy in powerstate, quantity in input.gacode, index of the ion, multiplier, parameterize_in_aLx] cases_to_parameterize = [ ["te", "te(keV)", None, 1.0, True], ["ti", "ti(keV)", 0, 1.0, True], @@ -174,16 +177,16 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): for i in range(input_gacode.profiles['ni(10^19/m^3)'].shape[1]): 
cases_to_parameterize.append([f"ni{i}", "ni(10^19/m^3)", i, 1.0, True]) - self.deparametrizers_fine, self.deparametrizers_coarse, self.deparametrizers_coarse_middle = {}, {}, {} + self.profile_constructors_fine, self.profile_constructors_coarse, self.profile_constructors_coarse_middle = {}, {}, {} for key in cases_to_parameterize: quant = input_gacode.profiles[key[1]] if key[2] is None else input_gacode.profiles[key[1]][:, key[2]] ( aLy_coarse, - self.deparametrizers_fine[key[0]], - self.deparametrizers_coarse[key[0]], - self.deparametrizers_coarse_middle[key[0]], - ) = parameterize_curve( + self.profile_constructors_fine[key[0]], + self.profile_constructors_coarse[key[0]], + self.profile_constructors_coarse_middle[key[0]], + ) = parameterizers.piecewise_linear( input_gacode.derived["roa"], quant, self.plasma["roa"], @@ -194,12 +197,53 @@ def gacode_to_powerstate(self, input_gacode, rho_vec): self.plasma[f"aL{key[0]}"] = aLy_coarse[:-1, 1] # Check that it's not completely zero - if key[0] in self.ProfilesPredicted: + if key[0] in self.predicted_channels: if self.plasma[f"aL{key[0]}"].sum() == 0.0: addT = 1e-15 print(f"\t- All values of {key[0]} detected to be zero, to avoid NaNs, inserting {addT} at the edge",typeMsg="w") self.plasma[f"aL{key[0]}"][..., -1] += addT +def to_gacode( + self, + write_input_gacode=None, + position_in_powerstate_batch=0, + postprocess_input_gacode={}, + insert_highres_powers=False, + rederive_profiles=True, + debugPlot=False, +): + ''' + Notes: + - insert_highres_powers: whether to insert high resolution powers (will calculate them with powerstate targets object, not other custom ones) + ''' + print(">> Inserting powerstate into input.gacode") + + profiles = powerstate_to_gacode( + self, + position_in_powerstate_batch=position_in_powerstate_batch, + postprocess_input_gacode=postprocess_input_gacode, + insert_highres_powers=insert_highres_powers, + rederive=rederive_profiles, + debugPlot=debugPlot, + ) + + # Write input.gacode + if 
write_input_gacode is not None: + write_input_gacode = Path(write_input_gacode) + print(f"\t- Writing input.gacode file: {IOtools.clipstr(write_input_gacode)}") + write_input_gacode.parent.mkdir(parents=True, exist_ok=True) + profiles.write_state(file=write_input_gacode) + + # If corrections modify the ions set... it's better to re-read, otherwise powerstate will be confused + if rederive_profiles: + defineIons(self, profiles, self.plasma["rho"][position_in_powerstate_batch, :], self.dfT) + # Repeat, that's how it's done earlier + self._repeat_tensors(batch_size=self.plasma["rho"].shape[0], + specific_keys=["ni","ions_set_mi","ions_set_Zi","ions_set_Dion","ions_set_Tion","ions_set_c_rad"], + positionToUnrepeat=None) + + return profiles + def powerstate_to_gacode( self, postprocess_input_gacode={}, @@ -210,7 +254,7 @@ def powerstate_to_gacode( ): """ Notes: - - This function assumes that "profiles" is the PROFILES_GACODE that everything started with. + - This function assumes that "profiles" is the gacode_state that everything started with. - We assume that what changes is only the kinetic profiles allowed to vary. 
- This only works for a single profile, in position_in_powerstate_batch - rederive is expensive, so I'm not re-deriving the geometry which is the most expensive @@ -221,8 +265,8 @@ def powerstate_to_gacode( Tfast_ratio = postprocess_input_gacode.get("Tfast_ratio", True) Ti_thermals = postprocess_input_gacode.get("Ti_thermals", True) ni_thermals = postprocess_input_gacode.get("ni_thermals", True) - recompute_ptot = postprocess_input_gacode.get("recompute_ptot", True) - ensureMachNumber = postprocess_input_gacode.get("ensureMachNumber", None) + recalculate_ptot = postprocess_input_gacode.get("recalculate_ptot", True) + force_mach = postprocess_input_gacode.get("force_mach", None) # ------------------------------------------------------------------------------------------ # Insert profiles @@ -240,13 +284,13 @@ def powerstate_to_gacode( ] for key in quantities: - if key[0] in self.ProfilesPredicted: + if key[0] in self.predicted_channels: print(f"\t- Inserting {key[0]} into input.gacode profiles") # ********************************************************************************************* - # From a/Lx to x via fine deparametrizer + # From a/Lx to x via fine profile_constructor # ********************************************************************************************* - x, y = self.deparametrizers_fine[key[0]]( + x, y = self.profile_constructors_fine[key[0]]( self.plasma["roa"][position_in_powerstate_batch, :], self.plasma[f"aL{key[0]}"][position_in_powerstate_batch, :], ) @@ -281,25 +325,25 @@ def powerstate_to_gacode( print("\t\t* Adjusting ni of thermal ions", typeMsg="i") profiles.scaleAllThermalDensities(scaleFactor=scaleFactor) - if "w0" not in self.ProfilesPredicted and ensureMachNumber is not None: + if "w0" not in self.predicted_channels and force_mach is not None: # Rotation fixed to ensure Mach number - profiles.introduceRotationProfile(Mach_LF=ensureMachNumber) + profiles.introduceRotationProfile(Mach_LF=force_mach) # 
------------------------------------------------------------------------------------------ # Insert Powers # ------------------------------------------------------------------------------------------ if insert_highres_powers: - powerstate_to_gacode_powers(self, profiles, position_in_powerstate_batch) + powerstate_to_gacode_powers(self, profiles) # ------------------------------------------------------------------------------------------ # Recalculate and change ptot to make it consistent? # ------------------------------------------------------------------------------------------ - if rederive or recompute_ptot: - profiles.deriveQuantities(rederiveGeometry=False) + if rederive or recalculate_ptot: + profiles.derive_quantities(rederiveGeometry=False) - if recompute_ptot: + if recalculate_ptot: profiles.selfconsistentPTOT() if debugPlot: @@ -307,11 +351,11 @@ def powerstate_to_gacode( return profiles -def powerstate_to_gacode_powers(self, profiles, position_in_powerstate_batch=0): +def powerstate_to_gacode_powers(self, profiles): - profiles.deriveQuantities(rederiveGeometry=False) + profiles.derive_quantities(rederiveGeometry=False) - print("\t- Insering powers") + print("\t- Inserting powers") state_temp = self.copy_state() @@ -325,39 +369,43 @@ def powerstate_to_gacode_powers(self, profiles, position_in_powerstate_batch=0): with LOGtools.HiddenPrints(): state_temp.__init__( profiles, - EvolutionOptions={"rhoPredicted": rhoy}, - TargetOptions={ - "targets_evaluator": TARGETStools.analytical_model, - "ModelOptions": { - "TypeTarget": self.TargetOptions["ModelOptions"]["TypeTarget"], # Important to keep the same as in the original - "TargetCalc": "powerstate", + evolution_options={"rhoPredicted": rhoy}, + target_options={ + "evaluator": targets_analytic.analytical_model, + "options": { + "targets_evolve": self.target_options["options"]["targets_evolve"], # Important to keep the same as in the original + "target_evaluator_method": "powerstate", + 
"force_zero_particle_flux": self.target_options["options"]["force_zero_particle_flux"], + "percent_error": self.target_options["options"]["percent_error"] } }, increase_profile_resol = False ) state_temp.calculateProfileFunctions() - state_temp.TargetOptions["ModelOptions"]["TargetCalc"] = "powerstate" + state_temp.target_options["options"]["target_evaluator_method"] = "powerstate" state_temp.calculateTargets() # ------------------------------------------------------------------------------------------ conversions = {} - if self.TargetOptions["ModelOptions"]["TypeTarget"] > 1: + if 'qie' in self.target_options["options"]["targets_evolve"]: conversions['qie'] = "qei(MW/m^3)" - if self.TargetOptions["ModelOptions"]["TypeTarget"] > 2: + if 'qrad' in self.target_options["options"]["targets_evolve"]: conversions['qrad_bremms'] = "qbrem(MW/m^3)" conversions['qrad_sync'] = "qsync(MW/m^3)" conversions['qrad_line'] = "qline(MW/m^3)" + if 'qfus' in self.target_options["options"]["targets_evolve"]: conversions['qfuse'] = "qfuse(MW/m^3)" conversions['qfusi'] = "qfusi(MW/m^3)" + position_in_powerstate_batch = 0 + for ikey in conversions: if conversions[ikey] in profiles.profiles: profiles.profiles[conversions[ikey]][:-extra_points] = state_temp.plasma[ikey][position_in_powerstate_batch,:].cpu().numpy() else: profiles.profiles[conversions[ikey]] = np.zeros(len(profiles.profiles["qei(MW/m^3)"])) profiles.profiles[conversions[ikey]][:-extra_points] = state_temp.plasma[ikey][position_in_powerstate_batch,:].cpu().numpy() - def defineIons(self, input_gacode, rho_vec, dfT): """ @@ -378,18 +426,16 @@ def defineIons(self, input_gacode, rho_vec, dfT): self.plasma["ni"], mi, Zi, c_rad = [], [], [], [] for i in range(len(input_gacode.profiles["mass"])): if input_gacode.profiles["type"][i] == "[therm]": - self.plasma["ni"].append( - interpolation_function(rho_vec, rho_use, input_gacode.profiles["ni(10^19/m^3)"][:, i]) - ) + self.plasma["ni"].append(interpolation_function(rho_vec, rho_use, 
input_gacode.profiles["ni(10^19/m^3)"][:, i])) mi.append(input_gacode.profiles["mass"][i]) Zi.append(input_gacode.profiles["z"][i]) # Grab chebyshev coefficients from file - data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics" / "radiation_chebyshev.csv") + data_df = pd.read_csv(__mitimroot__ / "src" / "mitim_modules" / "powertorch" / "physics_models" / "radiation_chebyshev.csv") try: c = data_df[data_df['Ion'].str.lower()==input_gacode.profiles["name"][i].lower()].to_numpy()[0,2:].astype(float) except IndexError: - print(f'\t- Specie {input_gacode.profiles["name"][i]} not found in ADAS database, assuming zero radiation from it',typeMsg="w") + print(f'\t- Specie {input_gacode.profiles["name"][i]} not found in radiation database, assuming zero radiation from it',typeMsg="w") c = [-1e10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] c_rad.append(c) @@ -410,179 +456,6 @@ def defineIons(self, input_gacode, rho_vec, dfT): self.plasma["ions_set_Tion"] = Tion self.plasma["ions_set_c_rad"] = c_rad -def parameterize_curve( - x_coord, - y_coord_raw, - x_coarse_tensor, - parameterize_in_aLx=True, - multiplier_quantity=1.0, - PreventNegative=False, - ): - """ - Notes: - - x_coarse_tensor must be torch - """ - - # ********************************************************************************************************** - # Define the integrator and derivator functions (based on whether I want to parameterize in aLx or in gradX) - # ********************************************************************************************************** - - if parameterize_in_aLx: - # 1/Lx = -1/X*dX/dr - integrator_function, derivator_function = ( - CALCtools.integrateGradient, - CALCtools.produceGradient, - ) - else: - # -dX/dr - integrator_function, derivator_function = ( - CALCtools.integrateGradient_lin, - CALCtools.produceGradient_lin, - ) - - y_coord = torch.from_numpy(y_coord_raw).to(x_coarse_tensor) * multiplier_quantity - - ygrad_coord = derivator_function( 
torch.from_numpy(x_coord).to(x_coarse_tensor), y_coord ) - - # ********************************************************************************************************** - # Get control points - # ********************************************************************************************************** - - x_coarse = x_coarse_tensor[1:].cpu().numpy() - - # Clip to zero if I want to prevent negative values - ygrad_coord = ygrad_coord.clip(0) if PreventNegative else ygrad_coord - - """ - Define region to get control points from - ------------------------------------------------------------ - Trick: Addition of extra point - This is important because if I don't, when I combine the trailing edge and the new - modified profile, there's going to be a discontinuity in the gradient. - """ - - ir_end = np.argmin(np.abs(x_coord - x_coarse[-1])) - - if ir_end < len(x_coord) - 1: - ir = ir_end + 2 # To prevent that TGYRO does a 2nd order derivative - x_coarse = np.append(x_coarse, [x_coord[ir]]) - else: - ir = ir_end - - # Definition of trailing edge. Any point after, and including, the extra point - x_trail = torch.from_numpy(x_coord[ir:]).to(x_coarse_tensor) - y_trail = y_coord[ir:] - x_notrail = torch.from_numpy(x_coord[: ir + 1]).to(x_coarse_tensor) - - # Produce control points, including a zero at the beginning - aLy_coarse = [[0.0, 0.0]] - for cont, i in enumerate(x_coarse): - yValue = ygrad_coord[np.argmin(np.abs(x_coord - i))] - aLy_coarse.append([i, yValue.cpu().item()]) - - aLy_coarse = torch.from_numpy(np.array(aLy_coarse)).to(ygrad_coord) - - # Since the last one is an extra point very close, I'm making it the same - aLy_coarse[-1, 1] = aLy_coarse[-2, 1] - - # Boundary condition at point moved by gridPointsAllowed - y_bc = torch.from_numpy(interpolation_function([x_coarse[-1]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) - - # Boundary condition at point (ACTUAL THAT I WANT to keep fixed, i.e. 
rho=0.8) - y_bc_real = torch.from_numpy(interpolation_function([x_coarse[-2]], x_coord, y_coord.cpu().numpy())).to(ygrad_coord) - - # ********************************************************************************************************** - # Define deparametrizer functions - # ********************************************************************************************************** - - def deparametrizer_coarse(x, y, multiplier=multiplier_quantity): - """ - Construct curve in a coarse grid - ---------------------------------------------------------------------------------------------------- - This constructs a curve in any grid, with any batch given in y=y. - Useful for surrogate evaluations. Fast in a coarse grid. For HF evaluations, - I need to do in a finer grid so that it is consistent with TGYRO. - x, y must be (batch, radii), y_bc must be (1) - """ - return ( - x, - integrator_function(x, y, y_bc_real) / multiplier, - ) - - def deparametrizer_coarse_middle(x, y, multiplier=multiplier_quantity): - """ - Deparamterizes a finer profile based on the values in the coarse. 
- Reason why something like this is not used for the full profile is because derivative of this will not be as original, - which is needed to match TGYRO - """ - yCPs = CALCtools.Interp1d()(aLy_coarse[:, 0][:-1].repeat((y.shape[0], 1)), y, x) - return x, integrator_function(x, yCPs, y_bc_real) / multiplier - - def deparametrizer_fine(x, y, multiplier=multiplier_quantity): - """ - Notes: - - x is a 1D array, but y can be a 2D array for a batch of individuals: (batch,x) - - I am assuming it is 1/LT for parameterization, but gives T - """ - - y = torch.atleast_2d(y) - x = x[0, :] if x.dim() == 2 else x - - # Add the extra trick point - x = torch.cat((x, aLy_coarse[-1][0].repeat((1)))) - y = torch.cat((y, aLy_coarse[-1][-1].repeat((y.shape[0], 1))), dim=1) - - # Model curve (basically, what happens in between points) - yBS = CALCtools.Interp1d()(x.repeat(y.shape[0], 1), y, x_notrail.repeat(y.shape[0], 1)) - - """ - --------------------------------------------------------------------------------------------------------- - Trick 1: smoothAroundCoarsing - TGYRO will use a 2nd order scheme to obtain gradients out of the profile, so a piecewise linear - will simply not give the right derivatives. - Here, this rough trick is to modify the points in gradient space around the coarse grid with the - same value of gradient, so in principle it doesn't matter the order of the derivative. - """ - num_around = 1 - for i in range(x.shape[0] - 2): - ir = torch.argmin(torch.abs(x[i + 1] - x_notrail)) - for k in range(-num_around, num_around + 1, 1): - yBS[:, ir + k] = yBS[:, ir] - # -------------------------------------------------------------------------------------------------------- - - yBS = integrator_function(x_notrail.repeat(yBS.shape[0], 1), yBS.clone(), y_bc) - - """ - Trick 2: Correct y_bc - The y_bc for the profile integration started at gridPointsAllowed, but that's not the real - y_bc. I want the temperature fixed at my first point that I actually care for. 
- Here, I multiply the profile to get that. - Multiplication works because: - 1/LT = 1/T * dT/dr - 1/LT' = 1/(T*m) * d(T*m)/dr = 1/T * dT/dr = 1/LT - Same logarithmic gradient, but with the right boundary condition - - """ - ir = torch.argmin(torch.abs(x_notrail - x[-2])) - yBS = yBS * torch.transpose((y_bc_real / yBS[:, ir]).repeat(yBS.shape[1], 1), 0, 1) - - # Add trailing edge - y_trailnew = copy.deepcopy(y_trail).repeat(yBS.shape[0], 1) - - x_notrail_t = torch.cat((x_notrail[:-1], x_trail), dim=0) - yBS = torch.cat((yBS[:, :-1], y_trailnew), dim=1) - - return x_notrail_t, yBS / multiplier - - # ********************************************************************************************************** - - return ( - aLy_coarse, - deparametrizer_fine, - deparametrizer_coarse, - deparametrizer_coarse_middle, - ) - def improve_resolution_profiles(profiles, rhoMODEL): """ Resolution of input.gacode @@ -633,7 +506,6 @@ def improve_resolution_profiles(profiles, rhoMODEL): # ---------------------------------------------------------------------------------- profiles.changeResolution(rho_new=rho_new) - def debug_transformation(p, p_new, s): rho = s.plasma['rho'][0][1:] @@ -684,7 +556,7 @@ def debug_transformation(p, p_new, s): print(f'Profile mean error: {np.mean(err_prof):.2f}%', typeMsg='i' if np.mean(err_prof) < 1e-0 else 'w') print(f'Gradient mean error (ignoring 0.0): {np.mean(err_grad):.2f}%', typeMsg='i' if np.mean(err_grad) < 1e-0 else 'w') - fn = PROFILEStools.plotAll([p,p_new],extralabs=['Original','New'],lastRhoGradients=rho[-1].item()+0.01) + fn = state_plotting.plotAll([p,p_new],extralabs=['Original','New'],lastRhoGradients=rho[-1].item()+0.01) axs = fn.figure_handles[3].figure.axes diff --git a/src/mitim_modules/powertorch/utils/TRANSPORTtools.py b/src/mitim_modules/powertorch/utils/TRANSPORTtools.py new file mode 100644 index 00000000..b7c4438b --- /dev/null +++ b/src/mitim_modules/powertorch/utils/TRANSPORTtools.py @@ -0,0 +1,433 @@ +import json +from 
matplotlib.pylab import f +import torch +import numpy as np +from functools import partial +import copy +import shutil +from mitim_tools.misc_tools import IOtools, PLASMAtools +from mitim_tools.gacode_tools import PROFILEStools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +def write_json(self, file_name = 'fluxes_turb.json', suffix= 'turb'): + ''' + For tracking and reproducibility (e.g. external runs), we want to write a json file + containing the simulation results. JSON should look like: + + { + 'fluxes_mean': + { + 'QeGB': ... + 'QiGB': ... + 'GeGB': ... + 'GZGB': ... + 'MtGB': ... + 'QieGB': ... + }, + 'fluxes_stds': + { + 'QeGB': ... + 'QiGB': ... + 'GeGB': ... + 'GZGB': ... + 'MtGB': ... + 'QieGB': ... + }, + 'additional_info': { + 'rho': rho.tolist(), + } + } + ''' + + write_json_from_variables = self._write_json_from_variables_turb if suffix == 'turb' else self._write_json_from_variables_neoc + + if self.folder.exists() and write_json_from_variables: + + with open(self.folder / file_name, 'w') as f: + + fluxes_mean = {} + fluxes_stds = {} + + for var in ['QeGB', 'QiGB', 'GeGB', 'GZGB', 'MtGB']: + fluxes_mean[var] = self.__dict__[f"{var}_{suffix}"].tolist() + fluxes_stds[var] = self.__dict__[f"{var}_{suffix}_stds"].tolist() + + try: + var = 'QieGB' + fluxes_mean[var] = self.__dict__[f"{var}_{suffix}"].tolist() + fluxes_stds[var] = self.__dict__[f"{var}_{suffix}_stds"].tolist() + except KeyError: + # NEO file may not have it + pass + + json_dict = { + 'fluxes_mean': fluxes_mean, + 'fluxes_stds': fluxes_stds, + 'additional_info': { + 'rho': self.powerstate.plasma["rho"][0, 1:].cpu().numpy().tolist(), + 'roa': self.powerstate.plasma["roa"][0, 1:].cpu().numpy().tolist(), + 'Qgb': self.powerstate.plasma["Qgb"][0, 1:].cpu().numpy().tolist(), + 'aLte': self.powerstate.plasma["aLte"][0, 1:].cpu().numpy().tolist(), + 'aLti': self.powerstate.plasma["aLti"][0, 1:].cpu().numpy().tolist(), + 'aLne': 
self.powerstate.plasma["aLne"][0, 1:].cpu().numpy().tolist(), + } + } + + json.dump(json_dict, f, indent=4) + + print(f"\t* Written JSON with {suffix} information to {self.folder / file_name}") + + else: + + print(f"\t* Folder {self.folder} does not exist, cannot write {file_name}", typeMsg='w') + +class power_transport: + + def __init__(self, powerstate, name = "test", folder = "~/scratch/", evaluation_number = 0): + + self.name = name + self.folder = IOtools.expandPath(folder) + self.evaluation_number = evaluation_number + self.powerstate = powerstate + + self.transport_evaluator_options = self.powerstate.transport_options["options"] + self.cold_start = self.powerstate.transport_options["cold_start"] + + # Model results is None by default, but can be assigned in evaluate + self.model_results = None + + # By default, write the json files after evaluating the variables (will be changed in gyrokinetic "prep" run mode) + self._write_json_from_variables_turb = True + self._write_json_from_variables_neoc = True + + # ---------------------------------------------------------------------------------------- + # labels for plotting + # ---------------------------------------------------------------------------------------- + + self.powerstate.labelsFluxes = { + "te": "$Q_e$ ($MW/m^2$)", + "ti": "$Q_i$ ($MW/m^2$)", + "ne": "$Q_{conv}$ ($MW/m^2$)", + "nZ": "$Q_{conv}$ $\\cdot f_{Z,0}$ ($MW/m^2$)", + "w0": "$M_T$ ($J/m^2$)", + } + + def evaluate(self): + + # Copy the input.gacode files to the output folder + self._profiles_to_store() + + ''' + ****************************************************************************************************** + Evaluate neoclassical and turbulent transport (*in GB units*). 
+ These functions use a hook to write the .json files to communicate the results to powerstate.plasma + ****************************************************************************************************** + ''' + + # Initialize them as zeros + for var in ['QeGB','QiGB','GeGB','GZGB','MtGB','QieGB']: + for suffix in ['turb', 'neoc']: + for suffix0 in ['', '_stds']: + self.__dict__[f"{var}_{suffix}{suffix0}"] = torch.zeros(self.powerstate.plasma['rho'].shape[-1]-1) + + neoclassical = self.evaluate_neoclassical() + turbulence = self.evaluate_turbulence() + + ''' + ****************************************************************************************************** + From the json to powerstate.plasma and GB to real units transformation + ****************************************************************************************************** + ''' + self._populate_from_json(file_name = 'fluxes_turb.json', suffix= 'turb') + self._populate_from_json(file_name = 'fluxes_neoc.json', suffix= 'neoc') + + ''' + ****************************************************************************************************** + Post-process the data: add turb and neoc, tensorize and transformations + ****************************************************************************************************** + ''' + self._postprocess() + + def _postprocess(self): + ''' + Curate information for the powerstate (e.g. 
add models, add batch dimension, rho=0.0, and tensorize) + Before calling this function, the powerstate.plasma should have the following variables: + 'QeMWm2_tr_X', 'QiMWm2_tr_X', 'Ge1E20m2_tr_X', 'GZ1E20m2_tr_X', 'MtJm2_tr_X', 'QieMWm3_tr_X' + where X = 'turb' or 'neoc' + and also the corresponding _stds versions + ''' + + variables = ['QeMWm2', 'QiMWm2', 'Ge1E20m2', 'GZ1E20m2', 'MtJm2', 'QieMWm3'] + + for variable in variables: + for suffix in ['_tr_turb', '_tr_turb_stds', '_tr_neoc', '_tr_neoc_stds']: + + # Make them tensors and add a batch dimension + self.powerstate.plasma[f"{variable}{suffix}"] = torch.Tensor(self.powerstate.plasma[f"{variable}{suffix}"]).to(self.powerstate.dfT).unsqueeze(0) + + # Pad with zeros at rho=0.0 + self.powerstate.plasma[f"{variable}{suffix}"] = torch.cat(( + torch.zeros((1, 1)), + self.powerstate.plasma[f"{variable}{suffix}"], + ), dim=1) + + # ----------------------------------------------------------- + # Sum the turbulent and neoclassical contributions + # ----------------------------------------------------------- + + variables = ['QeMWm2', 'QiMWm2', 'Ge1E20m2', 'GZ1E20m2', 'MtJm2'] + + for variable in variables: + self.powerstate.plasma[f"{variable}_tr"] = self.powerstate.plasma[f"{variable}_tr_turb"] + self.powerstate.plasma[f"{variable}_tr_neoc"] + + # --------------------------------------------------------------------------------- + # Convective fluxes (& Re-scale the GZ flux by the original impurity concentration) + # --------------------------------------------------------------------------------- + + mapper_convective = { + 'Ce': 'Ge1E20m2', + 'CZ': 'GZ1E20m2', + } + + for key in mapper_convective.keys(): + for tt in ['','_turb', '_turb_stds', '_neoc', '_neoc_stds']: + + mult = 1/self.powerstate.fImp_orig if key == 'CZ' else 1.0 + + self.powerstate.plasma[f"{key}_tr{tt}"] = PLASMAtools.convective_flux( + self.powerstate.plasma["te"], + self.powerstate.plasma[f"{mapper_convective[key]}_tr{tt}"] + ) * mult + + def 
produce_profiles(self): + # Only add self._produce_profiles() if it's needed (e.g. full TGLF), otherwise this is somewhat expensive + # (e.g. for flux matching of analytical models) + pass + + def _produce_profiles(self,derive_quantities=True): + + self.applyCorrections = self.powerstate.transport_options["applyCorrections"] + + # Write this updated profiles class (with parameterized profiles and target powers) + self.file_profs = self.folder / "input.gacode" + + powerstate_detached = self.powerstate.copy_state() + + self.powerstate.profiles = powerstate_detached.from_powerstate( + write_input_gacode=self.file_profs, + postprocess_input_gacode=self.applyCorrections, + rederive_profiles = derive_quantities, # Derive quantities so that it's ready for analysis and plotting later + insert_highres_powers = derive_quantities, # Insert powers so that Q, Pfus and all that it's consistent when read later + ) + + self.powerstate.profiles_transport = copy.deepcopy(self.powerstate.profiles) + + self._modify_profiles() + + def _modify_profiles(self): + ''' + Modify the profiles (e.g. 
lumping) before running the transport model + ''' + + # After producing the profiles, copy for future modifications + self.file_profs_unmod = self.file_profs.parent / f"{self.file_profs.name}_unmodified" + shutil.copy2(self.file_profs, self.file_profs_unmod) + + profiles_postprocessing_fun = self.powerstate.transport_options["profiles_postprocessing_fun"] + + if profiles_postprocessing_fun is not None: + print(f"\t- Modifying input.gacode to run transport calculations based on {profiles_postprocessing_fun}",typeMsg="i") + self.powerstate.profiles_transport = profiles_postprocessing_fun(self.file_profs) + + # Position of impurity ion may have changed + p_old = PROFILEStools.gacode_state(self.file_profs_unmod) + p_new = PROFILEStools.gacode_state(self.file_profs) + + impurity_of_interest = p_old.Species[self.powerstate.impurityPosition] + + try: + impurityPosition_new = p_new.Species.index(impurity_of_interest) + + except ValueError: + print(f"\t- Impurity {impurity_of_interest} not found in new profiles, keeping position {self.powerstate.impurityPosition}",typeMsg="w") + impurityPosition_new = self.powerstate.impurityPosition + + if impurityPosition_new != self.powerstate.impurityPosition: + print(f"\t- Impurity position has changed from {self.powerstate.impurityPosition} to {impurityPosition_new}",typeMsg="i") + self.powerstate.impurityPosition_transport = p_new.Species.index(impurity_of_interest) + + def _profiles_to_store(self): + + if "folder" in self.powerstate.transport_options: + whereFolder = IOtools.expandPath(self.powerstate.transport_options["folder"] / "Outputs" / "portals_profiles") + if not whereFolder.exists(): + IOtools.askNewFolder(whereFolder) + + fil = whereFolder / f"input.gacode.{self.evaluation_number}" + shutil.copy2(self.file_profs, fil) + shutil.copy2(self.file_profs_unmod, fil.parent / f"{fil.name}_unmodified") + print(f"\t- Copied profiles to {IOtools.clipstr(fil)}") + else: + print("\t- Could not move files", typeMsg="w") + + def 
_populate_from_json(self, file_name = 'fluxes_turb.json', suffix= 'turb'): + ''' + Populate the powerstate.plasma with the results from the json file + ''' + + mapper = { + 'QeGB': ['Qgb', 'QeMWm2'], + 'QiGB': ['Qgb', 'QiMWm2'], + 'GeGB': ['Ggb', 'Ge1E20m2'], + 'GZGB': ['Ggb', 'GZ1E20m2'], + 'MtGB': ['Pgb', 'MtJm2'], + 'QieGB': ['Sgb', 'QieMWm3'] + } + + ''' + ********************************************************************************************** + If no population file exists, I only convert from GB to real units and return + ********************************************************************************************** + ''' + if not (self.folder / file_name).exists(): + print(f"\t* File {self.folder / file_name} does not exist, cannot populate powerstate.plasma", typeMsg='w') + print(f"\t- Tranforming from GB to real units:") + + for var in mapper: + self.powerstate.plasma[f"{mapper[var][1]}_tr_{suffix}"] = self.__dict__[f"{var}_{suffix}"] * self.powerstate.plasma[f"{mapper[var][0]}"][0,1:] + self.powerstate.plasma[f"{mapper[var][1]}_tr_{suffix}_stds"] = self.__dict__[f"{var}_{suffix}_stds"] * self.powerstate.plasma[f"{mapper[var][0]}"][0,1:] + + return + + ''' + ********************************************************************************************** + Populate the powerstate.plasma from the json file + ********************************************************************************************** + ''' + print(f"\t* Populating powerstate.plasma with JSON data from {self.folder / file_name}") + + with open(self.folder / file_name, 'r') as f: + json_dict = json.load(f) + + # See if the file has GB or real units + units_GB, units_real = False, False + if 'QeGB' in json_dict['fluxes_mean']: + units_GB = True + if 'QeMWm2' in json_dict['fluxes_mean']: + units_real = True + + units = 'both' if (units_GB and units_real) else 'GB' if units_GB else 'real' if units_real else 'none' + + if units == 'real': + + print("\t\t- File has fluxes in real units... 
populating powerstate directly") + + for var in ['QeMWm2', 'QiMWm2', 'Ge1E20m2', 'GZ1E20m2', 'MtJm2', 'QieMWm3']: + self.powerstate.plasma[f"{var}_tr_{suffix}"] = np.array(json_dict['fluxes_mean'][var]) + self.powerstate.plasma[f"{var}_tr_{suffix}_stds"] = np.array(json_dict['fluxes_stds'][var]) + + elif units == 'GB' or units == 'both': + + dum = {} + for var in mapper: + gb = self.powerstate.plasma[f"{mapper[var][0]}"][0,1:].cpu().numpy() + dum[f"{mapper[var][1]}_tr_{suffix}"] = np.array(json_dict['fluxes_mean'][var]) * gb + dum[f"{mapper[var][1]}_tr_{suffix}_stds"] = np.array(json_dict['fluxes_stds'][var]) * gb + + if units == 'GB': + + print("\t\t- File has fluxes in GB units... using GB units from powerstate to convert to real units") + + for var in mapper: + self.powerstate.plasma[f"{mapper[var][1]}_tr_{suffix}"] = dum[f"{mapper[var][1]}_tr_{suffix}"] + self.powerstate.plasma[f"{mapper[var][1]}_tr_{suffix}_stds"] = dum[f"{mapper[var][1]}_tr_{suffix}_stds"] + + elif units == 'both': + + print("\t\t- File has fluxes in both GB and real units... 
using real units and checking consistency") + + for var in mapper: + if not np.allclose(self.powerstate.plasma[f"{mapper[var][1]}_tr_{suffix}"], dum[f"{mapper[var][1]}_tr_{suffix}"]): + print(f"\t\t\t- Inconsistent values found for {mapper[var][1]}_tr_{suffix}") + + for var in ['QeMWm2', 'QiMWm2', 'Ge1E20m2', 'GZ1E20m2', 'MtJm2', 'QieMWm3']: + self.powerstate.plasma[f"{var}_tr_{suffix}"] = np.array(json_dict['fluxes_mean'][var]) + self.powerstate.plasma[f"{var}_tr_{suffix}_stds"] = np.array(json_dict['fluxes_stds'][var]) + + else: + raise ValueError("[MITIM] Unknown units in JSON file") + + # ---------------------------------------------------------------------------------------------------- + # EVALUATE (custom part) + # ---------------------------------------------------------------------------------------------------- + @IOtools.hook_method(after=partial(write_json, file_name = 'fluxes_turb.json', suffix= 'turb')) + def evaluate_turbulence(self): + ''' + This needs to populate the following np.arrays in self., with dimensions of rho: + - QeGB_turb + - QiGB_turb + - GeGB_turb + - GZGB_turb + - MtGB_turb + - QieGB_turb (turbulence exchange) + and their respective standard deviations, e.g. QeGB_turb_stds + ''' + + print(">> No turbulent fluxes to evaluate", typeMsg="w") + + @IOtools.hook_method(after=partial(write_json, file_name = 'fluxes_neoc.json', suffix= 'neoc')) + def evaluate_neoclassical(self): + ''' + This needs to populate the following np.arrays in self.: + - QeGB_neoc + - QiGB_neoc + - GeGB_neoc + - GZGB_neoc + - MtGB_neoc + - QieGB_neoc (zero) + and their respective standard deviations, e.g. 
QeGB_neoc_stds + ''' + + print(">> No neoclassical fluxes to evaluate", typeMsg="w") + + +# ******************************************************************************************* +# Combinations +# ******************************************************************************************* + +from mitim_modules.powertorch.physics_models.transport_tglf import tglf_model +from mitim_modules.powertorch.physics_models.transport_neo import neo_model +from mitim_modules.powertorch.physics_models.transport_cgyro import cgyro_model +from mitim_modules.powertorch.physics_models.transport_gx import gx_model + +class portals_transport_model(power_transport, tglf_model, neo_model, cgyro_model, gx_model): + + def __init__(self, powerstate, **kwargs): + super().__init__(powerstate, **kwargs) + + # Defaults + self.turbulence_model = 'tglf' + self.neoclassical_model = 'neo' + + def produce_profiles(self): + self._produce_profiles() + + @IOtools.hook_method(after=partial(write_json, file_name = 'fluxes_turb.json', suffix= 'turb')) + def evaluate_turbulence(self): + + if self.turbulence_model == 'tglf': + return tglf_model.evaluate_turbulence(self) + elif self.turbulence_model == 'cgyro': + return cgyro_model.evaluate_turbulence(self) + elif self.turbulence_model == 'gx': + return gx_model.evaluate_turbulence(self) + else: + raise Exception(f"Unknown turbulence model {self.turbulence_model}") + + @IOtools.hook_method(after=partial(write_json, file_name = 'fluxes_neoc.json', suffix= 'neoc')) + def evaluate_neoclassical(self): + if self.neoclassical_model == 'neo': + return neo_model.evaluate_neoclassical(self) + else: + raise Exception(f"Unknown neoclassical model {self.neoclassical_model}") diff --git a/src/mitim_modules/vitals/VITALSmain.py b/src/mitim_modules/vitals/VITALSmain.py index fedb3d97..6983276f 100644 --- a/src/mitim_modules/vitals/VITALSmain.py +++ b/src/mitim_modules/vitals/VITALSmain.py @@ -51,7 +51,7 @@ def __init__(self, folder, namelist=None, **kwargs): # 
Default (please change to your desire after instancing the object) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - self.TGLFparameters = {"TGLFsettings": 2, "extraOptions": {}} + self.TGLFparameters = {"code_settings": 2, "extraOptions": {}} self.VITALSparameters = { "rel_error": 0.02, # Standard deviation (relative to value) @@ -74,6 +74,8 @@ def prep( if not classLoaded: with open(tglf_class_file, "rb") as f: tglf_read = pickle.load(f) + tglf_tmp = TGLFtools.TGLF() + tglf_read.run_specifications = tglf_tmp.run_specifications self.tglf = TGLFtools.TGLF(alreadyRun=tglf_read) self.tglf.FolderGACODE, self.tglf.rhos = self.folder, [rho] @@ -199,31 +201,25 @@ def run(self, paramsfile, resultsfile): for iquant in dictOFs: if "_exp" not in iquant: if iquant == "Qe": - value = tglf.results["tglf1"]["TGLFout"][0].Qe_unn + value = tglf.results["tglf1"]["output"][0].Qe_unn elif iquant == "Qi": - value = tglf.results["tglf1"]["TGLFout"][0].Qi_unn + value = tglf.results["tglf1"]["output"][0].Qi_unn elif iquant == "TeFluct": - value = tglf.results["tglf1"]["TGLFout"][ + value = tglf.results["tglf1"]["output"][ 0 ].AmplitudeSpectrum_Te_level elif iquant == "neFluct": - value = tglf.results["tglf1"]["TGLFout"][ + value = tglf.results["tglf1"]["output"][ 0 ].AmplitudeSpectrum_ne_level elif iquant == "neTe": - value = tglf.results["tglf1"]["TGLFout"][0].neTeSpectrum_level + value = tglf.results["tglf1"]["output"][0].neTeSpectrum_level dictOFs[iquant]["value"] = value - dictOFs[iquant]["error"] = np.abs( - dictOFs[iquant]["value"] * self.VITALSparameters["rel_error"] - ) + dictOFs[iquant]["error"] = np.abs(dictOFs[iquant]["value"] * self.VITALSparameters["rel_error"]) else: - dictOFs[iquant]["value"] = self.VITALSparameters["experimentalVals"][ - iquant[:-4] - ] - dictOFs[iquant]["error"] = self.VITALSparameters["std_deviation"][ - iquant[:-4] - ] + dictOFs[iquant]["value"] = self.VITALSparameters["experimentalVals"][iquant[:-4]] + 
dictOFs[iquant]["error"] = self.VITALSparameters["std_deviation"][iquant[:-4]] # Write stuff self.write(dictOFs, resultsfile) @@ -287,7 +283,7 @@ def runTGLF( numSim = self.folder.name - variation = TGLFtools.completeVariation(variation, tglf.inputsTGLF[tglf.rhos[0]]) + variation = TGLFtools.completeVariation_TGLF(variation, tglf.inputs_files[tglf.rhos[0]]) extraOptions = self.TGLFparameters["extraOptions"] multipliers = {} @@ -302,9 +298,9 @@ def runTGLF( folder_label = label tglf.run( - subFolderTGLF=f"{folder_label}", + subfolder=f"{folder_label}", cold_start=cold_start, - TGLFsettings=self.TGLFparameters["TGLFsettings"], + code_settings=self.TGLFparameters["code_settings"], forceIfcold_start=True, extraOptions=extraOptions, multipliers=multipliers, diff --git a/src/mitim_tools/__init__.py b/src/mitim_tools/__init__.py index b4ef771a..1bdd8728 100644 --- a/src/mitim_tools/__init__.py +++ b/src/mitim_tools/__init__.py @@ -1,4 +1,4 @@ from mitim_tools.misc_tools.CONFIGread import config_manager from pathlib import Path -__version__ = "3.0.0" +__version__ = "4.0.0" __mitimroot__ = Path(__file__).resolve().parents[2] diff --git a/src/mitim_tools/astra_tools/ASTRAtools.py b/src/mitim_tools/astra_tools/ASTRAtools.py index b5aacac0..79e602d7 100644 --- a/src/mitim_tools/astra_tools/ASTRAtools.py +++ b/src/mitim_tools/astra_tools/ASTRAtools.py @@ -158,7 +158,7 @@ def convert_ASTRA_to_gacode_fromCDF(astra_cdf, """ template_path = __mitimroot__ / "tests" / "data"/ "input.gacode" - p = PROFILEStools.PROFILES_GACODE(template_path) + p = PROFILEStools.gacode_state(template_path) params = p.profiles # Extract CDF file @@ -204,7 +204,7 @@ def convert_ASTRA_to_gacode_from_transp_output(c, """ template_path = __mitimroot__ / "tests" / "data"/ "input.gacode" - p = PROFILEStools.PROFILES_GACODE(template_path) + p = PROFILEStools.gacode_state(template_path) params = p.profiles #c.calcProfiles() @@ -263,7 +263,7 @@ def convert_ASTRA_to_gacode_from_transp_output(c, bcentr = 
np.array([c.BTOR[ai]]) ; params['bcentr(T)'] = bcentr current = np.array([c.IPL[ai]]) ; params['current(MA)'] = current rho = interp_to_nexp(c.rho[ai]/c.rho[ai,-1]); params['rho(-)'] = rho - polflux = interp_to_nexp(c.FP[ai]) ; params['polflux(Wb/radian)'] = polflux + polflux = interp_to_nexp(c.FP[ai]) ; params['polflux(Wb/radian)'] = polflux/2/np.pi polflux_norm = (polflux-polflux[0])/(polflux[-1]-polflux[0]) @@ -342,13 +342,13 @@ def convert_ASTRA_to_gacode_from_transp_output(c, p.profiles['rho(-)'][0] = 0.0 # rederive quantities - p.deriveQuantities() + p.derive_quantities() # Print output to check Q, Pfus, etc. p.printInfo() if gacode_out is not None: - p.writeCurrentStatus(file=gacode_out) + p.write_state(file=gacode_out) if plot_result: p.plot() @@ -501,7 +501,7 @@ def _betan_initial_conditions(x, geometry_object, rhotop, Ttop_keV, netop_19, ep profiles.makeAllThermalIonsHaveSameTemp() profiles.profiles['ni(10^19/m^3)'][:,0] = profiles.profiles['ne(10^19/m^3)'] profiles.enforceQuasineutrality() - profiles.deriveQuantities() + profiles.derive_quantities() print("residual:", ((profiles.derived['BetaN_engineering']-betan_desired) / betan_desired)**2) @@ -544,6 +544,8 @@ def _betan_initial_conditions(x, geometry_object, rhotop, Ttop_keV, netop_19, ep T[BC_index:] = T_ped[BC_index:] print(f"Pedestal values: ne_ped = {ne_ped}, Te_ped = {Te_ped}") + else: + x=rho preamble_Temp = f""" 900052D3D 2 0 6 ;-SHOT #- F(X) DATA WRITEUF OMFIT ;-SHOT DATE- UFILES ASCII FILE SYSTEM diff --git a/src/mitim_tools/eped_tools/EPEDtools.py b/src/mitim_tools/eped_tools/EPEDtools.py new file mode 100644 index 00000000..31248fb5 --- /dev/null +++ b/src/mitim_tools/eped_tools/EPEDtools.py @@ -0,0 +1,503 @@ +import os +import re +import copy +import subprocess +import matplotlib.pyplot as plt +import f90nml +from pathlib import Path +from mitim_tools.misc_tools import FARMINGtools, GRAPHICStools, IOtools, GUItools +import numpy as np +import pandas as pd +import xarray as xr +from 
mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class EPED: + def __init__( + self, + folder + ): + + self.folder = Path(folder) if folder is not None else None # None for just reading + + if self.folder is not None: + self.folder.mkdir(parents=True, exist_ok=True) + + self.results = {} + + self.inputs_potential = ['ip', 'bt', 'r', 'a', 'kappa', 'delta', 'neped', 'betan', 'zeffped', 'nesep', 'tesep', 'zeta'] + + def run( + self, + subfolder = 'run1', + input_params = None, # {'ip': 12.0, 'bt': 12.16, 'r': 1.85, 'a': 0.57, 'kappa': 1.9, 'delta': 0.5, 'neped': 30.0, 'betan': 1.0, 'zeffped': 1.5, 'nesep': 10.0, 'tesep': 100.0, 'zeta': 0}, + scan_param = None, # {'variable': 'neped', 'values': [10.0, 20.0, 30.0]} + keep_nsep_ratio = None, # Ratio of neped to nesep + nproc_per_run = 64, + minutes_slurm = 30, + cold_start = False, + job_array_limit = 5, + removeScratchFolders = True, #ONLY CHANGE THIS FOR DEBUGGING, if you make this False, your EPED runs will be saved and they are enormous + ): + + # ------------------------------------ + # Prepare job + # ------------------------------------ + + # Prepare folder structure + self.folder_run = self.folder / subfolder + + # Prepare scan parameters + scan_param_variable = scan_param['variable'] if scan_param is not None else None + scan_param_values = scan_param['values'] if scan_param is not None else [None] + + # Prepare job array setup + job_array = '' + for i in range(len(scan_param_values)): + job_array += f'{i+1}' if i == 0 else f',{i+1}' + + # Initialize Job + self.eped_job = FARMINGtools.mitim_job(self.folder_run) + + self.eped_job.define_machine( + "eped", + "mitim_eped", + slurm_settings={ + 'name': 'mitim_eped', + 'minutes': minutes_slurm, + 'ntasks': nproc_per_run, + 'job_array': job_array, + 'job_array_limit': job_array_limit, + } + ) + + # ------------------------------------ + # Prepare each individual case + # ------------------------------------ + + folder_cases, 
output_files, shellPreCommands = [], [], [] + for i,value in enumerate(scan_param_values): + + # Folder structure + subfolder = f'run{i+1}' + folder_case = self.folder_run / subfolder + + # Prepare input parameters + if scan_param_variable is not None: + input_params_new = input_params.copy() if input_params is not None else {} + input_params_new[scan_param_variable] = value + else: + input_params_new = input_params + + if keep_nsep_ratio is not None: + print(f'\t> Setting nesep to {keep_nsep_ratio} * neped') + input_params_new['nesep'] = keep_nsep_ratio * input_params_new['neped'] + + # ******************************* + # Check if the case should be run + run_case = True + force_res = False + if (self.folder_run / f'output_{subfolder}.nc').exists(): + if cold_start: + res = print(f'\t> Run {subfolder} already exists but cold_start is set to True. Running from scratch.', typeMsg='i' if force_res else 'q') + if res: + IOtools.shutil_rmtree(folder_case) + (self.folder_run / f'output_{subfolder}.nc').unlink(missing_ok=True) + force_res = True + else: + run_case = False + else: + print(f'\t> Run {subfolder} already exists and cold_start is set to False. Skipping run.', typeMsg='i') + run_case = False + + if not run_case: + continue + # ******************************* + + # Set up folder + folder_case.mkdir(parents=True, exist_ok=True) + + # Preparation of the run folder by copying the template files + eped_input_file = 'eped.input.1' + required_files_folder = '$EPED_SOURCE_PATH/template/engaging/eped_run_template' + shellPreCommands.append(f'cp {required_files_folder}/* {self.eped_job.folderExecution}/{subfolder}/. 
&& mv {self.eped_job.folderExecution}/{subfolder}/{eped_input_file} {self.eped_job.folderExecution}/{subfolder}/eped.input') + + # Write input file to EPED, determining the expected output file + output_file = self._prep_input_file(folder_case,input_params=input_params_new,eped_input_file=eped_input_file) + + output_files.append(output_file.as_posix()) + folder_cases.append(folder_case) + + # If no cases to run, exit + if len(folder_cases) == 0: + return + + # ------------------------------------- + # Execute + # ------------------------------------- + + # Submit as a slurm job array + if self.eped_job.launchSlurm: + EPEDcommand = f'cd {self.eped_job.folderExecution}/run"$SLURM_ARRAY_TASK_ID" && export NPROC_EPED={nproc_per_run} && ips.py --config=eped.config --platform=psfc_cluster.conf' + # Submit locally in parallel + else: + EPEDcommand = "" + for i in job_array.split(','): + EPEDcommand += f'cd {self.eped_job.folderExecution}/run{i} && export NPROC_EPED={nproc_per_run} && ips.py --config=eped.config --platform=psfc_cluster.conf & \n' + EPEDcommand += 'wait\n' + + # Prepare the job script + self.eped_job.prep(EPEDcommand,input_folders=folder_cases,output_files=copy.deepcopy(output_files),shellPreCommands=shellPreCommands) + + # Run the job + self.eped_job.run(removeScratchFolders=removeScratchFolders) + + # ------------------------------------- + # Postprocessing + # ------------------------------------- + + # Remove potential output files from previous runs + output_files_old = sorted(list(self.folder_run.glob("*.nc"))) + for output_file in output_files_old: + output_file.unlink() + + # Rename output files + for i in range(len(output_files)): + os.system(f'mv {self.folder_run / output_files[i]} {self.folder_run / f"output_run{i+1}.nc"}') + + def _prep_input_file( + self, + folder_case, + input_params = None, + eped_input_file = 'eped.input.1', # Do not call it directly 'eped.input' as it may be overwritten by the job script template copying commands + ): + + 
shot = 0 + timeid = 0 + + # Update with fixed parameters + input_params.update( + {'num_scan': 1, + 'shot': shot, + 'timeid': timeid, + 'runid': 0, + 'm': 2, + 'z': 1, + 'mi': 20, + 'zi': 10, + 'tewid': 0.03, + 'ptotwid': 0.03, + 'teped': -1, + 'ptotped': -1, + } + ) + + eped_input = {'eped_input': input_params} + nml = f90nml.Namelist(eped_input) + + # Write the input file + f90nml.write(nml, folder_case / eped_input_file, force=True) + + # What's the expected output file? + output_file = folder_case.relative_to(self.folder_run) / 'eped' / 'SUMMARY' / f'e{shot:06d}.{timeid:05d}' + + return output_file + + def read( + self, + subfolder = 'run1', + print_results = True, + label = None, + ): + + self.results[label if label is not None else subfolder] = {} + + where_is_this = self.folder / subfolder if self.folder is not None else Path(subfolder) + + output_files = sorted(list(where_is_this.glob("*.nc"))) + + for output_file in output_files: + + + with xr.open_dataset(f'{output_file.resolve()}', engine='netcdf4') as ds: + data = postprocess_eped(ds, 'G', 0.03) + + sublabel = output_file.name.split('_')[-1].split('.')[0] + + self.results[label if label is not None else subfolder][sublabel] = data + + if print_results: + self.print(label if label is not None else subfolder, sublabel) + + def print(self,label,sublabel): + + print(f'\n\t> EPED results {sublabel}:') + data = self.results[label][sublabel] + + print('\t\t> Inputs:') + for input_param in self.inputs_potential: + print(f'\t\t\t{input_param}: {data[input_param].values[0]}') + + + print('\t\t> Outputs:') + if 'ptop' in data.data_vars: + print(f'\t\t\tptop: {data["ptop"].values[0]:.2f} kPa') + print(f'\t\t\twptop: {data["wptop"].values[0]:.3f} psi_pol') + else: + print('\t\t\tptop: Not available',typeMsg='w') + + def plot( + self, + labels = ['run1'], + axs = None, + plot_labels = None, + legend_title = None, + legend_location = 'best', + ): + + if axs is None: + self.fn = GUItools.FigureNotebook("EPED", 
geometry="900x900") + fig = self.fn.add_figure(label="Pedestal Top") + axs = fig.subplots(2, 1) + + colors = GRAPHICStools.listColors() + + + + for i,name in enumerate(labels): + + data = self.results[name] + + neped, ptop, wtop = [], [], [] + sublabels = data.keys() + try: + sublabels = sorted(sublabels, key=lambda x: int(x.split('run')[1])) + except: + print('\t> Warning: sublabels could not be sorted numerically.', typeMsg='w') + for sublabel in sublabels: + neped.append(float(data[sublabel]['neped'])) + if 'ptop' in data[sublabel].data_vars: + ptop.append(float(data[sublabel]['ptop'])) + wtop.append(float(data[sublabel]['wptop'])) + else: + ptop.append(np.nan) + wtop.append(np.nan) + + axs[0].plot(neped,ptop,'-s', c = colors[i], ms = 10) + axs[1].plot(neped,wtop,'-s', c = colors[i], ms = 10) + + ax = axs[0] + ax.set_xlabel('neped ($10^{19}m^{-3}$)') + ax.set_ylabel('ptop (kPa)') + ax.set_ylim(bottom=0) + plot_labels = plot_labels if plot_labels is not None else labels + ax.legend(plot_labels, loc=legend_location, title =legend_title) + + GRAPHICStools.addDenseAxis(ax) + + ax = axs[1] + + ax.set_xlabel('neped ($10^{19}m^{-3}$)') + ax.set_ylabel('wptop (psi_pol)') + ax.set_ylim(bottom=0) + ax.legend(plot_labels, loc=legend_location, title=legend_title) + GRAPHICStools.addDenseAxis(ax) + + plt.tight_layout() + +# ************************************************************************************************************ +# ************************************************************************************************************ + +def convert_to_dimensional(df): + #ee = 1.60217663e-19 + mu0 = 1.25663706127e-6 + df['a'] = df['r'] / df['epsilon'] + df['ip'] = 1.0e-6 * (2.0 * np.pi * np.square(df['a']) * df['kappa'] * df['bt']) / (df['qstar'] * df['r'] * mu0) + df['neped'] = 10.0 * df['fgped'] * df['ip'] / (np.pi * np.square(df['a'])) + df['nesep'] = 0.25 * df['neped'] + #df['teped'] = 2500 * df['bt'] * df['ip'] * df['betan'] / (3 * df['a'] * 1.5 * df['neped']) + 
#df['teped'] = df['teped'].clip(upper=8000) + df['teped'] = df['r'] * 0.0 - 1.0 + return df + + +def convert_to_dimensionless(df): + mu0 = 1.25663706127e-6 + df['epsilon'] = df['r'] / df['a'] + df['fgped'] = df['neped'] * np.pi * np.square(df['a']) / (10.0 * df['ip']) + df['qstar'] = (2.0 * np.pi * np.square(df['a']) * df['kappa'] * df['bt']) / (1.0e6 * df['ip'] * df['r'] * mu0) + df['nesep'] = 0.25 * df['neped'] + df['teped'] = df['r'] * 0.0 - 1.0 + return df + + +def setup_eped(output_path, inputs_list, template_path): + + output_path = Path(output_path).resolve() # Ensure absolute path + output_path.mkdir(parents=True, exist_ok=True) + + subprocess.run(['cp', str(template_path.resolve() / 'exec_eped.sh'), str(output_path)]) + subprocess.run(['cp', str(template_path.resolve() / 'submit_eped_array_psfc.batch'), str(output_path)]) + subprocess.run(['cp', str(template_path.resolve() / 'postprocessing.py'), str(output_path)]) + rpaths = [] + + for run_num, inputs in enumerate(inputs_list): + run_id = f'run{run_num + 1:03d}' + rpath = output_path / run_id # Construct the absolute path for the run directory + subprocess.run(['cp', '-r', str(template_path.resolve() / 'eped_run_template'), str(rpath)]) + + # Edit input file + input_file = rpath / 'eped.input' + contents = f90nml.read(str(input_file)) + for param, value in inputs.items(): + contents['eped_input'][param] = value + contents.write(str(input_file), force=True) + + rpaths.append(rpath) + + #logger.info(f'{len(inputs_list)} Runs created at {output_path}') + + return rpaths + + +def setup_array_batch(launch_path, rpaths, maxqueue=5): + + # Convert to Path object and ensure absolute path + launch_path = Path(launch_path).resolve() + + s = '' + for path in rpaths: + if s: + s += '\n' + s += f'"./exec_eped.sh {path.resolve()}"' + + # Use proper Path object for file operations + batch_file = launch_path / 'submit_eped_array_psfc.batch' + with batch_file.open('r') as f: + content = f.read() + new_content = re.sub('', 
str(len(rpaths) - 1), content) + new_content = re.sub('', str(maxqueue), new_content) + new_content = re.sub('', str(launch_path), new_content) # Convert to string for substitution + new_content = re.sub('#', s, new_content) + with batch_file.open('w') as f: + f.write(new_content) + + #logger.info('Batch array created') + + return batch_file + + +def postprocess_eped(data, diamagnetic_stab_rule, stability_threshold): + + coords = {k: data[k].values for k in ['dim_height', 'dim_widths', 'dim_nmodes', 'dim_rho', 'dim_three', 'dim_one']} + data = data.assign_coords(coords) + + x = data['eq_betanped'].data + index = np.where(x < 0)[0] + if diamagnetic_stab_rule == 'G': + y = data['gamma'].data.copy() + elif diamagnetic_stab_rule in ['GH', 'HG']: + y = data['gamma_PB'].data.copy() + y *= data['gamma'].data.copy() + elif diamagnetic_stab_rule == 'H': + y = data['gamma_PB'].data.copy() + else: + y = data['gamma'].data.copy() + y[index, :] = np.nan + + data['stability'] = (('dim_height', 'dim_nmodes'), y) + y0 = np.nanmax(y, 1) + y0 = np.where(y0 == None, 0, y0) + indices = np.where(y0 > stability_threshold)[0] + if len(indices): + step = indices[0] + else: + step = -1 + + dims = ('dim_one') + data['stability_rule'] = (dims, [diamagnetic_stab_rule]) + data['stability_threshold'] = (dims, np.array([stability_threshold])) + if step > 0: + data['stability_index'] = (dims, np.array([step])) + data['pped'] = (dims, np.array([data['eq_pped'].data[step] * 1.0e3])) + data['ptop'] = (dims, np.array([data['eq_ptop'].data[step] * 1.0e3])) + data['tped'] = (dims, np.array([data['eq_tped'].data[step]])) + data['ttop'] = (dims, np.array([data['eq_ttop'].data[step]])) + data['wpped'] = (dims, np.array([data['eq_wped_psi'].data[step]])) + data['wptop'] = (dims, np.array([data['eq_wped_psi'].data[step] * 1.5])) + data['wrped'] = (dims, np.array([data['eq_wped_rho'].data[step]])) + if np.any(data['tesep'].data < 0): + data['tesep'] = (dims, np.array([75.0])) + data['nesep'] = 0.25 * 
data['neped'] + + return data + +def read_eped_file(ipaths): + invars = ['ip', 'bt', 'r' , 'a', 'kappa', 'delta', 'neped', 'betan', 'zeffped', 'nesep', 'tesep'] + data_arrays = [] + for ipath in ipaths: + dummy_coords = { + 'dim_height': np.empty((0, ), dtype=int), + 'dim_nmodes': np.empty((0, ), dtype=int), + 'dim_widths': np.empty((0, ), dtype=int), + 'dim_rho': np.empty((0, ), dtype=int), + 'dim_three': np.empty((0, ), dtype=int), + 'dim_one': np.arange(1), + } + set_inputs = f90nml.read(str(ipath.parent.parent.parent / 'eped.input')) + dummy_vars = {k: (['dim_one'], [v]) for k, v in set_inputs['eped_input'].items() if k in invars} + data = xr.Dataset(coords=dummy_coords, data_vars=dummy_vars) + if ipath.is_file(): + with xr.open_dataset(f'{ipath.resolve()}', engine='netcdf4') as ds: + data = ds.load() + data = postprocess_eped(data, 'G', 0.03) + data_arrays.append(data.expand_dims({'filename': [ipath.parent.parent.parent.name]})) + + dataset = xr.merge(data_arrays, join='outer', fill_value=np.nan).sortby('filename') + return dataset + +def launch_eped_slurm(input_params, scan_params, nscan, output_path, template_path, run_tag, wait=False): + ivars = ['ip', 'bt', 'r', 'a', 'kappa', 'delta', 'neped', 'betan', 'zeffped', 'nesep', 'tesep', 'teped'] + input_params.update(scan_params) + data = {} + for var, val in input_params.items(): + if isinstance(val, (tuple, list, np.ndarray)) and len(val) > 1: + data[var] = np.linspace(val[0], val[1], nscan) + else: + data[var] = np.zeros((nscan, )) + val + #if scan_var == 'qstar': # Use for ip scan + # data['fgped'] = (0.5 / 3.5) * data['qstar'] + inp = pd.DataFrame(data=data, index=pd.RangeIndex(nscan)) + #inp = convert_to_dimensional(inp) + inputs = [{ivar: inp[ivar].iloc[i] for ivar in ivars} for i in range(len(inp))] + run_paths = setup_eped(output_path, inputs, template_path) + spath = setup_array_batch(output_path, run_paths) + inp.to_hdf(output_path / f'{output_path.name}.h5', key='/data') + command = ['sbatch'] + if 
wait: + command.append('--wait') + command.append(f'{spath.resolve()}') + subprocess.run(command) + + return run_paths + +def main(): + + rootdir = Path(os.environ.get('PIXI_PROJECT_ROOT', './')) + run_tag = 'mitim_eped_test' + base_input_path = Path('./') / 'eped.input' + scan_params = { + # 'tesep': [50.0, 300.0], + } + nscan = 1 + output_path = Path('./') / f'eped_{run_tag}' + template_path = rootdir / 'ips-eped-master' / 'template' / 'engaging' + wait = False + + input_params = f90nml.read(str(base_input_path)).todict().get('eped_input', {}) + + launch_eped_slurm(input_params, scan_params, nscan, output_path, template_path, run_tag, wait=wait) + + +if __name__ == '__main__': + main() diff --git a/src/mitim_tools/eped_tools/scripts/plot_eped.py b/src/mitim_tools/eped_tools/scripts/plot_eped.py new file mode 100644 index 00000000..42ce68ec --- /dev/null +++ b/src/mitim_tools/eped_tools/scripts/plot_eped.py @@ -0,0 +1,27 @@ +import argparse +from mitim_tools.misc_tools import IOtools +from mitim_tools.eped_tools import EPEDtools +from IPython import embed + +def main(): + + parser = argparse.ArgumentParser() + parser.add_argument("folders", type=str, nargs="*") + + args = parser.parse_args() + + folders = [IOtools.expandPath(folder) for folder in args.folders] + + eped = EPEDtools.EPED(folder=None) + + for i, folder in enumerate(folders): + eped.read(subfolder=folder, label=f"run{i}") + + eped.plot(labels=[f"run{i}" for i in range(len(folders))]) + + eped.fn.show() + + embed() + +if __name__ == "__main__": + main() diff --git a/src/mitim_tools/gacode_tools/CGYROtools.py b/src/mitim_tools/gacode_tools/CGYROtools.py index e1f0b7e8..8a52d7a1 100644 --- a/src/mitim_tools/gacode_tools/CGYROtools.py +++ b/src/mitim_tools/gacode_tools/CGYROtools.py @@ -1,50 +1,92 @@ -import os -import shutil -import datetime -import time +import copy from pathlib import Path import numpy as np import matplotlib.pyplot as plt -from mitim_tools.gacode_tools.utils import GACODEdefaults, 
GACODErun -from mitim_tools.misc_tools import IOtools, GRAPHICStools, FARMINGtools +from mitim_tools import __mitimroot__ +from mitim_tools.gacode_tools.utils import GACODEdefaults, CGYROutils +from mitim_tools.simulation_tools import SIMtools +from mitim_tools.simulation_tools.utils import SIMplot +from mitim_tools.misc_tools import GRAPHICStools, CONFIGread from mitim_tools.gacode_tools.utils import GACODEplotting from mitim_tools.misc_tools.LOGtools import printMsg as print -from pygacode.cgyro.data_plot import cgyrodata_plot -from pygacode import gacodefuncs from IPython import embed +class CGYRO(SIMtools.mitim_simulation, SIMplot.GKplotting): + def __init__( + self, + **kwargs, + ): -class CGYRO: - def __init__(self): + super().__init__(**kwargs) - self.output_files_test = [ - "out.cgyro.equilibrium", - "out.cgyro.info", - "out.cgyro.mpi", - "input.cgyro.gen", - "out.cgyro.egrid", - "out.cgyro.grids", - "out.cgyro.memory", - "out.cgyro.rotation", - ] + def code_call(folder, p, n = 1, nomp = 1, additional_command="", **kwargs): + return f"cgyro -e {folder} -n {n} -nomp {nomp} -p {p} {additional_command}" - self.output_files = [ + def code_slurm_settings(name, minutes, total_cores_required, cores_per_code_call, type_of_submission, array_list=None, **kwargs_slurm): + + slurm_settings = { + "name": name, + "minutes": minutes, + } + + # Gather if this is a GPU enabled machine + machineSettings = CONFIGread.machineSettings(code='cgyro') + + if type_of_submission == "slurm_standard": + + slurm_settings['ntasks'] = total_cores_required // cores_per_code_call + + if machineSettings['gpus_per_node'] > 0: + slurm_settings['gpuspertask'] = cores_per_code_call + else: + slurm_settings['cpuspertask'] = cores_per_code_call + + elif type_of_submission == "slurm_array": + + slurm_settings['ntasks'] = 1 + if machineSettings['gpus_per_node'] > 0: + slurm_settings['gpuspertask'] = cores_per_code_call + else: + slurm_settings['cpuspertask'] = cores_per_code_call + 
slurm_settings['job_array'] = ",".join(array_list) + + return slurm_settings + + self.run_specifications = { + 'code': 'cgyro', + 'input_file': 'input.cgyro', + 'code_call': code_call, + 'code_slurm_settings': code_slurm_settings, + 'control_function': GACODEdefaults.addCGYROcontrol, + 'controls_file': 'input.cgyro.controls', + 'state_converter': 'to_cgyro', + 'input_class': CGYROinput, + 'complete_variation': None, + 'default_cores': 16, # Default cores to use in the simulation + 'output_class': CGYROutils.CGYROoutput, + } + + print("\n-----------------------------------------------------------------------------------------") + print("\t\t\t CGYRO class module") + print("-----------------------------------------------------------------------------------------\n") + + self.ResultsFiles = self.ResultsFiles_minimal = [ "bin.cgyro.geo", "bin.cgyro.kxky_e", "bin.cgyro.kxky_n", "bin.cgyro.kxky_phi", + "bin.cgyro.kxky_apar", + "bin.cgyro.kxky_bpar", "bin.cgyro.kxky_v", "bin.cgyro.ky_cflux", "bin.cgyro.ky_flux", "bin.cgyro.phib", + "bin.cgyro.aparb", + "bin.cgyro.bparb", "bin.cgyro.restart", - "bin.cgyro.restart.old", "input.cgyro", "input.cgyro.gen", - "input.gacode", "mitim.out", - "mitim_bash.src", - "mitim_shell_executor.sh", "out.cgyro.egrid", "out.cgyro.equilibrium", "out.cgyro.freq", @@ -62,623 +104,1534 @@ def __init__(self): "out.cgyro.version", ] - self.results = {} - - def prep(self, folder, inputgacode_file): - - # Prepare main folder with input.gacode - self.folder = IOtools.expandPath(folder) - - self.folder.mkdir(parents=True, exist_ok=True) + self.output_files_test = [ + "out.cgyro.equilibrium", + "out.cgyro.info", + "out.cgyro.mpi", + "input.cgyro.gen", + "out.cgyro.egrid", + "out.cgyro.grids", + "out.cgyro.memory", + "out.cgyro.rotation", + ] - self.inputgacode_file = self.folder / "input.gacode" - shutil.copy2(IOtools.expandPath(inputgacode_file), self.inputgacode_file) + # Redefine to raise warning + def _run_prepare( + self, + subfolder_simulation, + 
extraOptions=None, + multipliers=None, + **kwargs, + ): + + # --------------------------------------------- + # Check if any *_SCALE_* variable is being used + # --------------------------------------------- + dictionary_check = {} + if extraOptions is not None: + if multipliers is not None: + dictionary_check = {**extraOptions, **multipliers} + else: + dictionary_check = extraOptions + elif multipliers is not None: + dictionary_check = multipliers + + for key in dictionary_check: + if '_SCALE_' in key: + print(f"The use of *_SCALE_* is discouraged, please use the appropriate variable instead.", typeMsg='q') + + # --------------------------------------------- + + return super()._run_prepare( + subfolder_simulation, + extraOptions=extraOptions, + multipliers=multipliers, + **kwargs, + ) - def run( + # Re-defined to make specific arguments explicit + def read( self, - subFolderCGYRO, - roa=0.55, - CGYROsettings=None, - extraOptions={}, - multipliers={}, - test_run=False, - n=16, - nomp=1, + tmin = 0.0, + minimal = False, + last_tmin_for_linear = True, + **kwargs ): + + super().read( + tmin = tmin, + minimal = minimal, + last_tmin_for_linear = last_tmin_for_linear, + **kwargs) + + # Re-defined to make allowing reading a scan of KY linear runs easily + def read_scan( + self, + label="scan1", + cgyro_linear_scan = False, + **kwargs + ): + + super().read_scan(label=label,**kwargs) + + if cgyro_linear_scan: + self.results[label] = CGYROutils.CGYROlinear_scan(list(self.results.keys()), self.results) + print(f"\t- Created a linear scan object with label {label} from all the read cases", typeMsg='i') + + def read_linear_scan( + self, + folder=None, + preffix="scan", + **kwargs + ): + ''' + Useful utility for when a folder contains subfolders like... scan0, scan1, scan2... 
with different ky + ''' + + main_label = kwargs.get('label', 'run1') + del kwargs['label'] + + # Get all folders inside "folder" that start with "preffix" + subfolders = [subfolder for subfolder in Path(folder).glob(f"*{preffix}*") if subfolder.is_dir()] + + labels_in_results = [] + if len(subfolders) == 0: + print(f"No subfolders found in {folder} with preffix {preffix}. Reading the folder directly.") + labels_in_results.append(f'{main_label}_KY_scan0') + self.read(label=labels_in_results[-1], folder=folder, **kwargs) + else: + for subfolder in subfolders: + labels_in_results.append(f'{main_label}_KY_{subfolder.name}') + self.read(label=labels_in_results[-1], folder=subfolder, **kwargs) + + # ---------------------------------------------------------- + # Make it a linear scan for the main label + # ---------------------------------------------------------- + labelsD = [] + for label in labels_in_results: + parts = label.split('_') + if len(parts) >= 3 and parts[-2] == "KY": + # Extract the base name (scan1) and middle value (0.3/0.4) + base_name = '_'.join(parts[0:-2]) + labelsD.append(label) + + self.results[main_label] = CGYROutils.CGYROlinear_scan(labelsD, self.results) + + def plot( + self, + labels=[""], + fn=None, + include_2D=True, + common_colorbar=True): + + # If it has radii, we need to correct the labels + self.results_all = copy.deepcopy(self.results) + self.results = {} + labels_with_rho = [] + for label in labels: + for i,rho in enumerate(self.rhos): + labels_with_rho.append(f"{label}_{rho}") + self.results[f'{label}_{rho}'] = self.results_all[label]['output'][i] + labels = labels_with_rho + # ------------------------------------------------ + + if fn is None: + from mitim_tools.misc_tools.GUItools import FigureNotebook + self.fn = FigureNotebook("CGYRO Notebook", geometry="1600x1000") + else: + self.fn = fn - self.folderCGYRO = self.folder / f"{subFolderCGYRO}_{roa:.6f}" + fig = self.fn.add_figure(label="Fluxes (time)") + axsFluxes_t = 
fig.subplot_mosaic( + """ + AC + BD + """ + ) + fig = self.fn.add_figure(label="Fluxes (ky)") + axsFluxes_ky = fig.subplot_mosaic( + """ + AC + BD + """ + ) + fig = self.fn.add_figure(label="Intensities (time)") + axsIntensities = fig.subplot_mosaic( + """ + ACEG + BDFH + """ + ) + fig = self.fn.add_figure(label="Intensities (ky)") + axsIntensities_ky = fig.subplot_mosaic( + """ + ACEG + BDFH + """ + ) + fig = self.fn.add_figure(label="Intensities (kx)") + axsIntensities_kx = fig.subplot_mosaic( + """ + AC + BD + """ + ) + fig = self.fn.add_figure(label="Cross-phases (ky)") + axsCrossPhases = fig.subplot_mosaic( + """ + ACEG + BDFH + """ + ) + fig = self.fn.add_figure(label="Turbulence (linear)") + axsTurbulence = fig.subplot_mosaic( + """ + AC + BD + """ + ) + + create_ballooning = False + for label in labels: + if 'phi_ballooning' in self.results[label].__dict__: + create_ballooning = True + + if create_ballooning: + + fig = self.fn.add_figure(label="Ballooning") + axsBallooning = fig.subplot_mosaic( + """ + 135 + 246 + """ + ) + else: + axsBallooning = None + + + if include_2D: + axs2D = [] + for i in range(len(labels)): + fig = self.fn.add_figure(label="Turbulence (2D), " + labels[i]) + + mosaic = _2D_mosaic(4) # Plot 4 times by default + + axs2D.append(fig.subplot_mosaic(mosaic)) + + fig = self.fn.add_figure(label="Inputs") + axsInputs = fig.subplot_mosaic( + """ + A + B + """ + ) + + + colors = GRAPHICStools.listColors() + + colorbars_all = [] # Store all colorbars for later use + for j in range(len(labels)): + + self.plot_fluxes( + axs=axsFluxes_t, + label=labels[j], + c=colors[j], + plotLegend=j == len(labels) - 1, + ) + self.plot_fluxes_ky( + axs=axsFluxes_ky, + label=labels[j], + c=colors[j], + plotLegend=j == len(labels) - 1, + ) + self.plot_intensities_ky( + axs=axsIntensities_ky, + label=labels[j], + c=colors[j], + addText=j == len(labels) - 1, + ) + self.plot_intensities( + axs=axsIntensities, + label=labels[j], + c=colors[j], + addText=j == 
len(labels) - 1, # Add text only for the last label + ) + self.plot_intensities_kx( + axs=axsIntensities_kx, + label=labels[j], + c=colors[j], + addText=j == len(labels) - 1, # Add text only for the last label + ) + self.plot_turbulence( + axs=axsTurbulence, + label=labels[j], + c=colors[j], + ) + self.plot_cross_phases( + axs=axsCrossPhases, + label=labels[j], + c=colors[j], + ) + if create_ballooning: + self.plot_ballooning( + axs=axsBallooning, + label=labels[j], + c=colors[j], + ) + + if include_2D: + + colorbars = self.plot_2D( + axs=axs2D[j], + label=labels[j], + ) + + colorbars_all.append(colorbars) + + self.plot_inputs( + ax=axsInputs["A"], + label=labels[j], + c=colors[j], + ms= 10-j*0.5, # Decrease marker size for each label + normalization_label= labels[0], # Normalize to the first label + only_plot_differences=len(labels) > 1, # Only plot differences if there are multiple labels + ) + + self.plot_inputs( + ax=axsInputs["B"], + label=labels[j], + c=colors[j], + ms= 10-j*0.5, # Decrease marker size for each label + ) + + axsInputs["A"].axhline( + 1.0, + color="k", + ls="--", + lw=2.0 + ) + + GRAPHICStools.adjust_subplots(axs=axsInputs, vertical=0.4, horizontal=0.3) + + # Modify the colorbars to have a common range + if include_2D and common_colorbar and len(colorbars_all) > 0: + for var in ['phi', 'n', 'e']: + min_val = np.inf + max_val = -np.inf + for ilabel in range(len(colorbars_all)): + cb = colorbars_all[ilabel][0][var] + vals = cb.mappable.get_clim() + min_val = min(min_val, vals[0]) + max_val = max(max_val, vals[1]) + + for ilabel in range(len(colorbars_all)): + for it in range(len(colorbars_all[ilabel])): + cb = colorbars_all[ilabel][it][var] + cb.mappable.set_clim(min_val, max_val) + cb.update_ticks() + #cb.set_label(f"{var} (common range)") + + self.results = self.results_all + + def plot_inputs(self, ax = None, label="", c="b", ms = 10, normalization_label=None, only_plot_differences=False): + + if ax is None: + plt.ion() + fig, ax = 
plt.subplots(1, 1, figsize=(18, 9)) + + rel_tol = 1e-2 + + legadded = False + for i, ikey in enumerate(self.results[label].params1D): + + z = self.results[label].params1D[ikey] + + if normalization_label is not None: + z0 = self.results[normalization_label].params1D[ikey] + zp = z/z0 if z0 != 0 else 0 + label_plot = f"{label} / {normalization_label}" + else: + label_plot = label + zp = z - self.folderCGYRO.mkdir(parents=True, exist_ok=True) + if (not only_plot_differences) or (not np.isclose(z, z0, rtol=rel_tol)): + ax.plot(ikey,zp,'o',markersize=ms,color=c,label=label_plot if not legadded else '') + legadded = True - input_cgyro_file = self.folderCGYRO / "input.cgyro" - inputCGYRO = CGYROinput(file=input_cgyro_file) + if normalization_label is not None: + if only_plot_differences: + ylabel = f"Parameters (DIFFERENT by {rel_tol*100:.2f}%) relative to {normalization_label}" + else: + ylabel = f"Parameters relative to {normalization_label}" + else: + ylabel = "Parameters" - inputgacode_file_this = self.folderCGYRO / "input.gacode" - shutil.copy2(self.inputgacode_file, inputgacode_file_this) + ax.set_xlabel("Parameter") + ax.tick_params(axis='x', rotation=60) + ax.set_ylabel(ylabel) + GRAPHICStools.addDenseAxis(ax) + if legadded: + ax.legend(loc='best') - ResultsFiles_new = [] - for i in self.output_files: - if "mitim.out" not in i: - ResultsFiles_new.append(i) - self.output_files = ResultsFiles_new + def plot_fluxes(self, axs=None, label="", c="b", lw=1, plotLegend=True): + if axs is None: + plt.ion() + fig = plt.figure(figsize=(18, 9)) - inputCGYRO = GACODErun.modifyInputs( - inputCGYRO, - Settings=CGYROsettings, - extraOptions=extraOptions, - multipliers=multipliers, - addControlFunction=GACODEdefaults.addCGYROcontrol, - rmin=roa, - ) + axs = fig.subplot_mosaic( + """ + AB + CD + """ + ) - inputCGYRO.writeCurrentStatus() + ls = GRAPHICStools.listLS() - self.cgyro_job = FARMINGtools.mitim_job(self.folderCGYRO) + # Electron energy flux + ax = axs["A"] + 
self._plot_trace(ax,label,"Qe",c=c,lw=lw,ls=ls[0],label_plot=f"{label}, Total") + self._plot_trace(ax,label,"Qe_EM",c=c,lw=lw,ls=ls[1],label_plot=f"{label}, EM ($A_\\parallel$+$A_\\perp$)", meanstd=False) + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$Q_e$ (GB)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron energy flux') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) - name = f'mitim_cgyro_{subFolderCGYRO}_{roa:.6f}{"_test" if test_run else ""}' + # Electron particle flux + ax = axs["B"] + self._plot_trace(ax,label,"Ge",c=c,lw=lw,ls=ls[0],label_plot=f"{label}, Total") + self._plot_trace(ax,label,"Ge_EM",c=c,lw=lw,ls=ls[1],label_plot=f"{label}, EM ($A_\\parallel$+$A_\\perp$)", meanstd=False) + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\Gamma_e$ (GB)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron particle flux') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) - if test_run: + # Ion energy fluxes + ax = axs["C"] + self._plot_trace(ax,label,"Qi",c=c,lw=lw,ls=ls[0],label_plot=f"{label}, Total") + self._plot_trace(ax,label,"Qi_EM",c=c,lw=lw,ls=ls[1],label_plot=f"{label}, EM ($A_\\parallel$+$A_\\perp$)", meanstd=False) + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$Q_i$ (GB)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ion energy fluxes') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) - self.cgyro_job.define_machine( - "cgyro", - name, - slurm_settings={ - "name": name, - "minutes": 5, - "cpuspertask": 1, - "ntasks": 1, - }, - ) + # Ion species energy fluxes + ax = axs["D"] + for j, i in enumerate(self.results[label].ions_flags): + self._plot_trace(ax,label,self.results[label].Qi_all[j],c=c,lw=lw,ls=ls[j],label_plot=f"{label}, {self.results[label].all_names[i]}", meanstd=False) + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$Q_i$ (GB)") + GRAPHICStools.addDenseAxis(ax) + 
ax.set_title('Ion energy fluxes (separate species)') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) - CGYROcommand = "cgyro -t ." + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) - else: + def plot_fluxes_ky(self, axs=None, label="", c="b", lw=1, plotLegend=True): + if axs is None: + plt.ion() + fig = plt.figure(figsize=(18, 9)) - self.cgyro_job.define_machine( - "cgyro", - name, - launchSlurm=False, + axs = fig.subplot_mosaic( + """ + AC + BD + """ ) + + ls = GRAPHICStools.listLS() - if self.cgyro_job.launchSlurm: - CGYROcommand = f'gacode_qsub -e . -n {n} -nomp {nomp} -repo {self.cgyro_job.machineSettings["slurm"]["account"]} -queue {self.cgyro_job.machineSettings["slurm"]["partition"]} -w 0:10:00 -s' - else: + # Electron energy flux + ax = axs["A"] + ax.plot(self.results[label].ky, self.results[label].Qe_ky_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].Qe_ky_mean-self.results[label].Qe_ky_std, self.results[label].Qe_ky_mean+self.results[label].Qe_ky_std, color=c, alpha=0.2) - CGYROcommand = f"cgyro -e . -n {n} -nomp {nomp}" + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$Q_e$ (GB)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron energy flux vs. 
$k_\\theta\\rho_s$') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) - self.cgyro_job.prep( - CGYROcommand, - input_files=[input_cgyro_file, inputgacode_file_this], - output_files=self.output_files if not test_run else self.output_files_test, - ) + # Electron particle flux + ax = axs["B"] + ax.plot(self.results[label].ky, self.results[label].Ge_ky_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].Ge_ky_mean-self.results[label].Ge_ky_std, self.results[label].Ge_ky_mean+self.results[label].Ge_ky_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\Gamma_e$ (GB)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron particle flux vs. $k_\\theta\\rho_s$') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) - self.cgyro_job.run( - waitYN=not self.cgyro_job.launchSlurm - ) # ,removeScratchFolders=False) + # Ion energy flux + ax = axs["C"] + ax.plot(self.results[label].ky, self.results[label].Qi_ky_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].Qi_ky_mean-self.results[label].Qi_ky_std, self.results[label].Qi_ky_mean+self.results[label].Qi_ky_std, color=c, alpha=0.2) - def check(self, every_n_minutes=5): + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$Q_i$ (GB)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ion energy fluxes vs. 
$k_\\theta\\rho_s$') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) - if self.cgyro_job.launchSlurm: - print("- Checker job status") + # Ion species energy fluxes + ax = axs["D"] + for j, i in enumerate(self.results[label].ions_flags): + ax.plot(self.results[label].ky, self.results[label].Qi_all_ky_mean[j],ls[j]+'o', markersize=5, color=c, label=f"{label}, {self.results[label].all_names[i]}") - while True: - self.cgyro_job.check() - print( - f'\t- Current status (as of {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}): {self.cgyro_job.status} ({self.cgyro_job.infoSLURM["STATE"]})' - ) - if self.cgyro_job.status == 2: - break - else: - print(f"\t- Waiting {every_n_minutes} minutes") - time.sleep(every_n_minutes * 60) - else: - print("- Not checking status because this was run command line (not slurm)") + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$Q_i$ (GB)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ion energy fluxes vs. 
$k_\\theta\\rho_s$(separate species)') + if plotLegend: + ax.legend(loc='best', prop={'size': 8},) - print("\t- Job considered finished") + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) - def get(self): - """ - For a job that has been submitted but not waited for, once it is done, get the results - """ + def plot_intensities(self, axs = None, label= "cgyro1", c="b", addText=True): + + if axs is None: + plt.ion() + fig = plt.figure(figsize=(18, 9)) - if self.cgyro_job.launchSlurm: - self.cgyro_job.connect() - self.cgyro_job.retrieve() - self.cgyro_job.close() - else: - print( - "- Not retrieving results because this was run command line (not slurm)" + axs = fig.subplot_mosaic( + """ + ACEG + BDFH + """ ) + + ls = GRAPHICStools.listLS() + + ax = axs["A"] + ax.plot(self.results[label].t, self.results[label].phi_rms_sumnr_sumn*100.0, '-', c=c, lw=2, label=f"{label}") + ax.plot(self.results[label].t, self.results[label].phi_rms_sumnr_n0*100.0, '-.', c=c, lw=0.5, label=f"{label}, $n=0$") + ax.plot(self.results[label].t, self.results[label].phi_rms_sumnr_sumn1*100.0, '--', c=c, lw=0.5, label=f"{label}, $n>0$") + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta \\phi/\\phi_0$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Potential intensity fluctuations') + ax.legend(loc='best', prop={'size': 8},) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}\sum_{n_r}|\delta\phi/\phi_0|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) - # --------------------------------------------------------------------------------------------------------- - # Reading and plotting - # --------------------------------------------------------------------------------------------------------- - - def read(self, label="cgyro1", folder=None): - - folder = IOtools.expandPath(folder) if folder is 
not None else self.folderCGYRO - - try: - self.results[label] = cgyrodata_plot(folder) - except: - if ( - True - ): # print('- Could not read data, do you want me to try do "cgyro -t" in the folder?',typeMsg='q'): - os.chdir(folder) - os.system("cgyro -t") - self.results[label] = cgyrodata_plot(f"{folder.resolve()}{os.sep}") - - # Extra postprocessing - self.results[label].electron_flag = np.where(self.results[label].z == -1)[0][0] - self.results[label].all_flags = np.arange(0, len(self.results[label].z), 1) - self.results[label].ions_flags = self.results[label].all_flags[ - self.results[label].all_flags != self.results[label].electron_flag - ] - - self.results[label].all_names = [ - f"{gacodefuncs.specmap(self.results[label].mass[i],self.results[label].z[i])}({self.results[label].z[i]},{self.results[label].mass[i]:.1f})" - for i in self.results[label].all_flags - ] - - def plotLS(self, labels=["cgyro1"], fig=None): - colors = GRAPHICStools.listColors() + ax = axs["B"] + if 'apar' in self.results[label].__dict__: + ax.plot(self.results[label].t, self.results[label].apar_rms_sumnr_sumn*100.0, '-', c=c, lw=2, label=f"{label}, $A_\\parallel$") + ax.plot(self.results[label].t, self.results[label].bpar_rms_sumnr_sumn*100.0, '--', c=c, lw=2, label=f"{label}, $B_\\parallel$") + ax.legend(loc='best', prop={'size': 8},) - if fig is None: - # fig = plt.figure(figsize=(15,9)) + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta F_\\parallel/F_{\\parallel,0}$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('EM potential intensity fluctuations') + - from mitim_tools.misc_tools.GUItools import FigureNotebook + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}\sum_{n_r}|\delta F_\parallel/F_{\parallel,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) - self.fnLS = FigureNotebook( - "Linear CGYRO 
Notebook", - geometry="1600x1000", - ) - fig1 = self.fnLS.add_figure(label="Linear Stability") - fig2 = self.fnLS.add_figure(label="Ballooning") - - grid = plt.GridSpec(2, 2, hspace=0.3, wspace=0.3) - ax00 = fig1.add_subplot(grid[0, 0]) - ax10 = fig1.add_subplot(grid[1, 0], sharex=ax00) - ax01 = fig1.add_subplot(grid[0, 1]) - ax11 = fig1.add_subplot(grid[1, 1], sharex=ax01) - - K, G, F = [], [], [] - for cont, label in enumerate(self.results): - c = self.results[label] - baseColor = colors[cont] - colorsC, _ = GRAPHICStools.colorTableFade( - len(c.ky), - startcolor=baseColor, - endcolor=baseColor, - alphalims=[1.0, 0.4], - ) - ax = ax00 - for ky in range(len(c.ky)): - ax.plot( - c.t, - c.freq[1, ky, :], - color=colorsC[ky], - label=f"$k_{{\\theta}}\\rho_s={np.abs(c.ky[ky]):.2f}$", - ) - ax = ax10 - for ky in range(len(c.ky)): - ax.plot( - c.t, - c.freq[0, ky, :], - color=colorsC[ky], - label=f"$k_{{\\theta}}\\rho_s={np.abs(c.ky[ky]):.2f}$", - ) + ax = axs["C"] + ax.plot(self.results[label].t, self.results[label].ne_rms_sumnr_sumn*100.0, '-', c=c, lw=2, label=f"{label}") + ax.plot(self.results[label].t, self.results[label].ne_rms_sumnr_n0*100.0, '-.', c=c, lw=0.5, label=f"{label}, $n=0$") + ax.plot(self.results[label].t, self.results[label].ne_rms_sumnr_sumn1*100.0, '--', c=c, lw=0.5, label=f"{label}, $n>0$") + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta n_e/n_{e,0}/n_{e0}$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron Density intensity fluctuations') + ax.legend(loc='best', prop={'size': 8},) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}\sum_{n_r}|\delta n_e/n_{e,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) - K.append(np.abs(c.ky[0])) - G.append(c.freq[1, 0, -1]) - F.append(c.freq[0, 0, -1]) - - GACODEplotting.plotTGLFspectrum( - [ax01, ax11], - K, - G, 
- freq=F, - coeff=0.0, - c=colors[0], - ls="-", - lw=1, - label="", - facecolors=colors[: len(K)], - markersize=50, - alpha=1.0, - titles=["Growth Rate", "Real Frequency"], - removeLow=1e-4, - ylabel=True, - ) - ax = ax00 - ax.set_xlabel("Time $(a/c_s)$") - ax.axhline(y=0, lw=0.5, ls="--", c="k") - ax.set_ylabel("$\\gamma$ $(c_s/a)$") - ax.set_title("Growth Rate") - ax.set_xlim(left=0) - ax.legend() - ax = ax10 - ax.set_xlabel("Time $(a/c_s)$") - ax.set_ylabel("$\\omega$ $(c_s/a)$") - ax.set_title("Real Frequency") - ax.axhline(y=0, lw=0.5, ls="--", c="k") - ax.set_xlim(left=0) - ax = ax01 - ax.set_xlim([5e-2, 50.0]) + ax = axs["D"] + ax.plot(self.results[label].t, self.results[label].Te_rms_sumnr_sumn*100.0, '-', c=c, lw=2, label=f"{label}") + ax.plot(self.results[label].t, self.results[label].Te_rms_sumnr_n0*100.0, '-.', c=c, lw=0.5, label=f"{label}, $n=0$") + ax.plot(self.results[label].t, self.results[label].Te_rms_sumnr_sumn1*100.0, '--', c=c, lw=0.5, label=f"{label}, $n>0$") - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - ax00 = fig2.add_subplot(grid[0, 0]) - ax01 = fig2.add_subplot(grid[0, 1], sharex=ax00, sharey=ax00) - ax02 = fig2.add_subplot(grid[0, 2], sharex=ax00, sharey=ax00) - ax10 = fig2.add_subplot(grid[1, 0], sharex=ax00, sharey=ax00) - ax11 = fig2.add_subplot(grid[1, 1], sharex=ax01, sharey=ax00) - ax12 = fig2.add_subplot(grid[1, 2], sharex=ax02, sharey=ax00) + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta T_e/T_{e,0}/T_{e0}$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron Temperature intensity fluctuations') + ax.legend(loc='best', prop={'size': 8},) - it = -1 + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}\sum_{n_r}|\delta T_e/T_{e,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) - for cont, label in enumerate(self.results): - c = 
self.results[label] - baseColor = colors[cont] - colorsC, _ = GRAPHICStools.colorTableFade( - len(c.ky), - startcolor=baseColor, - endcolor=baseColor, - alphalims=[1.0, 0.4], - ) - ax = ax00 - for ky in range(len(c.ky)): - for var, axs, label in zip( - ["phib", "aparb", "bparb"], - [[ax00, ax10], [ax01, ax11], [ax02, ax12]], - ["phi", "abar", "aper"], - ): - try: - f = c.__dict__[var][0, :, it] + 1j * c.__dict__[var][1, :, it] - y1 = np.real(f) - y2 = np.imag(f) - x = c.thetab / np.pi - - ax = axs[0] - ax.plot( - x, - y1, - color=colorsC[ky], - ls="-", - label=f"$k_{{\\theta}}\\rho_s={np.abs(c.ky[ky]):.2f}$", - ) - ax = axs[1] - ax.plot(x, y2, color=colorsC[ky], ls="-") - except: - pass - - ax = ax00 - ax.set_xlabel("$\\theta/\\pi$") - ax.set_ylabel("Re($\\delta\\phi$)") - ax.set_title("$\\delta\\phi$") - ax.legend(loc="best") - ax.set_xlim([-2 * np.pi, 2 * np.pi]) + ax = axs["E"] + ax.plot(self.results[label].t, self.results[label].ni_rms_sumnr_sumn*100.0, '-', c=c, lw=2, label=f"{label}") + ax.plot(self.results[label].t, self.results[label].ni_rms_sumnr_n0*100.0, '-.', c=c, lw=0.5, label=f"{label}, $n=0$") + ax.plot(self.results[label].t, self.results[label].ni_rms_sumnr_sumn1*100.0, '--', c=c, lw=0.5, label=f"{label}, $n>0$") + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta n_i/n_{i,0}/n_{i0}$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ion Density intensity fluctuations') + ax.legend(loc='best', prop={'size': 8},) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}\sum_{n_r}|\delta n_i/n_{i,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) - ax = ax01 - ax.set_xlabel("$\\theta/\\pi$") - ax.set_ylabel("Re($\\delta A\\parallel$)") - ax.set_title("$\\delta A\\parallel$") - ax = ax02 - ax.set_xlabel("$\\theta/\\pi$") - ax.set_ylabel("Re($\\delta B\\parallel$)") - 
ax.set_title("$\\delta B\\parallel$") - ax = ax10 - ax.set_xlabel("$\\theta/\\pi$") - ax.set_ylabel("Im($\\delta\\phi$)") - ax = ax11 - ax.set_xlabel("$\\theta/\\pi$") - ax.set_ylabel("Im($\\delta A\\parallel$)") - ax = ax12 - ax.set_xlabel("$\\theta/\\pi$") - ax.set_ylabel("Im($\\delta B\\parallel$)") - for ax in [ax00, ax01, ax02, ax10, ax11, ax12]: - ax.axvline(x=0, lw=0.5, ls="--", c="k") - ax.axhline(y=0, lw=0.5, ls="--", c="k") - def get_flux(self, label="", moment="e", ispec=0, retrieveSpecieFlag=True): - cgyro = self.results[label] - - # Time - usec = cgyro.getflux() - cgyro.getnorm("elec") - t = cgyro.tnorm - - # Flux - ys = np.sum(cgyro.ky_flux, axis=(2, 3)) - if moment == "n": - y = ys[ispec, 0, :] - mtag = r"\Gamma" - elif moment == "e": - y = ys[ispec, 1, :] / cgyro.qc - mtag = r"Q" - elif moment == "v": - y = ys[ispec, 2, :] - mtag = r"\Pi" - elif moment == "s": - y = ys[ispec, 3, :] - mtag = r"S" - - name = gacodefuncs.specmap(cgyro.mass[ispec], cgyro.z[ispec]) - - if retrieveSpecieFlag: - flag = f"${mtag}_{{{name}}}$ (GB)" - else: - flag = f"${mtag}$ (GB)" + ax = axs["F"] + ax.plot(self.results[label].t, self.results[label].Ti_rms_sumnr_sumn*100.0, '-', c=c, lw=2, label=f"{label}") + ax.plot(self.results[label].t, self.results[label].Ti_rms_sumnr_n0*100.0, '-.', c=c, lw=0.5, label=f"{label}, $n=0$") + ax.plot(self.results[label].t, self.results[label].Ti_rms_sumnr_sumn1*100.0, '--', c=c, lw=0.5, label=f"{label}, $n>0$") + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta T_i/T_{i,0}/T_{i0}$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ion Temperature intensity fluctuations') + ax.legend(loc='best', prop={'size': 8},) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}\sum_{n_r}|\delta T_i/T_{i,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + + ax = axs["G"] 
+ for ion in self.results[label].ions_flags: + ax.plot(self.results[label].t, self.results[label].ni_all_rms_sumnr_sumn[ion]*100.0, ls[ion], c=c, lw=1, label=f"{label}, {self.results[label].all_names[ion]}") + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta n_i/n_{i,0}/n_{i0}$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ions (all) Density intensity fluctuations') + ax.legend(loc='best', prop={'size': 8},) + + + ax = axs["H"] + for ion in self.results[label].ions_flags: + ax.plot(self.results[label].t, self.results[label].Ti_all_rms_sumnr_sumn[ion]*100.0, ls[ion], c=c, lw=1, label=f"{label}, {self.results[label].all_names[ion]}") + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\delta T_i/T_{i,0}/n_{i0}$ (%)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ions (all) Temperature intensity fluctuations') + ax.legend(loc='best', prop={'size': 8},) + + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) + + def plot_intensities_ky(self, axs=None, label="", c="b", addText=True): + if axs is None: + plt.ion() + fig = plt.figure(figsize=(18, 9)) - return t, y, flag + axs = fig.subplot_mosaic( + """ + ACEG + BDFH + """ + ) + + ls = GRAPHICStools.listLS() - def plot_flux( - self, - ax=None, - label="", - moment="e", - ispecs=[0], - labelPlot="", - c="b", - lw=1, - tmax=None, - dense=True, - ls="-", - ): - if ax is None: + # Potential intensity + ax = axs["A"] + ax.plot(self.results[label].ky, self.results[label].phi_rms_sumnr_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].phi_rms_sumnr_mean-self.results[label].phi_rms_sumnr_std, self.results[label].phi_rms_sumnr_mean+self.results[label].phi_rms_sumnr_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel(r"$\delta\phi/\phi_0$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Potential intensity vs. 
$k_\\theta\\rho_s$') + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n_r}|\delta\phi/\phi_0|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + # EM potential intensity + ax = axs["B"] + if 'apar' in self.results[label].__dict__: + ax.plot(self.results[label].ky, self.results[label].apar_rms_sumnr_mean, '-o', markersize=5, color=c, label=label+', $A_\\parallel$ (mean)') + ax.fill_between(self.results[label].ky, self.results[label].apar_rms_sumnr_mean-self.results[label].apar_rms_sumnr_std, self.results[label].apar_rms_sumnr_mean+self.results[label].apar_rms_sumnr_std, color=c, alpha=0.2) + ax.plot(self.results[label].ky, self.results[label].bpar_rms_sumnr_mean, '--', markersize=5, color=c, label=label+', $B_\\parallel$ (mean)') + ax.fill_between(self.results[label].ky, self.results[label].bpar_rms_sumnr_mean-self.results[label].bpar_rms_sumnr_std, self.results[label].bpar_rms_sumnr_mean+self.results[label].bpar_rms_sumnr_std, color=c, alpha=0.2) + + ax.legend(loc='best', prop={'size': 8},) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel(r"$\delta F_\parallel/F_{\parallel,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('EM potential intensity vs. 
$k_\\theta\\rho_s$') + + ax.axhline(0.0, color='k', ls='--', lw=1) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n_r}|\delta F_\parallel/F_{\parallel,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + + # Electron particle intensity + ax = axs["C"] + ax.plot(self.results[label].ky, self.results[label].ne_rms_sumnr_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].ne_rms_sumnr_mean-self.results[label].ne_rms_sumnr_std, self.results[label].ne_rms_sumnr_mean+self.results[label].ne_rms_sumnr_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\delta n_e/n_{e,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron particle intensity vs. $k_\\theta\\rho_s$') + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n_r}|\delta n_e/n_{e,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + # Electron temperature intensity + ax = axs["D"] + ax.plot(self.results[label].ky, self.results[label].Te_rms_sumnr_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].Te_rms_sumnr_mean-self.results[label].Te_rms_sumnr_std, self.results[label].Te_rms_sumnr_mean+self.results[label].Te_rms_sumnr_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\delta T_e/T_{e,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron temperature intensity vs. 
$k_\\theta\\rho_s$') + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n_r}|\delta T_e/T_{e,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + + # Ion particle intensity + ax = axs["E"] + ax.plot(self.results[label].ky, self.results[label].ni_rms_sumnr_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].ni_rms_sumnr_mean-self.results[label].ni_rms_sumnr_std, self.results[label].ni_rms_sumnr_mean+self.results[label].ni_rms_sumnr_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\delta n_i/n_{i,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ion particle intensity vs. $k_\\theta\\rho_s$') + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n_r}|\delta n_i/n_{i,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + # Ion temperature intensity + ax = axs["F"] + ax.plot(self.results[label].ky, self.results[label].Ti_rms_sumnr_mean, '-o', markersize=5, color=c, label=label+' (mean)') + ax.fill_between(self.results[label].ky, self.results[label].Ti_rms_sumnr_mean-self.results[label].Ti_rms_sumnr_std, self.results[label].Ti_rms_sumnr_mean+self.results[label].Ti_rms_sumnr_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\delta T_i/T_{i,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ion temperature intensity vs. 
$k_\\theta\\rho_s$') + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n_r}|\delta T_i/T_{i,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + + # Ion particle intensity + ax = axs["G"] + for ion in self.results[label].ions_flags: + ax.plot(self.results[label].ky, self.results[label].ni_all_rms_sumnr_mean[ion], ls[ion]+'o', markersize=5, color=c, label=f"{label}, {self.results[label].all_names[ion]} (mean)") + + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\delta n_i/n_{i,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ions (all) particle intensity vs. $k_\\theta\\rho_s$') + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + + # Ion temperature intensity + ax = axs["H"] + for ion in self.results[label].ions_flags: + ax.plot(self.results[label].ky, self.results[label].Ti_all_rms_sumnr_mean[ion], ls[ion]+'o', markersize=5, color=c, label=f"{label}, {self.results[label].all_names[ion]} (mean)") + + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\delta T_i/T_{i,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Ions (all) temperature intensity vs. 
$k_\\theta\\rho_s$') + ax.legend(loc='best', prop={'size': 8},) + ax.axhline(0.0, color='k', ls='--', lw=1) + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) + + def plot_intensities_kx(self, axs=None, label="", c="b", addText=True): + if axs is None: plt.ion() - fig, ax = plt.subplots() - - for i, ispec in enumerate(ispecs): - t, y0, yl = self.get_flux( - label=label, - moment=moment, - ispec=ispec, - retrieveSpecieFlag=len(ispecs) == 1, + fig = plt.figure(figsize=(18, 9)) + + axs = fig.subplot_mosaic( + """ + AC + BD + """ ) - if i == 0: - y = y0 - else: - y += y0 + # Potential intensity + ax = axs["A"] + ax.plot(self.results[label].kx, self.results[label].phi_rms_sumn_mean, '-o', markersize=1.0, lw=1.0, color=c, label=label+' (mean)') + ax.plot(self.results[label].kx, self.results[label].phi_rms_n0_mean, '-.', markersize=0.5, lw=0.5, color=c, label=label+', $n=0$ (mean)') + ax.plot(self.results[label].kx, self.results[label].phi_rms_sumn1_mean, '--', markersize=0.5, lw=0.5, color=c, label=label+', $n>0$ (mean)') + + ax.set_xlabel("$k_{x}$") + ax.set_ylabel("$\\delta \\phi/\\phi_0$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Potential intensity vs kx') + ax.legend(loc='best', prop={'size': 8},) + ax.set_yscale('log') + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}|\delta\phi/\phi_0|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + # EM potential intensity + ax = axs["C"] + if 'apar' in self.results[label].__dict__: + ax.plot(self.results[label].kx, self.results[label].apar_rms_sumn_mean, '-o', markersize=1.0, lw=1.0, color=c, label=label+', $A_\\parallel$ (mean)') + ax.plot(self.results[label].kx, self.results[label].bpar_rms_sumn_mean, '--', markersize=1.0, lw=1.0, color=c, label=label+', $B_\\parallel$ (mean)') - if tmax is not None: - it = np.argmin(np.abs(t - tmax)) - t = t[: it + 
1] - y = y[: it + 1] + ax.legend(loc='best', prop={'size': 8},) - ax.plot(t, y, ls=ls, lw=lw, c=c, label=labelPlot) - ax.set_xlabel("$t$ ($a/c_s$)") - # ax.set_xlim(left=0) - ax.set_ylabel(yl) + ax.set_xlabel("$k_{x}$") + ax.set_ylabel("$\\delta F_\\parallel/F_{\\parallel,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('EM potential intensity vs kx') + ax.set_yscale('log') - if dense: - GRAPHICStools.addDenseAxis(ax) + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}|\delta F_\parallel/F_{\parallel,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) - def plot_fluxes(self, axs=None, label="", c="b", lw=1, plotLegend=True): + + # Electron particle intensity + ax = axs["B"] + ax.plot(self.results[label].kx, self.results[label].ne_rms_sumn_mean, '-o', markersize=1.0, lw=1.0, color=c, label=label+' (mean)') + ax.plot(self.results[label].kx, self.results[label].ne_rms_n0_mean, '-.', markersize=0.5, lw=0.5, color=c, label=label+', $n=0$ (mean)') + ax.plot(self.results[label].kx, self.results[label].ne_rms_sumn1_mean, '--', markersize=0.5, lw=0.5, color=c, label=label+', $n>0$ (mean)') + + ax.set_xlabel("$k_{x}$") + ax.set_ylabel("$\\delta n_e/n_{e,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron particle intensity vs kx') + ax.legend(loc='best', prop={'size': 8},) + ax.set_yscale('log') + + # Add mathematical definitions text + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}|\delta n_e/n_{e,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + # Electron temperature intensity + ax = axs["D"] + ax.plot(self.results[label].kx, self.results[label].Te_rms_sumn_mean, '-o', markersize=1.0, lw=1.0, color=c, label=label+' (mean)') + ax.plot(self.results[label].kx, self.results[label].Te_rms_n0_mean, '-.', 
markersize=0.5, lw=0.5, color=c, label=label+', $n=0$ (mean)') + ax.plot(self.results[label].kx, self.results[label].Te_rms_sumn1_mean, '--', markersize=0.5, lw=0.5, color=c, label=label+', $n>0$ (mean)') + + ax.set_xlabel("$k_{x}$") + ax.set_ylabel("$\\delta T_e/T_{e,0}$") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Electron temperature intensity vs kx') + ax.legend(loc='best', prop={'size': 8},) + ax.set_yscale('log') + + if addText: + ax.text(0.02, 0.95, + r'$\sqrt{\langle\sum_{n}|\delta T_e/T_{e,0}|^2\rangle}$', + transform=ax.transAxes, + fontsize=12, + verticalalignment='top', + bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8)) + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) + + def plot_turbulence(self, axs = None, label= "cgyro1", c="b", kys = None): + if axs is None: plt.ion() fig = plt.figure(figsize=(18, 9)) axs = fig.subplot_mosaic( """ - ABC - DEF - """ + AC + BD + """ ) - ls = GRAPHICStools.listLS() + # Is no kys provided, select just 3: first, last and middle + if kys is None: + ikys = [0] + if len(self.results[label].ky) > 1: + ikys.append(-1) + if len(self.results[label].ky) > 2: + ikys.append(len(self.results[label].ky) // 2) + + ikys = np.unique(ikys) + else: + ikys = [self.results[label].ky.index(ky) for ky in kys if ky in self.results[label].ky] - # Electron + # Growth rate as function of time ax = axs["A"] - self.plot_flux( - ax=ax, - label=label, - moment="e", - ispecs=[self.results[label].electron_flag], - labelPlot=label, - c=c, - lw=lw, - ) - ax.set_title("Electron energy flux") - + for i,ky in enumerate(ikys): + self._plot_trace( + ax, + label, + self.results[label].g[ky, :], + c=c, + ls = GRAPHICStools.listLS()[i], + lw=1, + label_plot=f"$k_{{\\theta}}\\rho_s={np.abs(self.results[label].ky[ky]):.2f}$", + var_meanstd = [self.results[label].g_mean[ky], self.results[label].g_std[ky]], + ) + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\gamma$ (norm.)") + 
GRAPHICStools.addDenseAxis(ax) + ax.set_title('Growth rate vs time') + ax.legend(loc='best', prop={'size': 8},) + + # Frequency as function of time ax = axs["B"] - self.plot_flux( - ax=ax, - label=label, - moment="e", - ispecs=self.results[label].ions_flags, - labelPlot=f"{label}, sum", - c=c, - lw=lw, - ls=ls[0], - ) - for j, i in enumerate(self.results[label].ions_flags): - self.plot_flux( - ax=ax, - label=label, - moment="e", - ispecs=[i], - labelPlot=f"{label}, {self.results[label].all_names[i]}", + for i,ky in enumerate(ikys): + self._plot_trace( + ax, + label, + self.results[label].f[ky, :], c=c, - lw=lw / 2, - ls=ls[j + 1], + ls = GRAPHICStools.listLS()[i], + lw=1, + label_plot=f"$k_{{\\theta}}\\rho_s={np.abs(self.results[label].ky[ky]):.2f}$", + var_meanstd = [self.results[label].f_mean[ky], self.results[label].f_std[ky]], ) - ax.set_title(f"Ion energy fluxes") - - # Ion + + ax.set_xlabel("$t$ ($a/c_s$)"); #ax.set_xlim(left=0.0) + ax.set_ylabel("$\\omega$ (norm.)") + GRAPHICStools.addDenseAxis(ax) + ax.set_title('Real Frequency vs time') + ax.legend(loc='best', prop={'size': 8},) + + # Mean+Std Growth rate as function of ky + ax = axs["C"] + ax.errorbar(self.results[label].ky, self.results[label].g_mean, yerr=self.results[label].g_std, fmt='-o', markersize=5, color=c, label=label+' (mean+std)') + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\gamma$ (norm.)") + ax.set_title('Saturated Growth Rate') + GRAPHICStools.addDenseAxis(ax) + ax.legend(loc='best', prop={'size': 8},) + + # Mean+Std Frequency as function of ky ax = axs["D"] - self.plot_flux( - ax=ax, - label=label, - moment="n", - ispecs=[self.results[label].electron_flag], - labelPlot=label, - c=c, - lw=lw, - ) - ax.set_title("Electron particle flux") + ax.errorbar(self.results[label].ky, self.results[label].f_mean, yerr=self.results[label].f_std, fmt='-o', markersize=5, color=c, label=label+' (mean+std)') + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\omega$ (norm.)") + 
ax.set_title('Saturated Real Frequency') + GRAPHICStools.addDenseAxis(ax) + ax.legend(loc='best', prop={'size': 8},) + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) - ax = axs["E"] - for j, i in enumerate(self.results[label].ions_flags): - self.plot_flux( - ax=ax, - label=label, - moment="n", - ispecs=[i], - labelPlot=f"{label}, {self.results[label].all_names[i]}", - c=c, - lw=lw / 2, - ls=ls[j + 1], + def plot_cross_phases(self, axs = None, label= "cgyro1", c="b"): + + if axs is None: + plt.ion() + fig = plt.figure(figsize=(18, 9)) + + axs = fig.subplot_mosaic( + """ + ACEG + BDFH + """ ) - self.plot_flux( - ax=ax, - label=label, - moment="n", - ispecs=self.results[label].ions_flags, - labelPlot=f"{label}, sum", - c=c, - lw=lw, - ls=ls[0], - ) - ax.set_title("Ion particle fluxes") + + ls = GRAPHICStools.listLS() + m = GRAPHICStools.listmarkers() + + ax = axs["A"] + ax.plot(self.results[label].ky, self.results[label].neTe_kx0_mean, '-o', c=c, lw=2, label=f"{label} (mean)") + ax.fill_between(self.results[label].ky, self.results[label].neTe_kx0_mean-self.results[label].neTe_kx0_std, self.results[label].neTe_kx0_mean+self.results[label].neTe_kx0_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$n_e-T_e$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$n_e-T_e$ cross-phase ($k_x=0$)') + ax.legend(loc='best', prop={'size': 8},) + + + ax = axs["B"] + ax.plot(self.results[label].ky, self.results[label].niTi_kx0_mean, '-o', c=c, lw=2, label=f"{label} (mean)") + ax.fill_between(self.results[label].ky, self.results[label].niTi_kx0_mean-self.results[label].niTi_kx0_std, self.results[label].niTi_kx0_mean+self.results[label].niTi_kx0_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$n_i-T_i$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + 
ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$n_i-T_i$ cross-phase ($k_x=0$)') + ax.legend(loc='best', prop={'size': 8},) - # Extra ax = axs["C"] - for j, i in enumerate(self.results[label].all_flags): - self.plot_flux( - ax=ax, - label=label, - moment="v", - ispecs=[i], - labelPlot=f"{label}, {self.results[label].all_names[i]}", - c=c, - lw=lw / 2, - ls=ls[j + 1], - ) - self.plot_flux( - ax=ax, - label=label, - moment="v", - ispecs=self.results[label].all_flags, - labelPlot=f"{label}, sum", - c=c, - lw=lw, - ls=ls[0], - ) - ax.set_title("Momentum flux") + ax.plot(self.results[label].ky, self.results[label].phine_kx0_mean, '-o', c=c, lw=2, label=f"{label} (mean)") + ax.fill_between(self.results[label].ky, self.results[label].phine_kx0_mean-self.results[label].phine_kx0_std, self.results[label].phine_kx0_mean+self.results[label].phine_kx0_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\phi-n_e$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$\\phi-n_e$ cross-phase ($k_x=0$)') + ax.legend(loc='best', prop={'size': 8},) + + ax = axs["D"] + ax.plot(self.results[label].ky, self.results[label].phini_kx0_mean, '-o', c=c, lw=2, label=f"{label} (mean)") + ax.fill_between(self.results[label].ky, self.results[label].phini_kx0_mean-self.results[label].phini_kx0_std, self.results[label].phini_kx0_mean+self.results[label].phini_kx0_std, color=c, alpha=0.2) + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\phi-n_i$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$\\phi-n_i$ cross-phase ($k_x=0$)') + ax.legend(loc='best', prop={'size': 8},) + + + ax = axs["E"] + ax.plot(self.results[label].ky, self.results[label].phiTe_kx0_mean, '-o', c=c, lw=2, label=f"{label} (mean)") + ax.fill_between(self.results[label].ky, 
self.results[label].phiTe_kx0_mean-self.results[label].phiTe_kx0_std, self.results[label].phiTe_kx0_mean+self.results[label].phiTe_kx0_std, color=c, alpha=0.2) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\phi-T_e$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$\\phi-T_e$ cross-phase ($k_x=0$)') + ax.legend(loc='best', prop={'size': 8},) + ax = axs["F"] - try: - for j, i in enumerate(self.results[label].all_flags): - self.plot_flux( - ax=ax, - label=label, - moment="s", - ispecs=[self.results[label].electron_flag], - labelPlot=f"{label}, {self.results[label].all_names[i]}", - c=c, - lw=lw, - ) - worked = True - except: - print("Could not plot energy exchange", typeMsg="w") - worked = False - ax.set_title("Electron energy exchange") + ax.plot(self.results[label].ky, self.results[label].phiTi_kx0_mean, '-o', c=c, lw=2, label=f"{label} (mean)") + ax.fill_between(self.results[label].ky, self.results[label].phiTi_kx0_mean-self.results[label].phiTi_kx0_std, self.results[label].phiTi_kx0_mean+self.results[label].phiTi_kx0_std, color=c, alpha=0.2) + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\phi-T_i$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$\\phi-T_i$ cross-phase ($k_x=0$)') + ax.legend(loc='best', prop={'size': 8},) + + + ax = axs["G"] + for ion in self.results[label].ions_flags: + ax.plot(self.results[label].ky, self.results[label].phiTi_all_kx0_mean[ion], ls[ion]+m[ion], c=c, lw=1, label=f"{label}, {self.results[label].all_names[ion]} (mean)", markersize=4) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\phi-T_i$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$\\phi-T_i$ (all) cross-phase ($k_x=0$)') + ax.legend(loc='best', 
prop={'size': 8},) + + + ax = axs["H"] + for ion in self.results[label].ions_flags: + ax.plot(self.results[label].ky, self.results[label].phini_all_kx0_mean[ion], ls[ion]+m[ion], c=c, lw=1, label=f"{label}, {self.results[label].all_names[ion]} (mean)", markersize=4) + + ax.set_xlabel("$k_{\\theta} \\rho_s$") + ax.set_ylabel("$\\phi-n_i$ cross-phase (degrees)"); ax.set_ylim([-180, 180]) + GRAPHICStools.addDenseAxis(ax) + ax.axhline(0.0, color='k', ls='--', lw=1) + ax.set_title('$\\phi-n_i$ (all) cross-phase ($k_x=0$)') + ax.legend(loc='best', prop={'size': 8},) + + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) + + def plot_ballooning(self, time = None, label="cgyro1", c="b", axs=None): + + if axs is None: + plt.ion() + fig = plt.figure(figsize=(18, 9)) - plt.subplots_adjust() - if plotLegend: - for n in ["B", "E", "C"]: - GRAPHICStools.addLegendApart( - axs[n], - ratio=0.7, - withleg=True, - extraPad=0, - size=7, - loc="upper left", - ) - for n in ["F"]: - GRAPHICStools.addLegendApart( - axs[n], - ratio=0.7, - withleg=worked, - extraPad=0, - size=7, - loc="upper left", + axs = fig.subplot_mosaic( + """ + 135 + 246 + """ + ) + + if time is None: + time = np.min([self.results[label].tmin, self.results[label].tmax_fluct]) + + it = np.argmin(np.abs(self.results[label].t - time)) + + colorsC, _ = GRAPHICStools.colorTableFade( + len(self.results[label].ky), + startcolor=c, + endcolor=c, + alphalims=[1.0, 0.4], + ) + + ax = axs['1'] + for ky in range(len(self.results[label].ky)): + for var, axsT in zip( + ["phi_ballooning", "apar_ballooning", "bpar_ballooning"], + [[axs['1'], axs['2']], [axs['3'], axs['4']], [axs['5'], axs['6']]], + ): + + f = self.results[label].__dict__[var][:, it] + y1 = np.real(f) + y2 = np.imag(f) + x = self.results[label].theta_ballooning / np.pi + + # Normalize + y1_max = np.max(np.abs(y1)) + y2_max = np.max(np.abs(y2)) + y1 /= y1_max + y2 /= y2_max + + ax = axsT[0] + ax.plot( + x, + y1, + color=colorsC[ky], + ls="-", + 
label=f"$k_{{\\theta}}\\rho_s={np.abs( self.results[label].ky[ky]):.2f}$ (max {y1_max:.2e})", ) - for n in ["A", "D"]: - GRAPHICStools.addLegendApart( - axs[n], - ratio=0.7, - withleg=False, - extraPad=0, - size=7, - loc="upper left", + ax = axsT[1] + ax.plot( + x, + y2, + color=colorsC[ky], + ls="-", + label=f"$k_{{\\theta}}\\rho_s={np.abs( self.results[label].ky[ky]):.2f}$ (max {y2_max:.2e})", ) - def plot(self, labels=[""]): - from mitim_tools.misc_tools.GUItools import FigureNotebook - self.fn = FigureNotebook("CGYRO Notebook", geometry="1600x1000") + ax = axs['1'] + ax.set_xlabel("$\\theta/\\pi$ (normalized to maximum)") + ax.set_ylabel("Re($\\delta\\phi$)") + ax.set_title("$\\delta\\phi$") + ax.legend(loc="best", prop={"size": 8}) + GRAPHICStools.addDenseAxis(ax) - colors = GRAPHICStools.listColors() + ax.set_xlim([-2 * np.pi, 2 * np.pi]) - fig = self.fn.add_figure(label="Fluxes Time Traces") - axsFluxes_t = fig.subplot_mosaic( - """ - ABC - DEF - """ - ) + ax = axs['3'] + ax.set_xlabel("$\\theta/\\pi$ (normalized to maximum)") + ax.set_ylabel("Re($\\delta A\\parallel$)") + ax.set_title("$\\delta A\\parallel$") + ax.legend(loc="best", prop={"size": 8}) + GRAPHICStools.addDenseAxis(ax) - for j in range(len(labels)): - self.plot_fluxes( - axs=axsFluxes_t, - label=labels[j], - c=colors[j], - plotLegend=j == len(labels) - 1, - ) + ax = axs['5'] + ax.set_xlabel("$\\theta/\\pi$ (normalized to maximum)") + ax.set_ylabel("Re($\\delta B\\parallel$)") + ax.set_title("$\\delta B\\parallel$") + ax.legend(loc="best", prop={"size": 8}) + GRAPHICStools.addDenseAxis(ax) + ax = axs['2'] + ax.set_xlabel("$\\theta/\\pi$") + ax.set_ylabel("Im($\\delta\\phi$)") + ax.legend(loc="best", prop={"size": 8}) + GRAPHICStools.addDenseAxis(ax) -class CGYROinput: - def __init__(self, file=None): - self.file = IOtools.expandPath(file) if isinstance(file, (str, Path)) else None + ax = axs['4'] + ax.set_xlabel("$\\theta/\\pi$") + ax.set_ylabel("Im($\\delta A\\parallel$)") + 
ax.legend(loc="best", prop={"size": 8}) + GRAPHICStools.addDenseAxis(ax) - if self.file is not None and self.file.exists(): - with open(self.file, "r") as f: - lines = f.readlines() - self.file_txt = "".join(lines) - else: - self.file_txt = "" + ax = axs['6'] + ax.set_xlabel("$\\theta/\\pi$") + ax.set_ylabel("Im($\\delta B\\parallel$)") + ax.legend(loc="best", prop={"size": 8}) + GRAPHICStools.addDenseAxis(ax) - self.controls = GACODErun.buildDictFromInput(self.file_txt) - def writeCurrentStatus(self, file=None): - print("\t- Writting CGYRO input file") + for ax in [axs['1'], axs['3'], axs['5'], axs['2'], axs['4'], axs['6']]: + ax.axvline(x=0, lw=0.5, ls="--", c="k") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.3, horizontal=0.3) + + def plot_2D(self, label="cgyro1", axs=None, times = None): + + if times is None: + times = [] + + number_times = len(axs)//3 if axs is not None else 4 + + try: + times = [self.results[label].t[-1-i*10] for i in range(number_times)] + except IndexError: + times = [self.results[label].t[-1-i*1] for i in range(number_times)] - if file is None: - file = self.file + if axs is None: - with open(file, "w") as f: - f.write( - "#-------------------------------------------------------------------------\n" - ) - f.write( - "# CGYRO input file modified by MITIM framework (Rodriguez-Fernandez, 2020)\n" - ) - f.write( - "#-------------------------------------------------------------------------" + mosaic = _2D_mosaic(len(times)) + + plt.ion() + fig = plt.figure(figsize=(18, 9)) + axs = fig.subplot_mosaic(mosaic) + + # Pre-calculate global min/max for each field type across all times + phi_values = [] + n_values = [] + e_values = [] + + for time in times: + it = np.argmin(np.abs(self.results[label].t - time)) + + # Get phi values + xp, yp, fp = self._to_real_space(label=label, variable = 'kxky_phi', it = it) + phi_values.append(fp) + + # Get n values + xp, yp, fp = self._to_real_space(label=label, 
variable = 'kxky_n',species = self.results[label].electron_flag, it = it) + n_values.append(fp) + + # Get e values + xp, yp, fp = self._to_real_space(label=label, variable = 'kxky_e',species = self.results[label].electron_flag, it = it) + e_values.append(fp) + + # Calculate global ranges + phi_max = np.max([np.max(np.abs(fp)) for fp in phi_values]) + phi_min, phi_max = -phi_max, +phi_max + + n_max = np.max([np.max(np.abs(fp)) for fp in n_values]) + n_min, n_max = -n_max, +n_max + + e_max = np.max([np.max(np.abs(fp)) for fp in e_values]) + e_min, e_max = -e_max, +e_max + + colorbars = [] # Store colorbar references + # Now plot with consistent colorbar ranges + for time_i, time in enumerate(times): + + print(f"\t- Plotting 2D turbulence for {label} at time {time}") + + it = np.argmin(np.abs(self.results[label].t - time)) + + cfig = axs[str(time_i+1)].get_figure() + + # Phi plot + ax = axs[str(time_i+1)] + xp, yp, fp = self._to_real_space(label=label, variable = 'kxky_phi', it = it) + + cs1 = ax.contourf(xp,yp,np.transpose(fp),levels=np.arange(phi_min,phi_max,(phi_max-phi_min)/256),cmap=plt.get_cmap('jet')) + cphi = cfig.colorbar(cs1, ax=ax) + + ax.set_xlabel("$x/\\rho_s$") + ax.set_ylabel("$y/\\rho_s$") + ax.set_title(f"$\\delta\\phi/\\phi_0$ (t={self.results[label].t[it]} $a/c_s$)") + ax.set_aspect('equal') + + # N plot + ax = axs[str(time_i+1+len(times))] + xp, yp, fp = self._to_real_space(label=label, variable = 'kxky_n',species = self.results[label].electron_flag, it = it) + + cs2 = ax.contourf(xp,yp,np.transpose(fp),levels=np.arange(n_min,n_max,(n_max-n_min)/256),cmap=plt.get_cmap('jet')) + cn = cfig.colorbar(cs2, ax=ax) + + ax.set_xlabel("$x/\\rho_s$") + ax.set_ylabel("$y/\\rho_s$") + ax.set_title(f"$\\delta n_e/n_{{e,0}}$ (t={self.results[label].t[it]} $a/c_s$)") + ax.set_aspect('equal') + + # E plot + ax = axs[str(time_i+1+len(times)*2)] + xp, yp, fp = self._to_real_space(label=label, variable = 'kxky_e',species = self.results[label].electron_flag, it = it) 
+ + cs3 = ax.contourf(xp,yp,np.transpose(fp),levels=np.arange(e_min,e_max,(e_max-e_min)/256),cmap=plt.get_cmap('jet')) + ce = cfig.colorbar(cs3, ax=ax) + + ax.set_xlabel("$x/\\rho_s$") + ax.set_ylabel("$y/\\rho_s$") + ax.set_title(f"$\\delta E_e/E_{{e,0}}$ (t={self.results[label].t[it]} $a/c_s$)") + ax.set_aspect('equal') + + # Store the colorbar objects with their associated contour plots + colorbars.append({ + 'phi': cphi, + 'n': cn, + 'e': ce + }) + + GRAPHICStools.adjust_subplots(axs=axs, vertical=0.4, horizontal=0.3) + + return colorbars + + def _to_real_space(self, variable = 'kxky_phi', species = None, label="cgyro1", theta_plot = 0, it = -1): + + # from pygacode + def maptoreal_fft(nr,nn,nx,ny,c): + + d = np.zeros([nx,nn],dtype=complex) + for i in range(nr): + p = i-nr//2 + if -p < 0: + k = -p+nx + else: + k = -p + d[k,0:nn] = np.conj(c[i,0:nn]) + f = np.fft.irfft2(d,s=[nx,ny],norm='forward')*0.5 + + # Correct for half-sum + f = 2*f + + return f + + # Real space + nr = self.results[label].cgyrodata.n_radial + nn = self.results[label].cgyrodata.n_n + craw = self.results[label].cgyrodata.__dict__[variable] + + itheta = np.argmin(np.abs(self.results[label].theta_stored-theta_plot)) + if species is None: + c = craw[:,itheta,:,it] + else: + c = craw[:,itheta,species,:,it] + + nx = self.results[label].cgyrodata.__dict__[variable].shape[0] + ny = nx + + # Arrays + x = np.arange(nx)*2*np.pi/nx + y = np.arange(ny)*2*np.pi/ny + f = maptoreal_fft(nr,nn,nx,ny,c) + + # Physical maxima + ky1 = self.results[label].cgyrodata.ky[1] if len(self.results[label].cgyrodata.ky) > 1 else self.results[label].cgyrodata.ky[0] + xmax = self.results[label].cgyrodata.length + ymax = (2*np.pi)/np.abs(ky1) + xp = x/(2*np.pi)*xmax + yp = y/(2*np.pi)*ymax + + # Periodic extensions + xp = np.append(xp,xmax) + yp = np.append(yp,ymax) + fp = np.zeros([nx+1,ny+1]) + fp[0:nx,0:ny] = f[:,:] + fp[-1,:] = fp[0,:] + fp[:,-1] = fp[:,0] + + return xp, yp, fp + + def plot_quick_linear(self, 
labels=["cgyro1"], fig=None): + + colors = GRAPHICStools.listColors() + + if fig is None: + fig = plt.figure(figsize=(15,9)) + + axs = fig.subplot_mosaic( + """ + 12 + 34 + """ + ) + + def _plot_linear_stability(axs, labels, label_base,col_lin ='b', start_cont=0): + + for cont, label in enumerate(labels): + c = self.results[label]['output'][0] + baseColor = colors[cont+start_cont+1] + colorsC, _ = GRAPHICStools.colorTableFade( + len(c.ky), + startcolor=baseColor, + endcolor=baseColor, + alphalims=[1.0, 0.4], + ) + + ax = axs['1'] + for ky in range(len(c.ky)): + ax.plot( + c.t, + c.g[ky,:], + color=colorsC[ky], + label=f"$k_{{\\theta}}\\rho_s={np.abs(c.ky[ky]):.2f}$", + ) + + ax = axs['2'] + for ky in range(len(c.ky)): + ax.plot( + c.t, + c.f[ky,:], + color=colorsC[ky], + label=f"$k_{{\\theta}}\\rho_s={np.abs(c.ky[ky]):.2f}$", + ) + + GACODEplotting.plotTGLFspectrum( + [axs['3'], axs['4']], + self.results[label_base].ky, + self.results[label_base].g_mean, + freq=self.results[label_base].f_mean, + coeff=0.0, + c=col_lin, + ls="-", + lw=1, + label="", + facecolors=colors, + markersize=50, + alpha=1.0, + titles=["Growth Rate", "Real Frequency"], + removeLow=1e-4, + ylabel=True, ) + + return cont + + co = -1 + for i,label0 in enumerate(labels): + co = _plot_linear_stability(axs, self.results[label0].labels, label0, start_cont=co, col_lin=colors[i]) + + ax = axs['1'] + ax.set_xlabel("Time $(a/c_s)$") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + ax.set_ylabel("$\\gamma$ $(c_s/a)$") + ax.set_title("Growth Rate") + ax.set_xlim(left=0) + ax.legend(loc='best', prop={'size': 8},) + + ax = axs['2'] + ax.set_xlabel("Time $(a/c_s)$") + ax.set_ylabel("$\\omega$ $(c_s/a)$") + ax.set_title("Real Frequency") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + ax.set_xlim(left=0) + + for ax in [axs['1'], axs['2'], axs['3'], axs['4']]: + GRAPHICStools.addDenseAxis(ax) + + plt.tight_layout() + +class CGYROinput(SIMtools.GACODEinput): + def __init__(self, file=None): + super().__init__( + 
file=file, + controls_file= __mitimroot__ / "templates" / "input.cgyro.controls", + code="CGYRO", + n_species='N_SPECIES', + ) - f.write("\n\n# Control parameters\n") - f.write("# ------------------\n\n") - for ikey in self.controls: - var = self.controls[ikey] - f.write(f"{ikey.ljust(23)} = {var}\n") +def _2D_mosaic(n_times): + + num_cols = n_times + + # Create the mosaic layout dynamically + mosaic = [] + counter = 1 + for _ in range(3): + row = [] + for _ in range(num_cols): + row.append(str(counter)) + counter += 1 + mosaic.append(row) + + return mosaic \ No newline at end of file diff --git a/src/mitim_tools/gacode_tools/NEOtools.py b/src/mitim_tools/gacode_tools/NEOtools.py index d09d4ed6..ce9dfbce 100644 --- a/src/mitim_tools/gacode_tools/NEOtools.py +++ b/src/mitim_tools/gacode_tools/NEOtools.py @@ -1,19 +1,204 @@ -from mitim_tools.misc_tools import IOtools -from mitim_tools.gacode_tools import PROFILEStools -from mitim_tools.gacode_tools.utils import GACODErun +import numpy as np +import matplotlib.pyplot as plt +from mitim_tools.misc_tools import GRAPHICStools, IOtools, GUItools +from mitim_tools.gacode_tools.utils import GACODErun, GACODEdefaults +from mitim_tools.simulation_tools import SIMtools from mitim_tools.misc_tools.LOGtools import printMsg as print +from mitim_tools import __mitimroot__ from IPython import embed +class NEO(SIMtools.mitim_simulation): + def __init__( + self, + rhos=[0.4, 0.6], # rho locations of interest + ): + + super().__init__(rhos=rhos) -class NEO: - def __init__(self): - pass + def code_call(folder, n, p, additional_command="", **kwargs): + return f"neo -e {folder} -n {n} -p {p} {additional_command}" + + def code_slurm_settings(name, minutes, total_cores_required, cores_per_code_call, type_of_submission, array_list=None, **kwargs_slurm): + + slurm_settings = { + "name": name, + "minutes": minutes, + 'job_array_limit': None, # Limit to this number at most running jobs at the same time? 
+ } + + if type_of_submission == "slurm_standard": + + slurm_settings['ntasks'] = total_cores_required + slurm_settings['cpuspertask'] = cores_per_code_call + + elif type_of_submission == "slurm_array": + + slurm_settings['ntasks'] = 1 + slurm_settings['cpuspertask'] = cores_per_code_call + slurm_settings['job_array'] = ",".join(array_list) + + return slurm_settings + + self.run_specifications = { + 'code': 'neo', + 'input_file': 'input.neo', + 'code_call': code_call, + 'code_slurm_settings': code_slurm_settings, + 'control_function': GACODEdefaults.addNEOcontrol, + 'controls_file': 'input.neo.controls', + 'state_converter': 'to_neo', + 'input_class': NEOinput, + 'complete_variation': None, + 'default_cores': 1, # Default cores to use in the simulation + 'output_class': NEOoutput, + } + + print("\n-----------------------------------------------------------------------------------------") + print("\t\t\t NEO class module") + print("-----------------------------------------------------------------------------------------\n") + + self.ResultsFiles = self.ResultsFiles_minimal = ['out.neo.transport_flux'] + + def plot( + self, + fn=None, + labels=["neo1"], + extratitle="", + fn_color=None, + colors=None, + ): + + if fn is None: + self.fn = GUItools.FigureNotebook("NEO MITIM Notebook", geometry="1700x900", vertical=True) + else: + self.fn = fn + + fig1 = self.fn.add_figure(label=f"{extratitle}Summary", tab_color=fn_color) + + grid = plt.GridSpec(1, 3, hspace=0.7, wspace=0.2) + + if colors is None: + colors = GRAPHICStools.listColors() + + axQe = fig1.add_subplot(grid[0, 0]) + axQi = fig1.add_subplot(grid[0, 1]) + axGe = fig1.add_subplot(grid[0, 2]) + + for i,label in enumerate(labels): + roa, QeGB, QiGB, GeGB = [], [], [], [] + for irho in range(len(self.rhos)): + roa.append(self.results[label]['output'][irho].roa) + QeGB.append(self.results[label]['output'][irho].Qe) + QiGB.append(self.results[label]['output'][irho].Qi) + 
GeGB.append(self.results[label]['output'][irho].Ge) + + axQe.plot(roa, QeGB, label=label, color=colors[i], marker='o', linestyle='-') + axQi.plot(roa, QiGB, label=label, color=colors[i], marker='o', linestyle='-') + axGe.plot(roa, GeGB, label=label, color=colors[i], marker='o', linestyle='-') + + for ax in [axQe, axQi, axGe]: + ax.set_xlabel("$r/a$"); ax.set_xlim([0,1]) + GRAPHICStools.addDenseAxis(ax) + ax.legend(loc="best") + + axQe.set_ylabel("$Q_e$ ($MW/m^2$)"); axQe.set_yscale('log') + axQi.set_ylabel("$Q_i$ ($MW/m^2$)"); axQi.set_yscale('log') + axGe.set_ylabel("$\\Gamma_e$ ($1E20/s/m^2$)"); #axGe.set_yscale('log') + + + def read_scan( + self, + label="scan1", + subfolder=None, + variable="RLTS_1", + positionIon=2 + ): + + output_object = "output" + + variable_mapping = { + 'scanned_variable': ["parsed", variable, None], + 'Qe_gb': [output_object, 'Qe', None], + 'Qi_gb': [output_object, 'Qi', None], + 'Ge_gb': [output_object, 'Ge', None], + 'Gi_gb': [output_object, 'GiAll', positionIon - 2], + 'Mt_gb': [output_object, 'Mt', None], + } + + variable_mapping_unn = { + 'Qe': [output_object, 'Qe_unn', None], + 'Qi': [output_object, 'Qi_unn', None], + 'Ge': [output_object, 'Ge_unn', None], + 'Gi': [output_object, 'GiAll_unn', positionIon - 2], + 'Mt': [output_object, 'Mt_unn', None], + } + + super().read_scan( + label=label, + subfolder=subfolder, + variable=variable, + positionIon=positionIon, + variable_mapping=variable_mapping, + variable_mapping_unn=variable_mapping_unn + ) + + def plot_scan( + self, + fn=None, + labels=["neo1"], + extratitle="", + fn_color=None, + colors=None, + ): + + if fn is None: + self.fn = GUItools.FigureNotebook("NEO Scan Notebook", geometry="1700x900", vertical=True) + else: + self.fn = fn + + fig1 = self.fn.add_figure(label=f"{extratitle}Summary", tab_color=fn_color) + + grid = plt.GridSpec(1, 3, hspace=0.7, wspace=0.2) + + if colors is None: + colors = GRAPHICStools.listColors() + + axQe = fig1.add_subplot(grid[0, 0]) + axQi = 
fig1.add_subplot(grid[0, 1]) + axGe = fig1.add_subplot(grid[0, 2]) + + cont = 0 + for label in labels: + for irho in range(len(self.rhos)): + + x = self.scans[label]['scanned_variable'][irho] + + axQe.plot(x, self.scans[label]['Qe'][irho], label=f'{label}, {self.rhos[irho]}', color=colors[cont], marker='o', linestyle='-') + axQi.plot(x, self.scans[label]['Qi'][irho], label=f'{label}, {self.rhos[irho]}', color=colors[cont], marker='o', linestyle='-') + axGe.plot(x, self.scans[label]['Ge'][irho], label=f'{label}, {self.rhos[irho]}', color=colors[cont], marker='o', linestyle='-') + + cont += 1 + + for ax in [axQe, axQi, axGe]: + ax.set_xlabel("Scanned variable") + GRAPHICStools.addDenseAxis(ax) + ax.legend(loc="best") + + axQe.set_ylabel("$Q_e$ ($MW/m^2$)"); + axQi.set_ylabel("$Q_i$ ($MW/m^2$)"); + axGe.set_ylabel("$\\Gamma_e$ ($1E20/s/m^2$)") + + plt.tight_layout() + + + + # def prep(self, inputgacode, folder): + # self.inputgacode = inputgacode + # self.folder = IOtools.expandPath(folder) + + # self.folder.mkdir(parents=True, exist_ok=True) - def prep(self, inputgacode, folder): - self.inputgacode = inputgacode - self.folder = IOtools.expandPath(folder) - self.folder.mkdir(parents=True, exist_ok=True) def run_vgen(self, subfolder="vgen1", vgenOptions={}, cold_start=False): @@ -42,15 +227,12 @@ def run_vgen(self, subfolder="vgen1", vgenOptions={}, cold_start=False): ) if (not runThisCase) and cold_start: - runThisCase = print( - "\t- Files found in folder, but cold_start requested. Are you sure?", - typeMsg="q", - ) + runThisCase = print("\t- Files found in folder, but cold_start requested. 
Are you sure?",typeMsg="q",) if runThisCase: IOtools.askNewFolder(self.folder_vgen, force=True) - self.inputgacode.writeCurrentStatus(file=(self.folder_vgen / f"input.gacode")) + self.inputgacode.write_state(file=(self.folder_vgen / f"input.gacode")) # ---- Run @@ -59,17 +241,13 @@ def run_vgen(self, subfolder="vgen1", vgenOptions={}, cold_start=False): self.folder_vgen, vgenOptions=vgenOptions, name_run=subfolder ) else: - print( - f"\t- Required files found in {subfolder}, not running VGEN", - typeMsg="i", - ) + print(f"\t- Required files found in {subfolder}, not running VGEN",typeMsg="i",) file_new = self.folder_vgen / f"vgen" / f"input.gacode" # ---- Postprocess - self.inputgacode_vgen = PROFILEStools.PROFILES_GACODE( - file_new, calculateDerived=True, mi_ref=self.inputgacode.mi_ref - ) + from mitim_tools.gacode_tools import PROFILEStools + self.inputgacode_vgen = PROFILEStools.gacode_state(file_new, derive_quantities=True, mi_ref=self.inputgacode.mi_ref) def check_if_files_exist(folder, list_files): @@ -83,3 +261,111 @@ def check_if_files_exist(folder, list_files): return False return True + +class NEOinput(SIMtools.GACODEinput): + def __init__(self, file=None): + super().__init__( + file=file, + controls_file= __mitimroot__ / "templates" / "input.neo.controls", + code='NEO', + n_species='N_SPECIES' + ) + +class NEOoutput(SIMtools.GACODEoutput): + def __init__(self, FolderGACODE, suffix="", **kwargs): + super().__init__() + + self.FolderGACODE, self.suffix = FolderGACODE, suffix + + if suffix == "": + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} without suffix") + else: + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} with suffix {suffix}") + + self.inputclass = NEOinput(file=self.FolderGACODE / f"input.neo{self.suffix}") + + self.read() + + def read(self): + + with open(self.FolderGACODE / ("out.neo.transport_flux" + self.suffix), "r") as f: + lines = f.readlines() + + for i in range(len(lines)): + if '# Z 
pflux_tgyro eflux_tgyro mflux_tgyro' in lines[i]: + # Found the header line, now process the data + break + + Z, G, Q, M = [], [], [], [] + for i in range(i+2, len(lines)): + line = lines[i] + Z.append(float(line.split()[0])) + G.append(float(line.split()[1])) + Q.append(float(line.split()[2])) + M.append(float(line.split()[3])) + Z = np.array(Z) + G = np.array(G) + Q = np.array(Q) + M = np.array(M) + + + # Find electron line (Z= -1) + ie = int(np.where(Z == -1)[0][0]) + + self.Ge = G[ie] + self.Qe = Q[ie] + self.Me = M[ie] + + self.GiAll = np.delete(G, ie) + self.QiAll = np.delete(Q, ie) + self.MiAll = np.delete(M, ie) + + self.Qi = self.QiAll.sum() + self.Mt = self.Me + self.MiAll.sum() + + self.roa = float(lines[0].split()[-1]) + + # ------------------------------------------------------------------------ + # Input file + # ------------------------------------------------------------------------ + + with open(self.FolderGACODE / ("input.neo" + self.suffix), "r") as fi: + lines = fi.readlines() + self.inputFile = "".join(lines) + + + def unnormalize(self, normalization, rho=None): + + if normalization is not None: + rho_x = normalization["rho"] + roa_x = normalization["roa"] + q_gb = normalization["q_gb"] + g_gb = normalization["g_gb"] + pi_gb = normalization["pi_gb"] + s_gb = normalization["s_gb"] + rho_s = normalization["rho_s"] + a = normalization["rmin"][-1] + + # ------------------------------------ + # Usage of normalization quantities + # ------------------------------------ + + if rho is None: + ir = np.argmin(np.abs(roa_x - self.roa)) + rho_eval = rho_x[ir] + else: + ir = np.argmin(np.abs(rho_x - rho)) + rho_eval = rho + + self.Qe_unn = self.Qe * q_gb[ir] + self.Qi_unn = self.Qi * q_gb[ir] + self.QiAll_unn = self.QiAll * q_gb[ir] + self.Ge_unn = self.Ge * g_gb[ir] + self.GiAll_unn = self.GiAll * g_gb[ir] + self.MiAll_unn = self.MiAll * g_gb[ir] + self.Mt_unn = self.Mt * s_gb[ir] + + self.unnormalization_successful = True + + else: + 
self.unnormalization_successful = False diff --git a/src/mitim_tools/gacode_tools/PROFILEStools.py b/src/mitim_tools/gacode_tools/PROFILEStools.py index c5aea661..90d80427 100644 --- a/src/mitim_tools/gacode_tools/PROFILEStools.py +++ b/src/mitim_tools/gacode_tools/PROFILEStools.py @@ -1,235 +1,113 @@ import copy -import torch -import csv import numpy as np import matplotlib.pyplot as plt from collections import OrderedDict -from mitim_tools.misc_tools import GRAPHICStools, MATHtools, PLASMAtools, IOtools -from mitim_modules.powertorch.physics import GEOMETRYtools, CALCtools +from mitim_tools.plasmastate_tools import MITIMstate from mitim_tools.gs_tools import GEQtools -from mitim_tools.gacode_tools import NEOtools -from mitim_tools.gacode_tools.utils import GACODEdefaults -from mitim_tools.transp_tools import CDFtools -from mitim_tools.transp_tools.utils import TRANSPhelpers -from mitim_tools.gacode_tools.utils import PORTALSinteraction +from mitim_tools.misc_tools import MATHtools, IOtools, GRAPHICStools from mitim_tools.misc_tools.LOGtools import printMsg as print -from mitim_tools import __version__ from IPython import embed -# ------------------------------------------------------------------------------------- -# input.gacode -# ------------------------------------------------------------------------------------- +class gacode_state(MITIMstate.mitim_state): + ''' + Class to read and manipulate GACODE profiles files (input.gacode). + It inherits from the main MITIMstate class, which provides basic + functionality for plasma state management. + The class reads the GACODE profiles file, extracts relevant data, + and writes them in the way that MITIMstate class expects. 
+ ''' -class PROFILES_GACODE: - def __init__(self, file, calculateDerived=True, mi_ref=None): - """ - Depending on resolution, derived can be expensive, so I mmay not do it every time - """ - - self.titles_singleNum = ["nexp", "nion", "shot", "name", "type", "time"] - self.titles_singleArr = [ - "masse", - "mass", - "ze", - "z", - "torfluxa(Wb/radian)", - "rcentr(m)", - "bcentr(T)", - "current(MA)", - ] - self.titles_single = self.titles_singleNum + self.titles_singleArr + # ************************************************************************************************************************************************ + # Reading and interpreting input.gacode files + # ************************************************************************************************************************************************ - self.file = file + def __init__(self, file, derive_quantities=True, mi_ref=None): - if self.file is not None: - with open(self.file, "r") as f: - self.lines = f.readlines() + # Initialize the base class and tell it the type of file + super().__init__(type_file='input.gacode') - # Read file and store raw data - self.readHeader() - self.readProfiles() + # Read the input file and store the raw data + self.files = [file] + + self.titles_singleNum = ["nexp", "nion", "shot", "name", "type", "time"] + self.titles_singleArr = ["masse","mass","ze","z","torfluxa(Wb/radian)","rcentr(m)","bcentr(T)","current(MA)"] + self.titles_single = self.titles_singleNum + self.titles_singleArr + + if self.files[0] is not None: - # Process - self.process(mi_ref=mi_ref, calculateDerived=calculateDerived) + self._read_inputgacocde() + + # Derive (Depending on resolution, derived can be expensive, so I mmay not do it every time) + self.derive_quantities(mi_ref=mi_ref, derive_quantities=derive_quantities) - def process(self, mi_ref=None, calculateDerived=True): - """ - Perform MITIM derivations (can be expensive, only if requested) - Note: One can force what mi_ref to use (in a.m.u.). 
This is because, by default, MITIM - will use the mass of the first thermal ion to produce quantities such as Q_GB, rho_s, etc. - However, in some ocasions (like when running TGLF), the normalization that must be used - for those quantities is a fixed one (e.g. Deuterium) - """ + @IOtools.hook_method(after=MITIMstate.ensure_variables_existence) + def _read_inputgacocde(self): - # Calculate necessary quantities + with open(self.files[0], "r") as f: + self.lines = f.readlines() - if "qpar_beam(MW/m^3)" in self.profiles: - self.varqpar, self.varqpar2 = "qpar_beam(MW/m^3)", "qpar_wall(MW/m^3)" - else: - self.varqpar, self.varqpar2 = "qpar_beam(1/m^3/s)", "qpar_wall(1/m^3/s)" + # Read file and store raw data + self._read_header() + self._read_profiles() + # Ensure correctness (wrong names in older input.gacode files) if "qmom(Nm)" in self.profiles: - self.varqmom = "qmom(Nm)" # Old, wrong one. But Candy fixed it as of 02/24/2023 - else: - self.varqmom = "qmom(N/m^2)" # CORRECT ONE - - # ------------------------------------------------------------------------------------------------------------------- - # Insert zeros in those cases whose column are not there - # ------------------------------------------------------------------------------------------------------------------- - - some_times_are_not_here = [ - "qei(MW/m^3)", - "qohme(MW/m^3)", - "johm(MA/m^2)", - "jbs(MA/m^2)", - "jbstor(MA/m^2)", - "w0(rad/s)", - "ptot(Pa)", # e.g. if I haven't written that info from ASTRA - "zeta(-)", # e.g. if TGYRO is run with zeta=0, it won't write this column in .new - "zmag(m)", - self.varqpar, - self.varqpar2, - "shape_cos0(-)", - self.varqmom, - ] - - num_moments = 6 # This is the max number of moments I'll be considering. 
If I don't have that many (usually there are 5 or 3), it'll be populated with zeros - for i in range(num_moments): - some_times_are_not_here.append(f"shape_cos{i + 1}(-)") - if i > 1: - some_times_are_not_here.append(f"shape_sin{i + 1}(-)") - - for ikey in some_times_are_not_here: - if ikey not in self.profiles.keys(): - self.profiles[ikey] = copy.deepcopy(self.profiles["rmin(m)"]) * 0.0 - - self.deriveQuantities(mi_ref=mi_ref, calculateDerived=calculateDerived) - - def deriveQuantities(self, mi_ref=None, calculateDerived=True, n_theta_geo=1001, rederiveGeometry=True): - - # ------------------------------------------------------------------------------------------------------------------- - self.readSpecies() - self.produce_shape_lists() - self.mi_first = self.Species[0]["A"] - self.DTplasma() - self.sumFast() - # ------------------------------------- + self.profiles["qmom(N/m^2)"] = self.profiles.pop("qmom(Nm)") + if "qpar_beam(MW/m^3)" in self.profiles: + self.profiles["qpar_beam(1/m^3/s)"] = self.profiles.pop("qpar_beam(MW/m^3)") + if "qpar_wall(MW/m^3)" in self.profiles: + self.profiles["qpar_wall(1/m^3/s)"] = self.profiles.pop("qpar_wall(MW/m^3)") + + """ + Note that in prgen_map_plasmastate, that variable: + expro_qpar_beam(i) = plst_sn_trans(i-1)/dvol - if "derived" not in self.__dict__: - self.derived = {} + Note that in prgen_read_plasmastate, that variable: + ! 
Particle source + err = nf90_inq_varid(ncid,trim('sn_trans'),varid) + err = nf90_get_var(ncid,varid,plst_sn_trans(1:nx-1)) + plst_sn_trans(nx) = 0.0 - if mi_ref is not None: - self.derived["mi_ref"] = mi_ref - print(f"\t* Reference mass ({self.derived['mi_ref']:.2f}) to use was forced by class initialization",typeMsg="w") - else: - self.derived["mi_ref"] = self.mi_first - print(f"\t* Reference mass ({self.derived['mi_ref']}) from first ion",typeMsg="i") + Note that in the plasmastate file, the variable "sn_trans": - # Useful to have gradients in the basic ---------------------------------------------------------- - self.derived["aLTe"] = aLT(self.profiles["rmin(m)"], self.profiles["te(keV)"]) - self.derived["aLne"] = aLT( - self.profiles["rmin(m)"], self.profiles["ne(10^19/m^3)"] - ) + long_name: particle transport (loss) + units: #/sec + component: PLASMA + section: STATE_PROFILES + specification: R|units=#/sec|step*dV sn_trans(~nrho,0:nspec_th) - self.derived["aLTi"] = self.profiles["ti(keV)"] * 0.0 - self.derived["aLni"] = [] - for i in range(self.profiles["ti(keV)"].shape[1]): - self.derived["aLTi"][:, i] = aLT( - self.profiles["rmin(m)"], self.profiles["ti(keV)"][:, i] - ) - self.derived["aLni"].append( - aLT(self.profiles["rmin(m)"], self.profiles["ni(10^19/m^3)"][:, i]) - ) - self.derived["aLni"] = np.transpose(np.array(self.derived["aLni"])) - # ------------------------------------------------------------------------------------------------ - - if calculateDerived: - self.deriveQuantities_full(rederiveGeometry=rederiveGeometry) - - # ------------------------------------------------------------------------------------- - # Method to write a scratch file - # ------------------------------------------------------------------------------------- - - @classmethod - def scratch(cls, profiles, label_header='', **kwargs_process): - instance = cls(None) - - # Header - instance.header = f''' -# Created from scratch with MITIM version {__version__} -# {label_header} -# 
-''' - # Add data to profiles - instance.profiles = profiles - - instance.process(**kwargs_process) - - return instance - - # ------------------------------------------------------------------------------------- - - def calculate_Er( - self, - folder, - rhos=None, - vgenOptions={}, - name="vgen1", - includeAll=False, - write_new_file=None, - cold_start=False, - ): - profiles = copy.deepcopy(self) - - # Resolution? - resol_changed = False - if rhos is not None: - profiles.changeResolution(rho_new=rhos) - resol_changed = True - - self.neo = NEOtools.NEO() - self.neo.prep(profiles, folder) - self.neo.run_vgen(subfolder=name, vgenOptions=vgenOptions, cold_start=cold_start) - - profiles_new = copy.deepcopy(self.neo.inputgacode_vgen) - if resol_changed: - profiles_new.changeResolution(rho_new=self.profiles["rho(-)"]) - - # Get the information from the NEO run - - variables = ["w0(rad/s)"] - if includeAll: - variables += [ - "vpol(m/s)", - "vtor(m/s)", - "jbs(MA/m^2)", - "jbstor(MA/m^2)", - "johm(MA/m^2)", - ] - - for ikey in variables: - if ikey in profiles_new.profiles: - print( - f'\t- Inserting {ikey} from NEO run{" (went back to original resolution by interpolation)" if resol_changed else ""}' - ) - self.profiles[ikey] = profiles_new.profiles[ikey] + So, this means that expro_qpar_beam is in units of #/sec/m^3, meaning that + it is a particle flux DENSITY. It therefore requires volume integral and + divide by surface to produce a flux. - self.deriveQuantities() + The units of this qpar_beam column is NOT MW/m^3. In the gacode source codes + they also say that those units are wrong. - if write_new_file is not None: - self.writeCurrentStatus(file=write_new_file) + """ + + self._ensure_shaping_coeffs() - # ***************** + def _ensure_shaping_coeffs(self): + + # Ensure that we also have the shape coefficients + num_moments = 7 # This is the max number of moments I'll be considering. 
If I don't have that many (usually there are 5 or 3), it'll be populated with zeros + if "shape_cos0(-)" not in self.profiles: + self.profiles["shape_cos0(-)"] = np.ones(self.profiles["rmaj(m)"].shape) + for i in range(num_moments): + if f"shape_cos{i + 1}(-)" not in self.profiles: + self.profiles[f"shape_cos{i + 1}(-)"] = np.zeros(self.profiles["rmaj(m)"].shape) + if f"shape_sin{i + 1}(-)" not in self.profiles and i > 1: + self.profiles[f"shape_sin{i + 1}(-)"] = np.zeros(self.profiles["rmaj(m)"].shape) - def readHeader(self): + def _read_header(self): for i in range(len(self.lines)): if "# nexp" in self.lines[i]: istartProfs = i self.header = self.lines[:istartProfs] - def readProfiles(self): - singleLine, title, var = None, None, None # for ruff complaints + def _read_profiles(self): + singleLine, title, var = None, None, None # --- found = False @@ -267,10 +145,7 @@ def readProfiles(self): """ Sometimes there's a bug in TGYRO, where the powers may be too low (E-191) that cannot be properly written """ - varT = [ - float(j) if (j[-4].upper() == "E" or "." in j) else 0.0 - for j in var0[1:] - ] + varT = [float(j) if (j[-4].upper() == "E" or "." 
in j) else 0.0 for j in var0[1:]] var.append(varT) @@ -287,8 +162,26 @@ def readProfiles(self): self.profiles["w0(rad/s)"] = self.profiles["omega0(rad/s)"] del self.profiles["omega0(rad/s)"] + # ************************************************************************************************************************************************ + # Derivation (different from MITIMstate) + # ************************************************************************************************************************************************ + + def derive_quantities(self, **kwargs): + + if "derived" not in self.__dict__: + self.derived = {} + + self._produce_shape_lists() + + # Define the minor radius used in all calculations (could be the half-width of the midplane intersect, or an effective minor radius) + self.derived["r"] = self.profiles["rmin(m)"] - def produce_shape_lists(self): + super().derive_quantities_base(**kwargs) + + def _produce_shape_lists(self): + + self._ensure_shaping_coeffs() + self.shape_cos = [ self.profiles["shape_cos0(-)"], # tilt self.profiles["shape_cos1(-)"], @@ -308,4081 +201,693 @@ def produce_shape_lists(self): self.profiles["shape_sin6(-)"], ]
self.profiles["ni(10^19/m^3)"][:, sp] - self.nZFast += ( - self.profiles["ni(10^19/m^3)"][:, sp] * self.profiles["z"][sp] - ) - else: - self.nThermal += self.profiles["ni(10^19/m^3)"][:, sp] - self.nZThermal += ( - self.profiles["ni(10^19/m^3)"][:, sp] * self.profiles["z"][sp] - ) - - def deriveQuantities_full(self, mi_ref=None, n_theta_geo=1001, rederiveGeometry=True): - """ - deriving geometry is expensive, so if I'm just updating profiles it may not be needed - """ - - self.varqmom = "qmom(N/m^2)" - if self.varqmom not in self.profiles: - self.profiles[self.varqmom] = self.profiles["rho(-)"] * 0.0 + def derive_geometry(self, n_theta_geo=1001, **kwargs): - if "derived" not in self.__dict__: - self.derived = {} + self._produce_shape_lists() - # --------------------------------------------------------------------------------------------------------------------- - # --------- MAIN (useful for STATEtools) - # --------------------------------------------------------------------------------------------------------------------- - - self.derived["a"] = self.profiles["rmin(m)"][-1] - # self.derived['epsX'] = self.profiles['rmaj(m)'] / self.profiles['rmin(m)'] - # self.derived['eps'] = self.derived['epsX'][-1] - self.derived["eps"] = self.profiles["rmin(m)"][-1] / self.profiles["rmaj(m)"][-1] - - self.derived["roa"] = self.profiles["rmin(m)"] / self.derived["a"] - self.derived["Rmajoa"] = self.profiles["rmaj(m)"] / self.derived["a"] - self.derived["Zmagoa"] = self.profiles["zmag(m)"] / self.derived["a"] - - self.derived["torflux"] = ( - float(self.profiles["torfluxa(Wb/radian)"][0]) - * 2 - * np.pi - * self.profiles["rho(-)"] ** 2 - ) # Wb - self.derived["B_unit"] = PLASMAtools.Bunit( - self.derived["torflux"], self.profiles["rmin(m)"] - ) + ( + self.derived["volp_geo"], + self.derived["surf_geo"], + self.derived["gradr_geo"], + self.derived["bp2_geo"], + self.derived["bt2_geo"], + self.derived["bt_geo"], + ) = calculateGeometricFactors(self,n_theta=n_theta_geo) + + # 
Calculate flux surfaces + cn = np.array(self.shape_cos).T + sn = copy.deepcopy(self.shape_sin) + sn[0] = self.profiles["rmaj(m)"]*0.0 + sn[1] = np.arcsin(self.profiles["delta(-)"]) + sn[2] = -self.profiles["zeta(-)"] + sn = np.array(sn).T + flux_surfaces = GEQtools.mitim_flux_surfaces() + flux_surfaces.reconstruct_from_mxh_moments( + self.profiles["rmaj(m)"], + self.profiles["rmin(m)"], + self.profiles["kappa(-)"], + self.profiles["zmag(m)"], + cn, + sn) + self.derived["R_surface"],self.derived["Z_surface"] = np.array([flux_surfaces.R]), np.array([flux_surfaces.Z]) + + # R and Z have [toroidal, radius, point], to allow for non-axisymmetric cases + # ----------------------------------------------- - self.derived["psi_pol_n"] = ( - self.profiles["polflux(Wb/radian)"] - self.profiles["polflux(Wb/radian)"][0] - ) / ( - self.profiles["polflux(Wb/radian)"][-1] - - self.profiles["polflux(Wb/radian)"][0] - ) - self.derived["rho_pol"] = self.derived["psi_pol_n"] ** 0.5 + #cross-sectional area of each flux surface + self.derived["surfXS"] = xsec_area_RZ(self.derived["R_surface"][0,...],self.derived["Z_surface"][0,...]) - self.derived["q95"] = np.interp( - 0.95, self.derived["psi_pol_n"], self.profiles["q(-)"] - ) + self.derived["R_LF"] = self.derived["R_surface"][0,...].max(axis=-1) # self.profiles['rmaj(m)'][0]+self.profiles['rmin(m)'] - self.derived["q0"] = self.profiles["q(-)"][0] + # For Synchrotron + self.derived["B_ref"] = np.abs(self.derived["B_unit"] * self.derived["bt_geo"]) - if self.profiles["q(-)"].min() > 1.0: - self.derived["rho_saw"] = np.nan - else: - self.derived["rho_saw"] = np.interp( - 1.0, self.profiles["q(-)"], self.profiles["rho(-)"] - ) + """ + surf_geo is truly surface area, but because of the GACODE definitions of flux, + Surf = V' <|grad r|> + Surf_GACODE = V' + """ + self.derived["surfGACODE_geo"] = (self.derived["surf_geo"] / self.derived["gradr_geo"]) + self.derived["surfGACODE_geo"][np.isnan(self.derived["surfGACODE_geo"])] = 0 - # --------- 
Geometry (only if it doesn't exist or if I ask to recalculate) - if rederiveGeometry or ("volp_miller" not in self.derived): + self.derived["kappa95"] = np.interp(0.95, self.derived["psi_pol_n"], self.profiles["kappa(-)"]) - self.produce_shape_lists() + self.derived["kappa995"] = np.interp(0.995, self.derived["psi_pol_n"], self.profiles["kappa(-)"]) - ( - self.derived["volp_miller"], - self.derived["surf_miller"], - self.derived["gradr_miller"], - self.derived["bp2_miller"], - self.derived["bt2_miller"], - self.derived["geo_bt"], - ) = GEOMETRYtools.calculateGeometricFactors( - self, - n_theta=n_theta_geo, - ) + self.derived["delta95"] = np.interp(0.95, self.derived["psi_pol_n"], self.profiles["delta(-)"]) - # Calculate flux surfaces - cn = np.array(self.shape_cos).T - sn = copy.deepcopy(self.shape_sin) - sn[0] = self.profiles["rmaj(m)"]*0.0 - sn[1] = np.arcsin(self.profiles["delta(-)"]) - sn[2] = -self.profiles["zeta(-)"] - sn = np.array(sn).T - flux_surfaces = GEQtools.mitim_flux_surfaces() - flux_surfaces.reconstruct_from_mxh_moments( - self.profiles["rmaj(m)"], - self.profiles["rmin(m)"], - self.profiles["kappa(-)"], - self.profiles["zmag(m)"], - cn, - sn) - self.derived["R_surface"],self.derived["Z_surface"] = flux_surfaces.R, flux_surfaces.Z - # ----------------------------------------------- - - #cross-sectional area of each flux surface - self.derived["surfXS"] = GEOMETRYtools.xsec_area_RZ( - self.derived["R_surface"], - self.derived["Z_surface"] - ) + self.derived["delta995"] = np.interp(0.995, self.derived["psi_pol_n"], self.profiles["delta(-)"]) - self.derived["R_LF"] = self.derived["R_surface"].max( - axis=1 - ) # self.profiles['rmaj(m)'][0]+self.profiles['rmin(m)'] + self.derived["zeta95"] = np.interp(0.95, self.derived["psi_pol_n"], self.profiles["zeta(-)"]) - # For Synchrotron - self.derived["B_ref"] = np.abs( - self.derived["B_unit"] * self.derived["geo_bt"] - ) + self.derived["zeta995"] = np.interp(0.995, self.derived["psi_pol_n"], 
self.profiles["zeta(-)"]) + + self.derived["kappa_a"] = self.derived["surfXS"][-1] / np.pi / self.derived["a"] ** 2 - # -------------------------------------------------------------------------- - # Reference mass - # -------------------------------------------------------------------------- + def plot_geometry(self, axs3, color="b", legYN=True, extralab="", lw=1, fs=6): - # Forcing mass from this specific deriveQuantities call - if mi_ref is not None: - self.derived["mi_ref"] = mi_ref - print(f'\t- Using mi_ref={self.derived["mi_ref"]} provided in this particular deriveQuantities method, subtituting initialization one',typeMsg='i') + [ax00c,ax10c,ax20c,ax01c,ax11c,ax21c,ax02c,ax12c,ax22c,ax3D,ax2D] = axs3 - # --------------------------------------------------------------------------------------------------------------------- - # --------- Important for scaling laws - # --------------------------------------------------------------------------------------------------------------------- + rho = self.profiles["rho(-)"] + lines = GRAPHICStools.listLS() - self.derived["kappa95"] = np.interp( - 0.95, self.derived["psi_pol_n"], self.profiles["kappa(-)"] - ) - self.derived["kappa995"] = np.interp( - 0.995, self.derived["psi_pol_n"], self.profiles["kappa(-)"] - ) + ax = ax00c - self.derived["kappa_a"] = self.derived["surfXS"][-1] / np.pi / self.derived["a"] ** 2 + var = self.derived['r'] + ax.plot(rho, var, "-", lw=lw, c=color) - self.derived["delta95"] = np.interp( - 0.95, self.derived["psi_pol_n"], self.profiles["delta(-)"] - ) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylim(bottom=0) + ax.set_ylabel("Effective radius ($r$)") - self.derived["delta995"] = np.interp( - 0.995, self.derived["psi_pol_n"], self.profiles["delta(-)"] - ) + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) - self.derived["Rgeo"] = float(self.profiles["rcentr(m)"][-1]) - self.derived["B0"] = np.abs(float(self.profiles["bcentr(T)"][-1])) - # 
--------------------------------------------------------------------------------------------------------------------- + ax = ax01c + ax.plot(self.profiles["rho(-)"], self.derived['volp_geo'], color=color, lw=lw, label = extralab) + ax.set_xlabel('$\\rho_N$'); ax.set_xlim(0, 1) + ax.set_ylabel(f"$dV/dr$ ($m^3/[r]$)") + GRAPHICStools.addDenseAxis(ax) + + if legYN: + ax.legend(loc="best", fontsize=fs) - """ - surf_miller is truly surface area, but because of the GACODE definitions of flux, - Surf = V' <|grad r|> - Surf_GACODE = V' - """ - self.derived["surfGACODE_miller"] = (self.derived["surf_miller"] / self.derived["gradr_miller"]) + ax = ax02c + var = self.profiles["polflux(Wb/radian)"] + ax.plot(rho, var, lw=lw, ls="-", c=color) - self.derived["surfGACODE_miller"][np.isnan(self.derived["surfGACODE_miller"])] = 0 + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("Poloidal $\\psi$ ($Wb/rad$)") - self.derived["c_s"] = PLASMAtools.c_s( - self.profiles["te(keV)"], self.derived["mi_ref"] - ) - self.derived["rho_s"] = PLASMAtools.rho_s( - self.profiles["te(keV)"], self.derived["mi_ref"], self.derived["B_unit"] - ) + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) - self.derived["q_gb"], self.derived["g_gb"], _, _, _ = PLASMAtools.gyrobohmUnits( - self.profiles["te(keV)"], - self.profiles["ne(10^19/m^3)"] * 1e-1, - self.derived["mi_ref"], - np.abs(self.derived["B_unit"]), - self.profiles["rmin(m)"][-1], - ) - """ - In prgen_map_plasmastate: - qspow_e = expro_qohme+expro_qbeame+expro_qrfe+expro_qfuse-expro_qei & - -expro_qsync-expro_qbrem-expro_qline - qspow_i = expro_qbeami+expro_qrfi+expro_qfusi+expro_qei - """ + # ---------------------------------------- + # Shaping params + # ---------------------------------------- + + minShape = 1E-3 - qe_terms = { - "qohme(MW/m^3)": 1, - "qbeame(MW/m^3)": 1, - "qrfe(MW/m^3)": 1, - "qfuse(MW/m^3)": 1, - "qei(MW/m^3)": -1, - "qsync(MW/m^3)": -1, - "qbrem(MW/m^3)": -1, - "qline(MW/m^3)": -1, - 
"qione(MW/m^3)": 1, - } - - self.derived["qe"] = np.zeros(len(self.profiles["rho(-)"])) - for i in qe_terms: - if i in self.profiles: - self.derived["qe"] += qe_terms[i] * self.profiles[i] - - qrad = { - "qsync(MW/m^3)": 1, - "qbrem(MW/m^3)": 1, - "qline(MW/m^3)": 1, - } - - self.derived["qrad"] = np.zeros(len(self.profiles["rho(-)"])) - for i in qrad: - if i in self.profiles: - self.derived["qrad"] += qrad[i] * self.profiles[i] - - qi_terms = { - "qbeami(MW/m^3)": 1, - "qrfi(MW/m^3)": 1, - "qfusi(MW/m^3)": 1, - "qei(MW/m^3)": 1, - "qioni(MW/m^3)": 1, - } - - self.derived["qi"] = np.zeros(len(self.profiles["rho(-)"])) - for i in qi_terms: - if i in self.profiles: - self.derived["qi"] += qi_terms[i] * self.profiles[i] - - # Depends on GACODE version - ge_terms = {self.varqpar: 1, self.varqpar2: 1} - - self.derived["ge"] = np.zeros(len(self.profiles["rho(-)"])) - for i in ge_terms: - if i in self.profiles: - self.derived["ge"] += ge_terms[i] * self.profiles[i] + ax = ax10c + cont = 0 + yl = 0 + for i, s in enumerate(self.shape_cos): + if s is not None: + valmax = np.abs(s).max() + if valmax > minShape: + lab = f"s{i}" + ax.plot(rho, s, lw=lw, ls=lines[cont], label=lab, c=color) + cont += 1 - """ - Careful, that's in MW/m^3. I need to find the volumes. Using here the Miller - calculation. 
Should be consistent with TGYRO + yl = np.max([yl, valmax]) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(f"sin-shape (>{minShape:.1e})") + if legYN: + ax.legend(loc="best", fontsize=fs) + GRAPHICStools.gradientSPAN(ax, -minShape, +minShape, color='k', startingalpha = 0.2, endingalpha = 0.2, orientation='horizontal') - profiles_gen puts any missing power into the CX: qioni, qione - """ + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) - r = self.profiles["rmin(m)"] - volp = self.derived["volp_miller"] + ax = ax11c + cont = 0 + yl = 0 + for i, s in enumerate(self.shape_sin): + if s is not None: + valmax = np.abs(s).max() + if valmax > minShape: + lab = f"c{i}" + ax.plot(rho, s, lw=lw, ls=lines[cont], label=lab, c=color) + cont += 1 - self.derived["qe_MWmiller"] = CALCtools.integrateFS(self.derived["qe"], r, volp) - self.derived["qi_MWmiller"] = CALCtools.integrateFS(self.derived["qi"], r, volp) - self.derived["ge_10E20miller"] = CALCtools.integrateFS( - self.derived["ge"] * 1e-20, r, volp - ) # Because the units were #/sec/m^3 + yl = np.max([yl, valmax]) - self.derived["geIn"] = self.derived["ge_10E20miller"][-1] # 1E20 particles/sec + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(f"cos-shape (>{minShape:.1e})") + if legYN: + ax.legend(loc="best", fontsize=fs) + GRAPHICStools.gradientSPAN(ax, -minShape, +minShape, color='k', startingalpha = 0.2, endingalpha = 0.2, orientation='horizontal') - self.derived["qe_MWm2"] = self.derived["qe_MWmiller"] / (volp) - self.derived["qi_MWm2"] = self.derived["qi_MWmiller"] / (volp) - self.derived["ge_10E20m2"] = self.derived["ge_10E20miller"] / (volp) - self.derived["QiQe"] = self.derived["qi_MWm2"] / np.where(self.derived["qe_MWm2"] == 0, 1e-10, self.derived["qe_MWm2"]) # to avoid division by zero + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) - # "Convective" flux - self.derived["ce_MWmiller"] = PLASMAtools.convective_flux( - self.profiles["te(keV)"], 
self.derived["ge_10E20miller"] - ) - self.derived["ce_MWm2"] = PLASMAtools.convective_flux( - self.profiles["te(keV)"], self.derived["ge_10E20m2"] - ) - # qmom - self.derived["mt_Jmiller"] = CALCtools.integrateFS( - self.profiles[self.varqmom], r, volp - ) - self.derived["mt_Jm2"] = self.derived["mt_Jmiller"] / (volp) + ax = ax12c - # Extras for plotting in TGYRO for comparison - P = np.zeros(len(self.profiles["rmin(m)"])) - if "qsync(MW/m^3)" in self.profiles: - P += self.profiles["qsync(MW/m^3)"] - if "qbrem(MW/m^3)" in self.profiles: - P += self.profiles["qbrem(MW/m^3)"] - if "qline(MW/m^3)" in self.profiles: - P += self.profiles["qline(MW/m^3)"] - self.derived["qe_rad_MWmiller"] = CALCtools.integrateFS(P, r, volp) + var = self.profiles["kappa(-)"] + ax.plot(rho, var, "-", lw=lw, c=color) - P = self.profiles["qei(MW/m^3)"] - self.derived["qe_exc_MWmiller"] = CALCtools.integrateFS(P, r, volp) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("$\\kappa$") - """ - --------------------------------------------------------------------------------------------------------------------- - Note that the real auxiliary power is RF+BEAMS+OHMIC, - The QIONE is added by TGYRO, but sometimes it includes radiation and direct RF to electrons - --------------------------------------------------------------------------------------------------------------------- - """ + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=1) - # ** Electrons + ax = ax20c + var = self.profiles["delta(-)"] + ax.plot(rho, var, "-", lw=lw, c=color, label = extralab + ', $\\delta$') - P = np.zeros(len(self.profiles["rho(-)"])) - for i in ["qrfe(MW/m^3)", "qohme(MW/m^3)", "qbeame(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] + var = self.profiles["zeta(-)"] + ax.plot(rho, var, "--", lw=lw, c=color, label = extralab + ', $\\zeta$') - self.derived["qe_auxONLY"] = copy.deepcopy(P) - self.derived["qe_auxONLY_MWmiller"] = CALCtools.integrateFS(P, r, volp) - for i 
in ["qione(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("$\\delta$ and $\\zeta$") - self.derived["qe_aux"] = copy.deepcopy(P) - self.derived["qe_aux_MWmiller"] = CALCtools.integrateFS(P, r, volp) - # ** Ions + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + if legYN: + ax.legend(loc="best", fontsize=fs) - P = np.zeros(len(self.profiles["rho(-)"])) - for i in ["qrfi(MW/m^3)", "qbeami(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] + ax = ax21c - self.derived["qi_auxONLY"] = copy.deepcopy(P) - self.derived["qi_auxONLY_MWmiller"] = CALCtools.integrateFS(P, r, volp) + var = self.profiles["rmaj(m)"] + ax.plot(rho, var, "-", lw=lw, c=color) - for i in ["qioni(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("$R_{maj}$") - self.derived["qi_aux"] = copy.deepcopy(P) - self.derived["qi_aux_MWmiller"] = CALCtools.integrateFS(P, r, volp) + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) - # ** General + ax = ax22c - P = np.zeros(len(self.profiles["rho(-)"])) - for i in ["qohme(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] - self.derived["qOhm_MWmiller"] = CALCtools.integrateFS(P, r, volp) + var = self.profiles["zmag(m)"] + ax.plot(rho, var, "-", lw=lw, c=color) - P = np.zeros(len(self.profiles["rho(-)"])) - for i in ["qrfe(MW/m^3)", "qrfi(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] - self.derived["qRF_MWmiller"] = CALCtools.integrateFS(P, r, volp) - if "qrfe(MW/m^3)" in self.profiles: - self.derived["qRFe_MWmiller"] = CALCtools.integrateFS( - self.profiles["qrfe(MW/m^3)"], r, volp - ) - if "qrfi(MW/m^3)" in self.profiles: - self.derived["qRFi_MWmiller"] = CALCtools.integrateFS( - self.profiles["qrfi(MW/m^3)"], r, volp - ) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + yl = np.max([0.1, np.max(np.abs(var))]) + ax.set_ylim([-yl, yl]) + 
ax.set_ylabel("$Z_{maj}$") - P = np.zeros(len(self.profiles["rho(-)"])) - for i in ["qbeame(MW/m^3)", "qbeami(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] - self.derived["qBEAM_MWmiller"] = CALCtools.integrateFS(P, r, volp) + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) - self.derived["qrad_MWmiller"] = CALCtools.integrateFS(self.derived["qrad"], r, volp) - if "qsync(MW/m^3)" in self.profiles: - self.derived["qrad_sync_MWmiller"] = CALCtools.integrateFS(self.profiles["qsync(MW/m^3)"], r, volp) - else: - self.derived["qrad_sync_MWmiller"] = self.derived["qrad_MWmiller"]*0.0 - if "qbrem(MW/m^3)" in self.profiles: - self.derived["qrad_brem_MWmiller"] = CALCtools.integrateFS(self.profiles["qbrem(MW/m^3)"], r, volp) - else: - self.derived["qrad_brem_MWmiller"] = self.derived["qrad_MWmiller"]*0.0 - if "qline(MW/m^3)" in self.profiles: - self.derived["qrad_line_MWmiller"] = CALCtools.integrateFS(self.profiles["qline(MW/m^3)"], r, volp) - else: - self.derived["qrad_line_MWmiller"] = self.derived["qrad_MWmiller"]*0.0 - - P = np.zeros(len(self.profiles["rho(-)"])) - for i in ["qfuse(MW/m^3)", "qfusi(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] - self.derived["qFus_MWmiller"] = CALCtools.integrateFS(P, r, volp) - - P = np.zeros(len(self.profiles["rho(-)"])) - for i in ["qioni(MW/m^3)", "qione(MW/m^3)"]: - if i in self.profiles: - P += self.profiles[i] - self.derived["qz_MWmiller"] = CALCtools.integrateFS(P, r, volp) - - self.derived["q_MWmiller"] = ( - self.derived["qe_MWmiller"] + self.derived["qi_MWmiller"] - ) - # --------------------------------------------------------------------------------------------------------------------- - # --------------------------------------------------------------------------------------------------------------------- + # ------------------------------ + # 3D and 2D plots + # ------------------------------ - P = np.zeros(len(self.profiles["rho(-)"])) - if "qfuse(MW/m^3)" in self.profiles: - P = 
self.profiles["qfuse(MW/m^3)"] - self.derived["qe_fus_MWmiller"] = CALCtools.integrateFS(P, r, volp) + ax = ax2D + self.plot_state_flux_surfaces(ax=ax, color=color) - P = np.zeros(len(self.profiles["rho(-)"])) - if "qfusi(MW/m^3)" in self.profiles: - P = self.profiles["qfusi(MW/m^3)"] - self.derived["qi_fus_MWmiller"] = CALCtools.integrateFS(P, r, volp) + ax.set_xlabel("R (m)") + ax.set_ylabel("Z (m)") + GRAPHICStools.addDenseAxis(ax) + + ax = ax3D + self.plot_plasma_boundary(ax=ax, color=color) + + def plot_state_flux_surfaces(self, ax=None, surfaces_rho=np.linspace(0, 1, 11), color="b", label = '', lw=1.0, lw1=2.0): + + if ax is None: + plt.ion() + fig, ax = plt.subplots() + provided = False + else: + provided = True - P = np.zeros(len(self.profiles["rho(-)"])) - if "qfusi(MW/m^3)" in self.profiles: - self.derived["q_fus"] = ( - self.profiles["qfuse(MW/m^3)"] + self.profiles["qfusi(MW/m^3)"] - ) * 5 - P = self.derived["q_fus"] - self.derived["q_fus"] = P - self.derived["q_fus_MWmiller"] = CALCtools.integrateFS(P, r, volp) + for rho in surfaces_rho: + ir = np.argmin(np.abs(self.profiles["rho(-)"] - rho)) - """ - Derivatives - """ - self.derived["aLTe"] = aLT(self.profiles["rmin(m)"], self.profiles["te(keV)"]) - self.derived["aLTi"] = self.profiles["ti(keV)"] * 0.0 - for i in range(self.profiles["ti(keV)"].shape[1]): - self.derived["aLTi"][:, i] = aLT( - self.profiles["rmin(m)"], self.profiles["ti(keV)"][:, i] - ) - self.derived["aLne"] = aLT( - self.profiles["rmin(m)"], self.profiles["ne(10^19/m^3)"] - ) - self.derived["aLni"] = [] - for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): - self.derived["aLni"].append( - aLT(self.profiles["rmin(m)"], self.profiles["ni(10^19/m^3)"][:, i]) - ) - self.derived["aLni"] = np.transpose(np.array(self.derived["aLni"])) + for i_toroidal in range(self.derived["R_surface"].shape[0]): + ax.plot( + self.derived["R_surface"][i_toroidal,ir, :], + self.derived["Z_surface"][i_toroidal,ir, :], + "-", + lw=lw if rho<1.0 else lw1, + 
c=color, + ) - if "w0(rad/s)" not in self.profiles: - self.profiles["w0(rad/s)"] = self.profiles["rho(-)"] * 0.0 - self.derived["aLw0"] = aLT(self.profiles["rmin(m)"], self.profiles["w0(rad/s)"]) - self.derived["dw0dr"] = -grad( - self.profiles["rmin(m)"], self.profiles["w0(rad/s)"] + ax.axhline(y=0, ls="--", lw=0.2, c="k") + ax.plot( + [self.profiles["rmaj(m)"][0]], + [self.profiles["zmag(m)"][0]], + "o", + markersize=2, + c=color, + label = label ) - self.derived["dqdr"] = grad(self.profiles["rmin(m)"], self.profiles["q(-)"]) + if not provided: + ax.set_xlabel("R (m)") + ax.set_ylabel("Z (m)") + ax.set_title("Surfaces @ rho=" + str(surfaces_rho), fontsize=8) + ax.set_aspect("equal") + def plot_plasma_boundary(self, ax=None, color="b"): + """ + Plot the 3D plasma boundary by extruding the poloidal cross-section toroidally. """ - Other, performance - """ - qFus = self.derived["qe_fus_MWmiller"] + self.derived["qi_fus_MWmiller"] - self.derived["Pfus"] = qFus[-1] * 5 - - # Note that in cases with NPRAD=0 in TRANPS, this includes radiation! no way to deal wit this... 
- qIn = self.derived["qe_aux_MWmiller"] + self.derived["qi_aux_MWmiller"] - self.derived["qIn"] = qIn[-1] - self.derived["Q"] = self.derived["Pfus"] / self.derived["qIn"] - self.derived["qHeat"] = qIn[-1] + qFus[-1] - - self.derived["qTr"] = ( - self.derived["qe_aux_MWmiller"] - + self.derived["qi_aux_MWmiller"] - + (self.derived["qe_fus_MWmiller"] + self.derived["qi_fus_MWmiller"]) - - self.derived["qrad_MWmiller"] - ) - - self.derived["Prad"] = self.derived["qrad_MWmiller"][-1] - self.derived["Prad_sync"] = self.derived["qrad_sync_MWmiller"][-1] - self.derived["Prad_brem"] = self.derived["qrad_brem_MWmiller"][-1] - self.derived["Prad_line"] = self.derived["qrad_line_MWmiller"][-1] - self.derived["Psol"] = self.derived["qHeat"] - self.derived["Prad"] - - self.derived["ni_thr"] = [] - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm": - self.derived["ni_thr"].append(self.profiles["ni(10^19/m^3)"][:, sp]) - self.derived["ni_thr"] = np.transpose(self.derived["ni_thr"]) - self.derived["ni_thrAll"] = self.derived["ni_thr"].sum(axis=1) - - self.derived["ni_All"] = self.profiles["ni(10^19/m^3)"].sum(axis=1) - - - ( - self.derived["ptot_manual"], - self.derived["pe"], - self.derived["pi"], - ) = PLASMAtools.calculatePressure( - np.expand_dims(self.profiles["te(keV)"], 0), - np.expand_dims(np.transpose(self.profiles["ti(keV)"]), 0), - np.expand_dims(self.profiles["ne(10^19/m^3)"] * 0.1, 0), - np.expand_dims(np.transpose(self.profiles["ni(10^19/m^3)"] * 0.1), 0), - ) - self.derived["ptot_manual"], self.derived["pe"], self.derived["pi"] = ( - self.derived["ptot_manual"][0], - self.derived["pe"][0], - self.derived["pi"][0], - ) - - ( - self.derived["pthr_manual"], - _, - self.derived["pi_thr"], - ) = PLASMAtools.calculatePressure( - np.expand_dims(self.profiles["te(keV)"], 0), - np.expand_dims(np.transpose(self.profiles["ti(keV)"]), 0), - np.expand_dims(self.profiles["ne(10^19/m^3)"] * 0.1, 0), - np.expand_dims(np.transpose(self.derived["ni_thr"] * 
0.1), 0), - ) - self.derived["pthr_manual"], self.derived["pi_thr"] = ( - self.derived["pthr_manual"][0], - self.derived["pi_thr"][0], - ) - # ------- - # Content - # ------- + n_phi = 50 # Number of toroidal points for the surface mesh - ( - self.derived["We"], - self.derived["Wi_thr"], - self.derived["Ne"], - self.derived["Ni_thr"], - ) = PLASMAtools.calculateContent( - np.expand_dims(r, 0), - np.expand_dims(self.profiles["te(keV)"], 0), - np.expand_dims(np.transpose(self.profiles["ti(keV)"]), 0), - np.expand_dims(self.profiles["ne(10^19/m^3)"] * 0.1, 0), - np.expand_dims(np.transpose(self.derived["ni_thr"] * 0.1), 0), - np.expand_dims(volp, 0), - ) + R = self.derived["R_surface"][0,-1,:] # Outermost flux surface R coordinates + Z = self.derived["Z_surface"][0,-1,:] # Outermost flux surface Z coordinates - ( - self.derived["We"], - self.derived["Wi_thr"], - self.derived["Ne"], - self.derived["Ni_thr"], - ) = ( - self.derived["We"][0], - self.derived["Wi_thr"][0], - self.derived["Ne"][0], - self.derived["Ni_thr"][0], - ) + # Create toroidal angle array + phi = np.linspace(0, 2*np.pi, n_phi) + + # Create meshgrid for toroidal extrusion + PHI, THETA_POINTS = np.meshgrid(phi, range(len(R))) + R_mesh = R[THETA_POINTS] + Z_mesh = Z[THETA_POINTS] + + # Convert to Cartesian coordinates + x = R_mesh * np.cos(PHI) + y = R_mesh * np.sin(PHI) + z = Z_mesh * np.ones_like(PHI) # Z doesn't depend on phi for axisymmetric case - self.derived["Nthr"] = self.derived["Ne"] + self.derived["Ni_thr"] - self.derived["Wthr"] = self.derived["We"] + self.derived["Wi_thr"] # Thermal + # Create the 3D plot + if ax is None: + fig = plt.figure(figsize=(10, 8)) + ax = fig.add_subplot(projection="3d") - self.derived["tauE"] = self.derived["Wthr"] / self.derived["qHeat"] # Seconds + # Plot the surface + ax.plot_surface(x, y, z, alpha=0.7, color=color) - self.derived["tauP"] = np.where(self.derived["geIn"] != 0, self.derived["Ne"] / self.derived["geIn"], np.inf) # Seconds + # Set labels and title 
+ ax.set_xlabel('X (m)') + ax.set_ylabel('Y (m)') + ax.set_zlabel('Z (m)') + ax.set_title('Plasma Boundary (3D)') + # Set equal aspect ratio + ax.set_aspect("equal") - self.derived["tauPotauE"] = self.derived["tauP"] / self.derived["tauE"] - - # Dilutions - self.derived["fi"] = self.profiles["ni(10^19/m^3)"] / np.atleast_2d( - self.profiles["ne(10^19/m^3)"] - ).transpose().repeat(self.profiles["ni(10^19/m^3)"].shape[1], axis=1) - - # Vol-avg density - self.derived["volume"] = CALCtools.integrateFS(np.ones(r.shape[0]), r, volp)[ - -1 - ] # m^3 - self.derived["ne_vol20"] = ( - CALCtools.integrateFS(self.profiles["ne(10^19/m^3)"] * 0.1, r, volp)[-1] - / self.derived["volume"] - ) # 1E20/m^3 - - self.derived["ni_vol20"] = np.zeros(self.profiles["ni(10^19/m^3)"].shape[1]) - self.derived["fi_vol"] = np.zeros(self.profiles["ni(10^19/m^3)"].shape[1]) - for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): - self.derived["ni_vol20"][i] = ( - CALCtools.integrateFS( - self.profiles["ni(10^19/m^3)"][:, i] * 0.1, r, volp - )[-1] - / self.derived["volume"] - ) # 1E20/m^3 - self.derived["fi_vol"][i] = ( - self.derived["ni_vol20"][i] / self.derived["ne_vol20"] - ) - - self.derived["fi_onlyions_vol"] = self.derived["ni_vol20"] / np.sum( - self.derived["ni_vol20"] - ) - - self.derived["ne_peaking"] = ( - self.profiles["ne(10^19/m^3)"][0] * 0.1 / self.derived["ne_vol20"] - ) +def calculateGeometricFactors(profiles, n_theta=1001): - xcoord = self.derived[ - "rho_pol" - ] # to find the peaking at rho_pol (with square root) as in Angioni PRL 2003 - self.derived["ne_peaking0.2"] = ( - self.profiles["ne(10^19/m^3)"][np.argmin(np.abs(xcoord - 0.2))] - * 0.1 - / self.derived["ne_vol20"] - ) + # ---------------------------------------- + # Raw parameters from the file + # in expro_util.f90, it performs those divisions to pass to geo library + # ---------------------------------------- - self.derived["Te_vol"] = ( - CALCtools.integrateFS(self.profiles["te(keV)"], r, volp)[-1] - / 
self.derived["volume"] - ) # keV - self.derived["Te_peaking"] = ( - self.profiles["te(keV)"][0] / self.derived["Te_vol"] - ) - self.derived["Ti_vol"] = ( - CALCtools.integrateFS(self.profiles["ti(keV)"][:, 0], r, volp)[-1] - / self.derived["volume"] - ) # keV - self.derived["Ti_peaking"] = ( - self.profiles["ti(keV)"][0, 0] / self.derived["Ti_vol"] - ) + r = profiles.profiles["rmin(m)"] / profiles.profiles["rmin(m)"][-1] + R = profiles.profiles["rmaj(m)"] / profiles.profiles["rmin(m)"][-1] + kappa = profiles.profiles["kappa(-)"] + delta = profiles.profiles["delta(-)"] + zeta = profiles.profiles["zeta(-)"] + zmag = profiles.profiles["zmag(m)"] / profiles.profiles["rmin(m)"][-1] + q = profiles.profiles["q(-)"] - self.derived["ptot_manual_vol"] = ( - CALCtools.integrateFS(self.derived["ptot_manual"], r, volp)[-1] - / self.derived["volume"] - ) # MPa - self.derived["pthr_manual_vol"] = ( - CALCtools.integrateFS(self.derived["pthr_manual"], r, volp)[-1] - / self.derived["volume"] - ) # MPa - - self.derived['pfast_manual'] = self.derived['ptot_manual'] - self.derived['pthr_manual'] - self.derived["pfast_manual_vol"] = ( - CALCtools.integrateFS(self.derived["pfast_manual"], r, volp)[-1] - / self.derived["volume"] - ) # MPa - - self.derived['pfast_fraction'] = self.derived['pfast_manual_vol'] / self.derived['ptot_manual_vol'] - - #approximate pedestal top density - self.derived['ptop(Pa)'] = np.interp(0.90, self.profiles['rho(-)'], self.profiles['ptot(Pa)']) - - # Quasineutrality - self.derived["QN_Error"] = np.abs( - 1 - np.sum(self.derived["fi_vol"] * self.profiles["z"]) - ) - self.derived["Zeff"] = ( - np.sum(self.profiles["ni(10^19/m^3)"] * self.profiles["z"] ** 2, axis=1) - / self.profiles["ne(10^19/m^3)"] - ) - self.derived["Zeff_vol"] = ( - CALCtools.integrateFS(self.derived["Zeff"], r, volp)[-1] - / self.derived["volume"] - ) + shape_coeffs = profiles.shape_cos + profiles.shape_sin + + signb = np.sign(profiles.profiles['torfluxa(Wb/radian)'][0]) + + # 
---------------------------------------- + # Derivatives as defined in expro_util.f90 + # ---------------------------------------- + + s_delta = r * MATHtools.deriv(r, delta) + s_kappa = r / kappa * MATHtools.deriv(r, kappa) + s_zeta = r * MATHtools.deriv(r, zeta) + dzmag = MATHtools.deriv(r, zmag) + dRmag = MATHtools.deriv(r, R) + + s_shape_coeffs = [] + for i in range(len(shape_coeffs)): + if shape_coeffs[i] is not None: + s_shape_coeffs.append(r * MATHtools.deriv(r, shape_coeffs[i])) + else: + s_shape_coeffs.append(None) + + # ---------------------------------------- + # Calculate the differencial volume at each radii + # from f2py/geo/geo.f90 in gacode source we have geo_volume_prime. + # ---------------------------------------- + + # Prepare cos_sins + cos_sin = [] + cos_sin_s = [] + for j in range(len(R)): + cos_sin0 = [] + cos_sin_s0 = [] + for k in range(len(shape_coeffs)): + if shape_coeffs[k] is not None: + cos_sin0.append(shape_coeffs[k][j]) + cos_sin_s0.append(s_shape_coeffs[k][j]) + else: + cos_sin0.append(None) + cos_sin_s0.append(None) + cos_sin.append(cos_sin0) + cos_sin_s.append(cos_sin_s0) + + ( + geo_volume_prime, + geo_surf, + geo_fluxsurfave_grad_r, + geo_fluxsurfave_bp2, + geo_fluxsurfave_bt2, + bt_geo0, + ) = volp_surf_geo_vectorized( + R, + r, + delta, + kappa, + cos_sin, + cos_sin_s, + zeta, + zmag, + s_delta, + s_kappa, + s_zeta, + dzmag, + dRmag, + q, + geo_signb_in=signb, + n_theta=n_theta, + ) - self.derived["nu_eff"] = PLASMAtools.coll_Angioni07( - self.derived["ne_vol20"] * 1e1, - self.derived["Te_vol"], - self.derived["Rgeo"], - Zeff=self.derived["Zeff_vol"], - ) + """ + from expro_util.f90 we have: + expro_volp(i) = geo_volume_prime*r_min**2, where r_min = expro_rmin(expro_n_exp) + expro_surf(i) = geo_surf*r_min**2 + """ + + volp = geo_volume_prime * profiles.profiles["rmin(m)"][-1] ** 2 + surf = geo_surf * profiles.profiles["rmin(m)"][-1] ** 2 + + return volp, surf, geo_fluxsurfave_grad_r, geo_fluxsurfave_bp2, geo_fluxsurfave_bt2, 
bt_geo0 + +def volp_surf_geo_vectorized( + geo_rmaj_in, + geo_rmin_in, + geo_delta_in, + geo_kappa_in, + cos_sin, + cos_sin_s, + geo_zeta_in, + geo_zmag_in, + geo_s_delta_in, + geo_s_kappa_in, + geo_s_zeta_in, + geo_dzmag_in, + geo_drmaj_in, + geo_q_in, + geo_signb_in = 1.0, + n_theta=1001): + """ + Completety from f2py/geo/geo.f90 + """ + + geo_rmin_in = geo_rmin_in.clip(1e-10) # To avoid problems at 0 (Implemented by PRF, not sure how TGYRO deals with this) + geo_q_in = geo_q_in.clip(1e-2) # To avoid problems at 0 with some geqdsk files that are corrupted... + + [ + geo_shape_cos0_in, + geo_shape_cos1_in, + geo_shape_cos2_in, + geo_shape_cos3_in, + geo_shape_cos4_in, + geo_shape_cos5_in, + geo_shape_cos6_in, + _, + _, + _, + geo_shape_sin3_in, + geo_shape_sin4_in, + geo_shape_sin5_in, + geo_shape_sin6_in, + ] = np.array(cos_sin).astype(float).T + + [ + geo_shape_s_cos0_in, + geo_shape_s_cos1_in, + geo_shape_s_cos2_in, + geo_shape_s_cos3_in, + geo_shape_s_cos4_in, + geo_shape_s_cos5_in, + geo_shape_s_cos6_in, + _, + _, + _, + geo_shape_s_sin3_in, + geo_shape_s_sin4_in, + geo_shape_s_sin5_in, + geo_shape_s_sin6_in, + ] = np.array(cos_sin_s).astype(float).T + + geov_theta = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_bigr = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_bigr_r = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_bigr_t = np.zeros((n_theta,geo_rmin_in.shape[0])) + bigz = np.zeros((n_theta,geo_rmin_in.shape[0])) + bigz_r = np.zeros((n_theta,geo_rmin_in.shape[0])) + bigz_t = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_jac_r = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_grad_r = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_l_t = np.zeros((n_theta,geo_rmin_in.shape[0])) + r_c = np.zeros((n_theta,geo_rmin_in.shape[0])) + bigz_l = np.zeros((n_theta,geo_rmin_in.shape[0])) + bigr_l = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_l_r = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_nsin = np.zeros((n_theta,geo_rmin_in.shape[0])) + + pi_2 = 8.0 
* np.arctan(1.0) + d_theta = pi_2 / (n_theta - 1) + + for i in range(n_theta): + #!----------------------------------------- + #! Generalized Miller-type parameterization + #!----------------------------------------- + + theta = -0.5 * pi_2 + i * d_theta + + geov_theta[i] = theta + + x = np.arcsin(geo_delta_in) + + #! A + #! dA/dtheta + #! d^2A/dtheta^2 + a = ( + theta + + geo_shape_cos0_in + + geo_shape_cos1_in * np.cos(theta) + + geo_shape_cos2_in * np.cos(2 * theta) + + geo_shape_cos3_in * np.cos(3 * theta) + + geo_shape_cos4_in * np.cos(4 * theta) + + geo_shape_cos5_in * np.cos(5 * theta) + + geo_shape_cos6_in * np.cos(6 * theta) + + x * np.sin(theta) + - geo_zeta_in * np.sin(2 * theta) + + geo_shape_sin3_in * np.sin(3 * theta) + + geo_shape_sin4_in * np.sin(4 * theta) + + geo_shape_sin5_in * np.sin(5 * theta) + + geo_shape_sin6_in * np.sin(6 * theta) + ) + a_t = ( + 1.0 + - geo_shape_cos1_in * np.sin(theta) + - 2 * geo_shape_cos2_in * np.sin(2 * theta) + - 3 * geo_shape_cos3_in * np.sin(3 * theta) + - 4 * geo_shape_cos4_in * np.sin(4 * theta) + - 5 * geo_shape_cos5_in * np.sin(5 * theta) + - 6 * geo_shape_cos6_in * np.sin(6 * theta) + + x * np.cos(theta) + - 2 * geo_zeta_in * np.cos(2 * theta) + + 3 * geo_shape_sin3_in * np.cos(3 * theta) + + 4 * geo_shape_sin4_in * np.cos(4 * theta) + + 5 * geo_shape_sin5_in * np.cos(5 * theta) + + 6 * geo_shape_sin6_in * np.cos(6 * theta) + ) + a_tt = ( + -geo_shape_cos1_in * np.cos(theta) + - 4 * geo_shape_cos2_in * np.cos(2 * theta) + - 9 * geo_shape_cos3_in * np.cos(3 * theta) + - 16 * geo_shape_cos4_in * np.cos(4 * theta) + - 25 * geo_shape_cos5_in * np.cos(5 * theta) + - 36 * geo_shape_cos6_in * np.cos(6 * theta) + - x * np.sin(theta) + + 4 * geo_zeta_in * np.sin(2 * theta) + - 9 * geo_shape_sin3_in * np.sin(3 * theta) + - 16 * geo_shape_sin4_in * np.sin(4 * theta) + - 25 * geo_shape_sin5_in * np.sin(5 * theta) + - 36 * geo_shape_sin6_in * np.sin(6 * theta) + ) + + #! R(theta) + #! dR/dr + #! dR/dtheta + #! 
d^2R/dtheta^2 + geov_bigr[i] = geo_rmaj_in + geo_rmin_in * np.cos(a) + geov_bigr_r[i] = ( + geo_drmaj_in + + np.cos(a) + - np.sin(a) + * ( + geo_shape_s_cos0_in + + geo_shape_s_cos1_in * np.cos(theta) + + geo_shape_s_cos2_in * np.cos(2 * theta) + + geo_shape_s_cos3_in * np.cos(3 * theta) + + geo_shape_s_cos4_in * np.cos(4 * theta) + + geo_shape_s_cos5_in * np.cos(5 * theta) + + geo_shape_s_cos6_in * np.cos(6 * theta) + + geo_s_delta_in / np.cos(x) * np.sin(theta) + - geo_s_zeta_in * np.sin(2 * theta) + + geo_shape_s_sin3_in * np.sin(3 * theta) + + geo_shape_s_sin4_in * np.sin(4 * theta) + + geo_shape_s_sin5_in * np.sin(5 * theta) + + geo_shape_s_sin6_in * np.sin(6 * theta) + ) + ) + geov_bigr_t[i] = -geo_rmin_in * a_t * np.sin(a) + bigr_tt = -geo_rmin_in * a_t**2 * np.cos(a) - geo_rmin_in * a_tt * np.sin(a) + + #!----------------------------------------------------------- + + #! A + #! dA/dtheta + #! d^2A/dtheta^2 + a = theta + a_t = 1.0 + a_tt = 0.0 + + #! Z(theta) + #! dZ/dr + #! dZ/dtheta + #! 
d^2Z/dtheta^2 + bigz[i] = geo_zmag_in + geo_kappa_in * geo_rmin_in * np.sin(a) + bigz_r[i] = geo_dzmag_in + geo_kappa_in * (1.0 + geo_s_kappa_in) * np.sin(a) + bigz_t[i] = geo_kappa_in * geo_rmin_in * np.cos(a) * a_t + bigz_tt = (-geo_kappa_in * geo_rmin_in * np.sin(a) * a_t**2+ geo_kappa_in * geo_rmin_in * np.cos(a) * a_tt) + + g_tt = geov_bigr_t[i] ** 2 + bigz_t[i] ** 2 + + geov_jac_r[i] = geov_bigr[i] * (geov_bigr_r[i] * bigz_t[i] - geov_bigr_t[i] * bigz_r[i]) + + geov_grad_r[i] = geov_bigr[i] * np.sqrt(g_tt) / geov_jac_r[i] + + geov_l_t[i] = np.sqrt(g_tt) + + r_c[i] = geov_l_t[i] ** 3 / (geov_bigr_t[i] * bigz_tt - bigz_t[i] * bigr_tt) + + bigz_l[i] = bigz_t[i] / geov_l_t[i] + + bigr_l[i] = geov_bigr_t[i] / geov_l_t[i] + + geov_l_r[i] = bigz_l[i] * bigz_r[i] + bigr_l[i] * geov_bigr_r[i] + + geov_nsin[i] = (geov_bigr_r[i] * geov_bigr_t[i] + bigz_r[i] * bigz_t[i]) / geov_l_t[i] + + c = 0.0 + for i in range(n_theta - 1): + c += geov_l_t[i] / (geov_bigr[i] * geov_grad_r[i]) + + f = geo_rmin_in / (c * d_theta / pi_2) + + c = 0.0 + for i in range(n_theta - 1): + c = c + geov_l_t[i] * geov_bigr[i] / geov_grad_r[i] + + geo_volume_prime = pi_2 * c * d_theta + + # Line 716 in geo.f90 + geo_surf = 0.0 + for i in range(n_theta - 1): + geo_surf = geo_surf + geov_l_t[i] * geov_bigr[i] + geo_surf = pi_2 * geo_surf * d_theta + + geov_b = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_bp = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_g_theta = np.zeros((n_theta,geo_rmin_in.shape[0])) + geov_bt = np.zeros((n_theta,geo_rmin_in.shape[0])) + for i in range(n_theta): + geov_bt[i] = f / geov_bigr[i] + geov_bp[i] = (geo_rmin_in / geo_q_in) * geov_grad_r[i] / geov_bigr[i] + + geov_b[i] = geo_signb_in * (geov_bt[i] ** 2 + geov_bp[i] ** 2) ** 0.5 + geov_g_theta[i] = ( + geov_bigr[i] + * geov_b[i] + * geov_l_t[i] + / (geo_rmin_in * geo_rmaj_in * geov_grad_r[i]) + ) + + theta_0 = 0 + dx = geov_theta[1,0] - geov_theta[0,0] + x0 = theta_0 - geov_theta[0,0] + i1 = int(x0 / dx) + 1 + i2 = 
i1 + 1 + x1 = (i1 - 1) * dx + z = (x0 - x1) / dx + if i2 == n_theta: + i2 -= 1 + # bt_geo0 = geov_bt[i1] + (geov_bt[i2] - geov_bt[i1]) * z + bt_geo0 = geov_bt[n_theta // 2] - self.derived["nu_eff2"] = PLASMAtools.coll_Angioni07( - self.derived["ne_vol20"] * 1e1, - self.derived["Te_vol"], - self.derived["Rgeo"], - Zeff=2.0, - ) + denom = 0 + for i in range(n_theta - 1): + denom = denom + geov_g_theta[i] / geov_b[i] - # Avg mass - self.calculateMass() - - params_set_scaling = ( - np.abs(float(self.profiles["current(MA)"][-1])), - self.derived["Rgeo"], - self.derived["kappa_a"], - self.derived["ne_vol20"], - self.derived["a"] / self.derived["Rgeo"], - self.derived["B0"], - self.derived["mbg_main"], - self.derived["qHeat"], + geo_fluxsurfave_grad_r = 0 + for i in range(n_theta - 1): + geo_fluxsurfave_grad_r = ( + geo_fluxsurfave_grad_r + + geov_grad_r[i] * geov_g_theta[i] / geov_b[i] / denom ) - self.derived["tau98y2"], self.derived["H98"] = PLASMAtools.tau98y2( - *params_set_scaling, tauE=self.derived["tauE"] - ) - self.derived["tau89p"], self.derived["H89"] = PLASMAtools.tau89p( - *params_set_scaling, tauE=self.derived["tauE"] - ) - self.derived["tau97L"], self.derived["H97L"] = PLASMAtools.tau97L( - *params_set_scaling, tauE=self.derived["tauE"] + geo_fluxsurfave_bp2 = 0 + for i in range(n_theta - 1): + geo_fluxsurfave_bp2 = ( + geo_fluxsurfave_bp2 + + geov_bp[i] ** 2 * geov_g_theta[i] / geov_b[i] / denom ) - """ - Mach number - """ - - Vtor_LF_Mach1 = PLASMAtools.constructVtorFromMach( - 1.0, self.profiles["ti(keV)"][:, 0], self.derived["mbg"] - ) # m/s - w0_Mach1 = Vtor_LF_Mach1 / (self.derived["R_LF"]) # rad/s - self.derived["MachNum"] = self.profiles["w0(rad/s)"] / w0_Mach1 - self.derived["MachNum_vol"] = ( - CALCtools.integrateFS(self.derived["MachNum"], r, volp)[-1] - / self.derived["volume"] + geo_fluxsurfave_bt2 = 0 + for i in range(n_theta - 1): + geo_fluxsurfave_bt2 = ( + geo_fluxsurfave_bt2 + + geov_bt[i] ** 2 * geov_g_theta[i] / geov_b[i] / denom ) - # 
Retain the old beta definition for comparison with 0D modeling - Beta_old = (self.derived["ptot_manual_vol"]* 1e6 / (self.derived["B0"] ** 2 / (2 * 4 * np.pi * 1e-7))) - self.derived["BetaN_engineering"] = (Beta_old / - (np.abs(float(self.profiles["current(MA)"][-1])) / - (self.derived["a"] * self.derived["B0"]) - )* 100.0 - ) # expressed in percent - - ''' - --------------------------------------------------------------------------------------------------- - Using B_unit, derive and for betap and betat calculations. - Equivalent to GACODE expro_bp2, expro_bt2 - --------------------------------------------------------------------------------------------------- - ''' - - self.derived["bp2_exp"] = self.derived["bp2_miller"] * self.derived["B_unit"] ** 2 - self.derived["bt2_exp"] = self.derived["bt2_miller"] * self.derived["B_unit"] ** 2 - - # Calculate the volume averages of bt2 and bp2 - - P = self.derived["bp2_exp"] - self.derived["bp2_vol_avg"] = CALCtools.integrateFS(P, r, volp)[-1] / self.derived["volume"] - P = self.derived["bt2_exp"] - self.derived["bt2_vol_avg"] = CALCtools.integrateFS(P, r, volp)[-1] / self.derived["volume"] - - # calculate beta_poloidal and beta_toroidal using volume averaged values - # mu0 = 4pi x 10^-7, also need to convert MPa to Pa - - self.derived["Beta_p"] = (2 * 4 * np.pi * 1e-7)*self.derived["ptot_manual_vol"]* 1e6/self.derived["bp2_vol_avg"] - self.derived["Beta_t"] = (2 * 4 * np.pi * 1e-7)*self.derived["ptot_manual_vol"]* 1e6/self.derived["bt2_vol_avg"] - - self.derived["Beta"] = 1/(1/self.derived["Beta_p"]+1/self.derived["Beta_t"]) - - TroyonFactor = np.abs(float(self.profiles["current(MA)"][-1])) / (self.derived["a"] * self.derived["B0"]) - - self.derived["BetaN"] = self.derived["Beta"] / TroyonFactor * 100.0 - - # --- - - nG = PLASMAtools.Greenwald_density( - np.abs(float(self.profiles["current(MA)"][-1])), - float(self.profiles["rmin(m)"][-1]), - ) - self.derived["fG"] = self.derived["ne_vol20"] / nG - self.derived["fG_x"] = 
self.profiles["ne(10^19/m^3)"]* 0.1 / nG - - self.derived["tite"] = self.profiles["ti(keV)"][:, 0] / self.profiles["te(keV)"] - self.derived["tite_vol"] = self.derived["Ti_vol"] / self.derived["Te_vol"] - - self.derived["LH_nmin"] = PLASMAtools.LHthreshold_nmin( - np.abs(float(self.profiles["current(MA)"][-1])), - self.derived["B0"], - self.derived["a"], - self.derived["Rgeo"], - ) - - self.derived["LH_Martin2"] = ( - PLASMAtools.LHthreshold_Martin2( - self.derived["ne_vol20"], - self.derived["B0"], - self.derived["a"], - self.derived["Rgeo"], - nmin=self.derived["LH_nmin"], - ) - * (2 / self.derived["mbg_main"]) ** 1.11 - ) - - self.derived["LHratio"] = self.derived["Psol"] / self.derived["LH_Martin2"] - - self.readSpecies() - - # ------------------------------------------------------- - # q-star - # ------------------------------------------------------- - - self.derived["qstar"] = PLASMAtools.evaluate_qstar( - self.profiles['current(MA)'][0], - self.profiles['rcentr(m)'], - self.derived['kappa95'], - self.profiles['bcentr(T)'], - self.derived['eps'], - self.derived['delta95'], - ITERcorrection=False, - includeShaping=True, - )[0] - self.derived["qstar_ITER"] = PLASMAtools.evaluate_qstar( - self.profiles['current(MA)'][0], - self.profiles['rcentr(m)'], - self.derived['kappa95'], - self.profiles['bcentr(T)'], - self.derived['eps'], - self.derived['delta95'], - ITERcorrection=True, - includeShaping=True, - )[0] - - # ------------------------------------------------------- - # Separatrix estimations - # ------------------------------------------------------- - - # ~~~~ Estimate lambda_q - pressure_atm = self.derived["ptot_manual_vol"] * 1e6 / 101325.0 - Lambda_q = PLASMAtools.calculateHeatFluxWidth_Brunner(pressure_atm) - - # ~~~~ Estimate upstream temperature - Bt = self.profiles["bcentr(T)"][0] - Bp = self.derived["eps"] * Bt / self.derived["q95"] #TODO: VERY ROUGH APPROXIMATION!!!! 
- - self.derived['Te_lcfs_estimate'] = PLASMAtools.calculateUpstreamTemperature( - Lambda_q, - self.derived["q95"], - self.derived["ne_vol20"], - self.derived["Psol"], - self.profiles["rcentr(m)"][0], - Bp, - Bt - )[0] - - # ~~~~ Estimate upstream density - self.derived['ne_lcfs_estimate'] = self.derived["ne_vol20"] * 0.6 - - # ------------------------------------------------------- - # TGLF-relevant quantities - # ------------------------------------------------------- - - self.tglf_plasma() - - def tglf_plasma(self): - - def deriv_gacode(y): - return grad(self.profiles["rmin(m)"],y).cpu().numpy() - - self.derived["tite_all"] = self.profiles["ti(keV)"] / self.profiles["te(keV)"][:, np.newaxis] - - self.derived['betae'] = PLASMAtools.betae( - self.profiles['te(keV)'], - self.profiles['ne(10^19/m^3)']*0.1, - self.derived["B_unit"]) - - self.derived['xnue'] = PLASMAtools.xnue( - torch.from_numpy(self.profiles['te(keV)']).to(torch.double), - torch.from_numpy(self.profiles['ne(10^19/m^3)']*0.1).to(torch.double), - self.derived["a"], - mref_u=self.derived["mi_ref"]).cpu().numpy() - - self.derived['debye'] = PLASMAtools.debye( - self.profiles['te(keV)'], - self.profiles['ne(10^19/m^3)']*0.1, - self.derived["mi_ref"], - self.derived["B_unit"]) - - self.derived['pprime'] = 1E-7 * self.profiles["q(-)"]*self.derived['a']**2/self.profiles["rmin(m)"]/self.derived["B_unit"]**2*deriv_gacode(self.profiles["ptot(Pa)"]) - self.derived['pprime'][0] = 0.0 - - self.derived['drmin/dr'] = deriv_gacode(self.profiles["rmin(m)"]) - self.derived['dRmaj/dr'] = deriv_gacode(self.profiles["rmaj(m)"]) - self.derived['dZmaj/dr'] = deriv_gacode(self.profiles["zmag(m)"]) - - self.derived['s_kappa'] = self.profiles["rmin(m)"] / self.profiles["kappa(-)"] * deriv_gacode(self.profiles["kappa(-)"]) - self.derived['s_delta'] = self.profiles["rmin(m)"] * deriv_gacode(self.profiles["delta(-)"]) - self.derived['s_zeta'] = self.profiles["rmin(m)"] * deriv_gacode(self.profiles["zeta(-)"]) - - s = 
self.profiles["rmin(m)"] / self.profiles["q(-)"]*deriv_gacode(self.profiles["q(-)"]) - self.derived['s_q'] = np.concatenate([np.array([0.0]),(self.profiles["q(-)"][1:] / self.derived['roa'][1:])**2 * s[1:]]) # infinite in first location - - ''' - Rotations - -------------------------------------------------------- - From TGYRO/TGLF definitions - w0p = expro_w0p(:)/100.0 - f_rot(:) = w0p(:)/w0_norm - gamma_p0 = -r_maj(i_r)*f_rot(i_r)*w0_norm - gamma_eb0 = gamma_p0*r(i_r)/(q_abs*r_maj(i_r)) - ''' - - w0p = deriv_gacode(self.profiles["w0(rad/s)"]) - gamma_p0 = -self.profiles["rmaj(m)"]*w0p - gamma_eb0 = -deriv_gacode(self.profiles["w0(rad/s)"]) * self.profiles["rmin(m)"]/self.profiles["q(-)"] - - self.derived['vexb_shear'] = gamma_eb0 * self.derived["a"]/self.derived['c_s'] - self.derived['vpar_shear'] = gamma_p0 * self.derived["a"]/self.derived['c_s'] - self.derived['vpar'] = self.profiles["rmaj(m)"]*self.profiles["w0(rad/s)"]/self.derived['c_s'] - - def calculateMass(self): - self.derived["mbg"] = 0.0 - self.derived["fmain"] = 0.0 - for i in range(self.derived["ni_vol20"].shape[0]): - self.derived["mbg"] += ( - float(self.profiles["mass"][i]) * self.derived["fi_onlyions_vol"][i] - ) - - if self.DTplasmaBool: - self.derived["mbg_main"] = ( - self.profiles["mass"][self.Dion] - * self.derived["fi_onlyions_vol"][self.Dion] - + self.profiles["mass"][self.Tion] - * self.derived["fi_onlyions_vol"][self.Tion] - ) / ( - self.derived["fi_onlyions_vol"][self.Dion] - + self.derived["fi_onlyions_vol"][self.Tion] - ) - self.derived["fmain"] = ( - self.derived["fi_vol"][self.Dion] + self.derived["fi_vol"][self.Tion] - ) - else: - self.derived["mbg_main"] = self.profiles["mass"][self.Mion] - self.derived["fmain"] = self.derived["fi_vol"][self.Mion] - - def deriveContentByVolumes(self, rhos=[0.5], impurityPosition=3): - """ - Calculates total particles and energy for ions and electrons, at a given volume - It fails near axis because of the polynomial integral, requiring a number of 
poitns - """ - - min_number_points = 3 - - We_x = np.zeros(self.profiles["te(keV)"].shape[0]) - Wi_x = np.zeros(self.profiles["te(keV)"].shape[0]) - Ne_x = np.zeros(self.profiles["te(keV)"].shape[0]) - Ni_x = np.zeros(self.profiles["te(keV)"].shape[0]) - for j in range(self.profiles["te(keV)"].shape[0] - min_number_points): - i = j + min_number_points - We_x[i], Wi_x[i], Ne_x[i], _ = PLASMAtools.calculateContent( - np.expand_dims(self.profiles["rmin(m)"][:i], 0), - np.expand_dims(self.profiles["te(keV)"][:i], 0), - np.expand_dims(np.transpose(self.profiles["ti(keV)"][:i]), 0), - np.expand_dims(self.profiles["ne(10^19/m^3)"][:i] * 0.1, 0), - np.expand_dims( - np.transpose(self.profiles["ni(10^19/m^3)"][:i] * 0.1), 0 - ), - np.expand_dims(self.derived["volp_miller"][:i], 0), - ) - - _, _, Ni_x[i], _ = PLASMAtools.calculateContent( - np.expand_dims(self.profiles["rmin(m)"][:i], 0), - np.expand_dims(self.profiles["te(keV)"][:i], 0), - np.expand_dims(np.transpose(self.profiles["ti(keV)"][:i]), 0), - np.expand_dims( - self.profiles["ni(10^19/m^3)"][:i, impurityPosition] * 0.1, 0 - ), - np.expand_dims( - np.transpose(self.profiles["ni(10^19/m^3)"][:i] * 0.1), 0 - ), - np.expand_dims(self.derived["volp_miller"][:i], 0), - ) - - We, Wi, Ne, Ni = ( - np.zeros(len(rhos)), - np.zeros(len(rhos)), - np.zeros(len(rhos)), - np.zeros(len(rhos)), - ) - for i in range(len(rhos)): - We[i] = np.interp(rhos[i], self.profiles["rho(-)"], We_x) - Wi[i] = np.interp(rhos[i], self.profiles["rho(-)"], Wi_x) - Ne[i] = np.interp(rhos[i], self.profiles["rho(-)"], Ne_x) - Ni[i] = np.interp(rhos[i], self.profiles["rho(-)"], Ni_x) - - return We, Wi, Ne, Ni - - def printInfo(self, label="", reDeriveIfNotFound=True): - - if 'pfast_fraction' not in self.derived: self.derived['pfast_fraction'] = np.nan #TODO: remove this line - - try: - ImpurityText = "" - for i in range(len(self.Species)): - ImpurityText += f"{self.Species[i]['N']}({self.Species[i]['Z']:.0f},{self.Species[i]['A']:.0f}) = 
{self.derived['fi_vol'][i]:.1e}, " - ImpurityText = ImpurityText[:-2] - - print(f"\n***********************{label}****************") - print("Engineering Parameters:") - print(f"\tBt = {self.profiles['bcentr(T)'][0]:.2f}T, Ip = {self.profiles['current(MA)'][0]:.2f}MA (q95 = {self.derived['q95']:.2f}, q* = {self.derived['qstar']:.2f}, q*ITER = {self.derived['qstar_ITER']:.2f}), Pin = {self.derived['qIn']:.2f}MW") - print(f"\tR = {self.profiles['rcentr(m)'][0]:.2f}m, a = {self.derived['a']:.2f}m (eps = {self.derived['eps']:.3f})") - print(f"\tkappa_sep = {self.profiles['kappa(-)'][-1]:.2f}, kappa_995 = {self.derived['kappa995']:.2f}, kappa_95 = {self.derived['kappa95']:.2f}, kappa_a = {self.derived['kappa_a']:.2f}") - print(f"\tdelta_sep = {self.profiles['delta(-)'][-1]:.2f}, delta_995 = {self.derived['delta995']:.2f}, delta_95 = {self.derived['delta95']:.2f}") - print("Performance:") - print( - "\tQ = {0:.2f} (Pfus = {1:.1f}MW, Pin = {2:.1f}MW)".format( - self.derived["Q"], self.derived["Pfus"], self.derived["qIn"] - ) - ) - print( - "\tH98y2 = {0:.2f} (tauE = {1:.3f} s)".format( - self.derived["H98"], self.derived["tauE"] - ) - ) - print( - "\tH89p = {0:.2f} (H97L = {1:.2f})".format( - self.derived["H89"], self.derived["H97L"] - ) - ) - print( - "\tnu_ne = {0:.2f} (nu_eff = {1:.2f})".format( - self.derived["ne_peaking"], self.derived["nu_eff"] - ) - ) - print( - "\tnu_ne0.2 = {0:.2f} (nu_eff w/Zeff2 = {1:.2f})".format( - self.derived["ne_peaking0.2"], self.derived["nu_eff2"] - ) - ) - print(f"\tnu_Ti = {self.derived['Ti_peaking']:.2f}") - print(f"\tp_vol = {self.derived['ptot_manual_vol']:.2f} MPa ({self.derived['pfast_fraction']*100.0:.1f}% fast)") - print( - f"\tBetaN = {self.derived['BetaN']:.3f} (BetaN w/B0 = {self.derived['BetaN_engineering']:.3f})" - ) - print( - "\tPrad = {0:.1f}MW ({1:.1f}% of total)".format( - self.derived["Prad"], - self.derived["Prad"] / self.derived["qHeat"] * 100.0, - ) - ) - print( - "\tPsol = {0:.1f}MW (fLH = {1:.2f})".format( - 
self.derived["Psol"], self.derived["LHratio"] - ) - ) - print( - "Operational point ( [,] = [{0:.2f},{1:.2f}] ) and species:".format( - self.derived["ne_vol20"], self.derived["Te_vol"] - ) - ) - print( - "\t = {0:.2f} keV (/ = {1:.2f}, Ti0/Te0 = {2:.2f})".format( - self.derived["Ti_vol"], - self.derived["tite_vol"], - self.derived["tite"][0], - ) - ) - print( - "\tfG = {0:.2f} ( = {1:.2f} * 10^20 m^-3)".format( - self.derived["fG"], self.derived["ne_vol20"] - ) - ) - print( - f"\tZeff = {self.derived['Zeff_vol']:.2f} (M_main = {self.derived['mbg_main']:.2f}, f_main = {self.derived['fmain']:.2f}) [QN err = {self.derived['QN_Error']:.1e}]" - ) - print(f"\tMach = {self.derived['MachNum_vol']:.2f} (vol avg)") - print("Content:") - print( - "\tWe = {0:.2f} MJ, Wi_thr = {1:.2f} MJ (W_thr = {2:.2f} MJ)".format( - self.derived["We"], self.derived["Wi_thr"], self.derived["Wthr"] - ) - ) - print( - "\tNe = {0:.1f}*10^20, Ni_thr = {1:.1f}*10^20 (N_thr = {2:.1f}*10^20)".format( - self.derived["Ne"], self.derived["Ni_thr"], self.derived["Nthr"] - ) - ) - print( - f"\ttauE = { self.derived['tauE']:.3f} s, tauP = {self.derived['tauP']:.3f} s (tauP/tauE = {self.derived['tauPotauE']:.2f})" - ) - print("Species concentration:") - print(f"\t{ImpurityText}") - print("******************************************************") - except KeyError: - print( - "\t- When printing info, not all keys found, probably because this input.gacode class came from an old MITIM version", - typeMsg="w", - ) - if reDeriveIfNotFound: - self.deriveQuantities() - self.printInfo(label=label, reDeriveIfNotFound=False) - - def export_to_table(self, table=None, name=None): - - if table is None: - table = DataTable() - - data = [name] - for var in table.variables: - if table.variables[var][1] is not None: - if table.variables[var][1].split("_")[0] == "rho": - ix = np.argmin( - np.abs( - self.profiles["rho(-)"] - - float(table.variables[var][1].split("_")[1]) - ) - ) - elif table.variables[var][1].split("_")[0] == 
"psi": - ix = np.argmin( - np.abs( - self.derived["psi_pol_n"] - - float(table.variables[var][1].split("_")[1]) - ) - ) - elif table.variables[var][1].split("_")[0] == "pos": - ix = int(table.variables[var][1].split("_")[1]) - vari = self.__dict__[table.variables[var][2]][table.variables[var][0]][ - ix - ] - else: - vari = self.__dict__[table.variables[var][2]][table.variables[var][0]] - - data.append(f"{vari*table.variables[var][4]:{table.variables[var][3]}}") - - table.data.append(data) - print(f"\t* Exported {name} to table") - - return table - - def makeAllThermalIonsHaveSameTemp(self, refIon=0): - SpecRef = self.Species[refIon]["N"] - tiRef = self.profiles["ti(keV)"][:, refIon] - - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm" and sp != refIon: - print( - f"\t\t\t- Temperature forcing {self.Species[sp]['N']} --> {SpecRef}" - ) - self.profiles["ti(keV)"][:, sp] = tiRef - - def scaleAllThermalDensities(self, scaleFactor=1.0): - scaleFactor_ions = scaleFactor - - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm": - print( - f"\t\t\t- Scaling density of {self.Species[sp]['N']} by an average factor of {np.mean(scaleFactor_ions):.3f}" - ) - ni_orig = self.profiles["ni(10^19/m^3)"][:, sp] - self.profiles["ni(10^19/m^3)"][:, sp] = scaleFactor_ions * ni_orig - - def toNumpyArrays(self): - self.profiles.update({key: tensor.cpu().detach().cpu().numpy() for key, tensor in self.profiles.items() if isinstance(tensor, torch.Tensor)}) - self.derived.update({key: tensor.cpu().detach().cpu().numpy() for key, tensor in self.derived.items() if isinstance(tensor, torch.Tensor)}) - - def writeCurrentStatus(self, file=None, limitedNames=False): - print("\t- Writting input.gacode file") - - if file is None: - file = self.file - - with open(file, "w") as f: - for line in self.header: - f.write(line) - - for i in self.profiles: - if "(" not in i: - f.write(f"# {i}\n") - else: - f.write(f"# {i.split('(')[0]} | 
{i.split('(')[-1].split(')')[0]}\n") - - if i in self.titles_single: - if i == "name" and limitedNames: - newlist = [self.profiles[i][0]] - for k in self.profiles[i][1:]: - if k not in [ - "D", - "H", - "T", - "He4", - "he4", - "C", - "O", - "Ar", - "W", - ]: - newlist.append("C") - else: - newlist.append(k) - print( - f"\n\n!! Correcting ion names from {self.profiles[i]} to {newlist} to avoid TGYRO radiation error (to solve in future?)\n\n", - typeMsg="w", - ) - listWrite = newlist - else: - listWrite = self.profiles[i] - - if IOtools.isfloat(listWrite[0]): - listWrite = [f"{i:.7e}".rjust(14) for i in listWrite] - f.write(f"{''.join(listWrite)}\n") - else: - f.write(f"{' '.join(listWrite)}\n") - - else: - if len(self.profiles[i].shape) == 1: - for j, val in enumerate(self.profiles[i]): - pos = f"{j + 1}".rjust(3) - valt = f"{round(val,99):.7e}".rjust(15) - f.write(f"{pos}{valt}\n") - else: - for j, val in enumerate(self.profiles[i]): - pos = f"{j + 1}".rjust(3) - txt = "".join([f"{k:.7e}".rjust(15) for k in val]) - f.write(f"{pos}{txt}\n") - - print(f"\t\t~ File {IOtools.clipstr(file)} written") - - # Update file - self.file = file - - def writeMiminalKinetic(self, file): - setProfs = [ - "rho(-)", - "polflux(Wb/radian)", - "q(-)", - "te(keV)", - "ti(keV)", - "ne(10^19/m^3)", - ] - - with open(file, "w") as f: - for i in setProfs: - if "(" not in i: - f.write(f"# {i}\n") - else: - f.write(f"# {i.split('(')[0]} | {i.split('(')[-1].split(')')[0]}\n") - - if len(self.profiles[i].shape) > 1: - p = self.profiles[i][:, 0] - else: - p = self.profiles[i] - - for j, val in enumerate(p): - pos = f"{j + 1}".rjust(3) - valt = f"{val:.7e}".rjust(15) - f.write(f"{pos}{valt}\n") - - def changeResolution( - self, n=100, rho_new=None, interpolation_function=MATHtools.extrapolateCubicSpline - ): - rho = copy.deepcopy(self.profiles["rho(-)"]) - - if rho_new is None: - n = int(n) - rho_new = np.linspace(rho[0], rho[-1], n) - else: - rho_new = np.unique(np.sort(rho_new)) - n = 
len(rho_new) - - self.profiles["nexp"] = [str(n)] - - pro = self.profiles - for i in pro: - if i not in self.titles_single: - if len(pro[i].shape) == 1: - pro[i] = interpolation_function(rho_new, rho, pro[i]) - else: - prof = [] - for j in range(pro[i].shape[1]): - pp = interpolation_function(rho_new, rho, pro[i][:, j]) - prof.append(pp) - prof = np.array(prof) - - pro[i] = np.transpose(prof) - - self.produce_shape_lists() - - self.deriveQuantities() - - print( - f"\t\t- Resolution of profiles changed to {n} points with function {interpolation_function}" - ) - - def DTplasma(self): - self.Dion, self.Tion = None, None - try: - self.Dion = np.where(self.profiles["name"] == "D")[0][0] - except: - pass - try: - self.Tion = np.where(self.profiles["name"] == "T")[0][0] - except: - pass - - if self.Dion is not None and self.Tion is not None: - self.DTplasmaBool = True - else: - self.DTplasmaBool = False - if self.Dion is not None: - self.Mion = self.Dion # Main - elif self.Tion is not None: - self.Mion = self.Tion # Main - else: - self.Mion = ( - 0 # If no D or T, assume that the main ion is the first and only - ) - - self.ion_list_main = [] - if self.DTplasmaBool: - self.ion_list_main = [self.Dion+1, self.Tion+1] - else: - self.ion_list_main = [self.Mion+1] - - self.ion_list_impurities = [i+1 for i in range(len(self.Species)) if i+1 not in self.ion_list_main] - - def remove(self, ions_list): - # First order them - ions_list.sort() - print( - "\t\t- Removing ions in positions (of ions order, no zero): ", - ions_list, - typeMsg="i", - ) - - ions_list = [i - 1 for i in ions_list] - - fail = False - - var_changes = ["name", "type", "mass", "z"] - for i in var_changes: - try: - self.profiles[i] = np.delete(self.profiles[i], ions_list) - except: - print( - f"\t\t\t* Ions {[k+1 for k in ions_list]} could not be removed", - typeMsg="w", - ) - fail = True - break - - if not fail: - var_changes = ["ni(10^19/m^3)", "ti(keV)"] - for i in var_changes: - self.profiles[i] = 
np.delete(self.profiles[i], ions_list, axis=1) - - if not fail: - # Ensure we extract the scalar value from the array - self.profiles["nion"] = np.array( - [str(int(self.profiles["nion"][0]) - len(ions_list))] - ) - - self.readSpecies() - self.deriveQuantities(rederiveGeometry=False) - - print("\t\t\t- Set of ions in updated profiles: ", self.profiles["name"]) - - def lumpSpecies( - self, ions_list=[2, 3], allthermal=False, forcename=None, force_integer=False, force_mass=None - ): - """ - if (D,Z1,Z2), lumping Z1 and Z2 requires ions_list = [2,3] - - if force_integer, the Zeff won't be kept exactly - """ - - # All thermal except first - if allthermal: - ions_list = [] - for i in range(len(self.Species) - 1): - if self.Species[i + 1]["S"] == "therm": - ions_list.append(i + 2) - lab = "therm" - else: - lab = "therm" - - print( - "\t\t- Lumping ions in positions (of ions order, no zero): ", - ions_list, - typeMsg="i", - ) - - if forcename is None: - forcename = "LUMPED" - - # Contributions to dilution and to Zeff - fZ1 = np.zeros(self.derived["fi"].shape[0]) - fZ2 = np.zeros(self.derived["fi"].shape[0]) - for i in ions_list: - fZ1 += self.Species[i - 1]["Z"] * self.derived["fi"][:, i - 1] - fZ2 += self.Species[i - 1]["Z"] ** 2 * self.derived["fi"][:, i - 1] - - Zr = fZ2 / fZ1 - Zr_vol = ( - CALCtools.integrateFS( - Zr, self.profiles["rmin(m)"], self.derived["volp_miller"] - )[-1] - / self.derived["volume"] - ) - - print(f'\t\t\t* Original plasma had Zeff_vol={self.derived["Zeff_vol"]:.2f}, QN error={self.derived["QN_Error"]:.4f}') - - # New specie parameters - if force_integer: - Z = round(Zr_vol) - print(f"\t\t\t* Lumped Z forced to be an integer ({Zr_vol}->{Z}), so plasma may not be quasineutral or fulfill original Zeff",typeMsg="w",) - else: - Z = Zr_vol - - A = Z * 2 if force_mass is None else force_mass - nZ = fZ1 / Z * self.profiles["ne(10^19/m^3)"] - - print(f"\t\t\t* New lumped impurity has Z={Z:.2f}, A={A:.2f} (calculated as 2*Z)") - - # Insert cases - 
self.profiles["nion"] = np.array([f"{int(self.profiles['nion'][0])+1}"]) - self.profiles["name"] = np.append(self.profiles["name"], forcename) - self.profiles["mass"] = np.append(self.profiles["mass"], A) - self.profiles["z"] = np.append(self.profiles["z"], Z) - self.profiles["type"] = np.append(self.profiles["type"], f"[{lab}]") - self.profiles["ni(10^19/m^3)"] = np.append( - self.profiles["ni(10^19/m^3)"], np.transpose(np.atleast_2d(nZ)), axis=1 - ) - self.profiles["ti(keV)"] = np.append( - self.profiles["ti(keV)"], - np.transpose(np.atleast_2d(self.profiles["ti(keV)"][:, 0])), - axis=1, - ) - - self.readSpecies() - self.deriveQuantities(rederiveGeometry=False) - - # Remove species - self.remove(ions_list) - - # Contributions to dilution and to Zeff - print( - f'\t\t\t* New plasma has Zeff_vol={self.derived["Zeff_vol"]:.2f}, QN error={self.derived["QN_Error"]:.4f}' - ) - - def lumpImpurities(self): - - self.lumpSpecies(ions_list=self.ion_list_impurities) - - def lumpDT(self): - - if self.DTplasmaBool: - self.lumpSpecies(ions_list=self.ion_list_main, forcename="DT", force_mass=2.5) - else: - print('\t\t- No DT plasma, so no lumping of main ions') - - self.moveSpecie(pos=len(self.Species), pos_new=1) - - def changeZeff(self, Zeff, ion_pos=2, quasineutral_ions=None, enforceSameGradients=False): - """ - if (D,Z1,Z2), pos 1 -> change Z1 - """ - - if quasineutral_ions is None: - if self.DTplasmaBool: - quasineutral_ions = [self.Dion, self.Tion] - else: - quasineutral_ions = [self.Mion] - - print(f'\t\t- Changing Zeff (from {self.derived["Zeff_vol"]:.3f} to {Zeff=:.3f}) by changing content of ion in position {ion_pos} {self.Species[ion_pos]["N"],self.Species[ion_pos]["Z"]}, quasineutralized by ions {quasineutral_ions}',typeMsg="i",) - - # Plasma needs to be in quasineutrality to start with - self.enforceQuasineutrality() - - # ------------------------------------------------------ - # Contributions to equations - # ------------------------------------------------------ 
- Zq = np.zeros(self.derived["fi"].shape[0]) - Zq2 = np.zeros(self.derived["fi"].shape[0]) - fZj = np.zeros(self.derived["fi"].shape[0]) - fZj2 = np.zeros(self.derived["fi"].shape[0]) - for i in range(len(self.Species)): - if i in quasineutral_ions: - Zq += self.Species[i]["Z"] - Zq2 += self.Species[i]["Z"] ** 2 - elif i != ion_pos: - fZj += self.Species[i]["Z"] * self.derived["fi"][:, i] - fZj2 += self.Species[i]["Z"] ** 2 * self.derived["fi"][:, i] - else: - Zk = self.Species[i]["Z"] - - # ------------------------------------------------------ - # Find free parameters (fk and fq) - # ------------------------------------------------------ - - fk = ( Zeff - (1-fZj)*Zq2/Zq - fZj2 ) / ( Zk**2 - Zk*Zq2/Zq) - fq = ( 1 - fZj - fk*Zk ) / Zq - - if (fq<0).any(): - raise ValueError(f"Zeff cannot be reduced by changing ion #{ion_pos} because it would require negative densities for quasineutral ions") - - # ------------------------------------------------------ - # Insert - # ------------------------------------------------------ - - fi_orig = self.derived["fi"][:, ion_pos] - - self.profiles["ni(10^19/m^3)"][:, ion_pos] = fk * self.profiles["ne(10^19/m^3)"] - for i in quasineutral_ions: - self.profiles["ni(10^19/m^3)"][:, i] = fq * self.profiles["ne(10^19/m^3)"] - - self.readSpecies() - - self.deriveQuantities(rederiveGeometry=False) - - if enforceSameGradients: - self.scaleAllThermalDensities() - self.deriveQuantities(rederiveGeometry=False) - - print(f'\t\t\t* Dilution changed from {fi_orig.mean():.2e} (vol avg) to { self.derived["fi"][:, ion_pos].mean():.2e} to achieve Zeff={self.derived["Zeff_vol"]:.3f} (fDT={self.derived["fmain"]:.3f}) [quasineutrality error = {self.derived["QN_Error"]:.1e}]') - - def moveSpecie(self, pos=2, pos_new=1): - """ - if (D,Z1,Z2), pos 1 pos_new 2-> (Z1,D,Z2) - """ - - if pos_new > pos: - pos, pos_new = pos_new, pos - - position_to_moveFROM_in_profiles = pos - 1 - position_to_moveTO_in_profiles = pos_new - 1 - - print(f'\t\t- Moving ion in 
position (of ions order, no zero) {pos} ({self.profiles["name"][position_to_moveFROM_in_profiles]}) to {pos_new}',typeMsg="i",) - - self.profiles["nion"] = np.array([f"{int(self.profiles['nion'][0])+1}"]) - - for ikey in ["name", "mass", "z", "type", "ni(10^19/m^3)", "ti(keV)"]: - if len(self.profiles[ikey].shape) > 1: - axis = 1 - newly = self.profiles[ikey][:, position_to_moveFROM_in_profiles] - else: - axis = 0 - newly = self.profiles[ikey][position_to_moveFROM_in_profiles] - self.profiles[ikey] = np.insert( - self.profiles[ikey], position_to_moveTO_in_profiles, newly, axis=axis - ) - - self.readSpecies() - self.deriveQuantities(rederiveGeometry=False) - - if position_to_moveTO_in_profiles > position_to_moveFROM_in_profiles: - self.remove([position_to_moveFROM_in_profiles + 1]) - else: - self.remove([position_to_moveFROM_in_profiles + 2]) - - def addSpecie(self, Z=5.0, mass=10.0, fi_vol=0.1, forcename=None): - print( - f"\t\t- Creating new specie with Z={Z}, mass={mass}, fi_vol={fi_vol}", - typeMsg="i", - ) - - if forcename is None: - forcename = "LUMPED" - - lab = "therm" - nZ = fi_vol * self.profiles["ne(10^19/m^3)"] - - self.profiles["nion"] = np.array([f"{int(self.profiles['nion'][0])+1}"]) - self.profiles["name"] = np.append(self.profiles["name"], forcename) - self.profiles["mass"] = np.append(self.profiles["mass"], mass) - self.profiles["z"] = np.append(self.profiles["z"], Z) - self.profiles["type"] = np.append(self.profiles["type"], f"[{lab}]") - self.profiles["ni(10^19/m^3)"] = np.append( - self.profiles["ni(10^19/m^3)"], np.transpose(np.atleast_2d(nZ)), axis=1 - ) - self.profiles["ti(keV)"] = np.append( - self.profiles["ti(keV)"], - np.transpose(np.atleast_2d(self.profiles["ti(keV)"][:, 0])), - axis=1, - ) - if "vtor(m/s)" in self.profiles: - self.profiles["vtor(m/s)"] = np.append( - self.profiles["vtor(m/s)"], - np.transpose(np.atleast_2d(self.profiles["vtor(m/s)"][:, 0])), - axis=1, - ) - - self.readSpecies() - 
self.deriveQuantities(rederiveGeometry=False) - - def correct(self, options={}, write=False, new_file=None): - """ - if name= T D LUMPED, and I want to eliminate D, removeIons = [2] - """ - - recompute_ptot = options.get("recompute_ptot", True) # Only done by default - removeIons = options.get("removeIons", []) - removeFast = options.get("removeFast", False) - quasineutrality = options.get("quasineutrality", False) - sameDensityGradients = options.get("sameDensityGradients", False) - groupQIONE = options.get("groupQIONE", False) - ensurePostiveGamma = options.get("ensurePostiveGamma", False) - ensureMachNumber = options.get("ensureMachNumber", None) - FastIsThermal = options.get("FastIsThermal", False) - - print("\t- Custom correction of input.gacode file has been requested") - - # ---------------------------------------------------------------------- - # Correct - # ---------------------------------------------------------------------- - - # Remove desired ions - if len(removeIons) > 0: - self.remove(removeIons) - - # Remove fast - if removeFast: - ions_fast = [] - for sp in range(len(self.Species)): - if self.Species[sp]["S"] != "therm": - ions_fast.append(sp + 1) - if len(ions_fast) > 0: - print( - f"\t\t- Detected fast ions in positions {ions_fast}, removing them..." 
- ) - self.remove(ions_fast) - # Fast as thermal - elif FastIsThermal: - self.make_fast_ions_thermal() - - # Correct LUMPED - for i in range(len(self.profiles["name"])): - if self.profiles["name"][i] in ["LUMPED", "None"]: - name = ionName( - int(self.profiles["z"][i]), int(self.profiles["mass"][i]) - ) - if name is not None: - print( - f'\t\t- Ion in position #{i+1} was named LUMPED with Z={self.profiles["z"][i]}, now it is renamed to {name}', - typeMsg="i", - ) - self.profiles["name"][i] = name - else: - print( - f'\t\t- Ion in position #{i+1} was named LUMPED with Z={self.profiles["z"][i]}, but I could not find what element it is, so doing nothing', - typeMsg="w", - ) - - # Correct qione - if groupQIONE and (np.abs(self.profiles["qione(MW/m^3)"].sum()) > 1e-14): - print('\t\t- Inserting "qione" into "qrfe"', typeMsg="i") - self.profiles["qrfe(MW/m^3)"] += self.profiles["qione(MW/m^3)"] - self.profiles["qione(MW/m^3)"] = self.profiles["qione(MW/m^3)"] * 0.0 - - # Make all thermal ions have the same gradient as the electron density, by keeping volume average constant - if sameDensityGradients: - self.enforce_same_density_gradients() - - # Enforce quasineutrality - if quasineutrality: - self.enforceQuasineutrality() - - print(f"\t\t\t* Quasineutrality error = {self.derived['QN_Error']:.1e}") - - # Recompute ptot - if recompute_ptot: - self.deriveQuantities(rederiveGeometry=False) - self.selfconsistentPTOT() - - # If I don't trust the negative particle flux in the core that comes from TRANSP... 
- if ensurePostiveGamma: - print("\t\t- Making particle flux always positive", typeMsg="i") - self.profiles[self.varqpar] = self.profiles[self.varqpar].clip(0) - self.profiles[self.varqpar2] = self.profiles[self.varqpar2].clip(0) - - # Mach - if ensureMachNumber is not None: - self.introduceRotationProfile(Mach_LF=ensureMachNumber) - - # ---------------------------------------------------------------------- - # Re-derive - # ---------------------------------------------------------------------- - - self.deriveQuantities(rederiveGeometry=False) - - # ---------------------------------------------------------------------- - # Write - # ---------------------------------------------------------------------- - if write: - self.writeCurrentStatus(file=new_file) - self.printInfo() - - def enforce_same_density_gradients(self): - txt = "" - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm": - self.profiles["ni(10^19/m^3)"][:, sp] = self.derived["fi_vol"][sp] * self.profiles["ne(10^19/m^3)"] - txt += f"{self.Species[sp]['N']} " - print(f"\t\t- Making all thermal ions ({txt}) have the same a/Ln as electrons (making them an exact flat fraction)",typeMsg="i",) - self.deriveQuantities(rederiveGeometry=False) - - def make_fast_ions_thermal(self): - modified_num = 0 - for i in range(len(self.Species)): - if self.Species[i]["S"] != "therm": - print( - f'\t\t- Specie {i} ({self.profiles["name"][i]}) was fast, but now it is considered thermal' - ) - self.Species[i]["S"] = "therm" - self.profiles["type"][i] = "[therm]" - self.profiles["ti(keV)"][:, i] = self.profiles["ti(keV)"][:, 0] - modified_num += 1 - if modified_num > 0: - print("\t- Making fast species as if they were thermal (to keep dilution effect and Qi-sum of fluxes)",typeMsg="w") - - def selfconsistentPTOT(self): - print(f"\t\t* Recomputing ptot and inserting it as ptot(Pa), changed from p0 = {self.profiles['ptot(Pa)'][0] * 1e-3:.1f} to {self.derived['ptot_manual'][0]*1e+3:.1f} kPa",typeMsg="i") - 
self.profiles["ptot(Pa)"] = self.derived["ptot_manual"] * 1e6 - - def enforceQuasineutrality(self): - print(f"\t\t- Enforcing quasineutrality (error = {self.derived['QN_Error']:.1e})",typeMsg="i",) - - # What's the lack of quasineutrality? - ni = self.profiles["ne(10^19/m^3)"] * 0.0 - for sp in range(len(self.Species)): - ni += self.profiles["ni(10^19/m^3)"][:, sp] * self.profiles["z"][sp] - ne_missing = self.profiles["ne(10^19/m^3)"] - ni - - # What ion to modify? - if self.DTplasmaBool: - print("\t\t\t* Enforcing quasineutrality by modifying D and T equally") - prev_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Dion]) - self.profiles["ni(10^19/m^3)"][:, self.Dion] += ne_missing / 2 - self.profiles["ni(10^19/m^3)"][:, self.Tion] += ne_missing / 2 - new_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Dion]) - else: - print( - f"\t\t\t* Enforcing quasineutrality by modifying main ion (position #{self.Mion})" - ) - prev_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Mion]) - self.profiles["ni(10^19/m^3)"][:, self.Mion] += ne_missing - new_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Mion]) - - print( - f"\t\t\t\t- Changed on-axis density from n0 = {prev_on_axis:.2f} to {new_on_axis:.2f} ({100*(new_on_axis-prev_on_axis)/prev_on_axis:.1f}%)" - ) - - self.deriveQuantities(rederiveGeometry=False) - - def introduceRotationProfile(self, Mach_LF=1.0, new_file=None): - print(f"\t- Enforcing Mach Number in LF of {Mach_LF}") - self.deriveQuantities() - Vtor_LF = PLASMAtools.constructVtorFromMach( - Mach_LF, self.profiles["ti(keV)"][:, 0], self.derived["mbg"] - ) # m/s - - self.profiles["w0(rad/s)"] = Vtor_LF / (self.derived["R_LF"]) # rad/s - - self.deriveQuantities() - - if new_file is not None: - self.writeCurrentStatus(file=new_file) - - def plot( - self, - axs1=None, - axs2=None, - axs3=None, - axs4=None, - axsFlows=None, - axs6=None, - axsImps=None, - color="b", - legYN=True, - extralab="", - fn=None, - 
fnlab="", - lsFlows="-", - legFlows=True, - showtexts=True, - lastRhoGradients=0.89, - ): - if axs1 is None: - if fn is None: - from mitim_tools.misc_tools.GUItools import FigureNotebook - - self.fn = FigureNotebook("PROFILES Notebook", geometry="1600x1000") - - fig, fig2, fig3, fig4, fig5, fig6, fig7 = add_figures(self.fn, fnlab=fnlab) - - grid = plt.GridSpec(3, 3, hspace=0.3, wspace=0.3) - axs1 = [ - fig.add_subplot(grid[0, 0]), - fig.add_subplot(grid[1, 0]), - fig.add_subplot(grid[2, 0]), - fig.add_subplot(grid[0, 1]), - fig.add_subplot(grid[1, 1]), - fig.add_subplot(grid[2, 1]), - fig.add_subplot(grid[0, 2]), - fig.add_subplot(grid[1, 2]), - fig.add_subplot(grid[2, 2]), - ] - - - grid = plt.GridSpec(3, 2, hspace=0.3, wspace=0.3) - axs2 = [ - fig2.add_subplot(grid[0, 0]), - fig2.add_subplot(grid[0, 1]), - fig2.add_subplot(grid[1, 0]), - fig2.add_subplot(grid[1, 1]), - fig2.add_subplot(grid[2, 0]), - fig2.add_subplot(grid[2, 1]), - ] - - - grid = plt.GridSpec(3, 4, hspace=0.3, wspace=0.5) - ax00c = fig3.add_subplot(grid[0, 0]) - axs3 = [ - ax00c, - fig3.add_subplot(grid[1, 0], sharex=ax00c), - fig3.add_subplot(grid[2, 0], sharex=ax00c), - fig3.add_subplot(grid[0, 1], sharex=ax00c), - fig3.add_subplot(grid[1, 1], sharex=ax00c), - fig3.add_subplot(grid[2, 1], sharex=ax00c), - fig3.add_subplot(grid[0, 2], sharex=ax00c), - fig3.add_subplot(grid[1, 2], sharex=ax00c), - fig3.add_subplot(grid[2, 2], sharex=ax00c), - fig3.add_subplot(grid[0, 3], sharex=ax00c), - fig3.add_subplot(grid[1, 3], sharex=ax00c), - fig3.add_subplot(grid[2, 3], sharex=ax00c), - ] - - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axs4 = [ - fig4.add_subplot(grid[0, 0]), - fig4.add_subplot(grid[1, 0]), - fig4.add_subplot(grid[0, 1]), - fig4.add_subplot(grid[1, 1]), - fig4.add_subplot(grid[0, 2]), - fig4.add_subplot(grid[1, 2]), - ] - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - - axsFlows = [ - fig5.add_subplot(grid[0, 0]), - fig5.add_subplot(grid[1, 0]), - fig5.add_subplot(grid[0, 
1]), - fig5.add_subplot(grid[0, 2]), - fig5.add_subplot(grid[1, 1]), - fig5.add_subplot(grid[1, 2]), - ] - - - grid = plt.GridSpec(2, 4, hspace=0.3, wspace=0.3) - axs6 = [ - fig6.add_subplot(grid[0, 0]), - fig6.add_subplot(grid[:, 1]), - fig6.add_subplot(grid[0, 2]), - fig6.add_subplot(grid[1, 0]), - fig6.add_subplot(grid[1, 2]), - fig6.add_subplot(grid[0, 3]), - fig6.add_subplot(grid[1, 3]), - ] - - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsImps = [ - fig7.add_subplot(grid[0, 0]), - fig7.add_subplot(grid[0, 1]), - fig7.add_subplot(grid[0, 2]), - fig7.add_subplot(grid[1, 0]), - fig7.add_subplot(grid[1, 1]), - fig7.add_subplot(grid[1, 2]), - ] - - [ax00, ax10, ax20, ax01, ax11, ax21, ax02, ax12, ax22] = axs1 - [ax00b, ax01b, ax10b, ax11b, ax20b, ax21b] = axs2 - [ - ax00c, - ax10c, - ax20c, - ax01c, - ax11c, - ax21c, - ax02c, - ax12c, - ax22c, - ax03c, - ax13c, - ax23c, - ] = axs3 - - lw = 1 - fs = 6 - rho = self.profiles["rho(-)"] - - lines = ["-", "--", "-.", ":", "-", "--", "-."] - - self.plot_temps(ax=ax00, leg=legYN, col=color, lw=lw, fs=fs, extralab=extralab) - self.plot_dens(ax=ax01, leg=legYN, col=color, lw=lw, fs=fs, extralab=extralab) - - ax = ax10 - cont = 0 - for i in range(len(self.Species)): - if self.Species[i]["S"] == "therm": - var = self.profiles["ti(keV)"][:, i] - ax.plot( - rho, - var, - lw=lw, - ls=lines[cont], - c=color, - label=extralab + f"{i + 1} = {self.profiles['name'][i]}", - ) - cont += 1 - varL = "Thermal $T_i$ (keV)" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0); - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax20 - cont = 0 - for i in range(len(self.Species)): - if self.Species[i]["S"] == "fast": - var = self.profiles["ti(keV)"][:, i] - ax.plot( - rho, - var, - lw=lw, - ls=lines[cont], - c=color, - label=extralab + f"{i + 1} = {self.profiles['name'][i]}", - ) - cont += 1 - varL = 
"Fast $T_i$ (keV)" - ax.plot( - rho, - self.profiles["ti(keV)"][:, 0], - lw=0.5, - ls="-", - alpha=0.5, - c=color, - label=extralab + "$T_{i,1}$", - ) - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0); - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax11 - cont = 0 - for i in range(len(self.Species)): - if self.Species[i]["S"] == "therm": - var = self.profiles["ni(10^19/m^3)"][:, i] * 1e-1 - ax.plot( - rho, - var, - lw=lw, - ls=lines[cont], - c=color, - label=extralab + f"{i + 1} = {self.profiles['name'][i]}", - ) - cont += 1 - varL = "Thermal $n_i$ ($10^{20}/m^3$)" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0); - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax21 - cont = 0 - for i in range(len(self.Species)): - if self.Species[i]["S"] == "fast": - var = self.profiles["ni(10^19/m^3)"][:, i] * 1e-1 * 1e5 - ax.plot( - rho, - var, - lw=lw, - ls=lines[cont], - c=color, - label=extralab + f"{i + 1} = {self.profiles['name'][i]}", - ) - cont += 1 - varL = "Fast $n_i$ ($10^{15}/m^3$)" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0); - ax.set_ylabel(varL) - if legYN and cont>0: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax02 - var = self.profiles["w0(rad/s)"] - ax.plot(rho, var, lw=lw, ls="-", c=color) - varL = "$\\omega_{0}$ (rad/s)" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = ax12 - var = self.profiles["ptot(Pa)"] * 1e-6 - ax.plot(rho, var, lw=lw, ls="-", c=color, label=extralab + "ptot") - if "ptot_manual" in self.derived: - ax.plot( - rho, - self.derived["ptot_manual"], - lw=lw, - ls="--", - 
c=color, - label=extralab + "check", - ) - # ax.plot(rho,np.abs(var-self.derived['ptot_manual']),lw=lw,ls='-.',c=color,label=extralab+'diff') - - ax.plot( - rho, - self.derived["pthr_manual"], - lw=lw, - ls="-.", - c=color, - label=extralab + "check, thrm", - ) - - - varL = "$p$ (MPa)" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - # ax.set_ylim(bottom=0) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax00b - varL = "$MW/m^3$" - cont = 0 - var = -self.profiles["qei(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "i->e", c=color) - cont += 1 - if "qrfe(MW/m^3)" in self.profiles: - var = self.profiles["qrfe(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "rf", c=color) - cont += 1 - if "qfuse(MW/m^3)" in self.profiles: - var = self.profiles["qfuse(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "fus", c=color) - cont += 1 - if "qbeame(MW/m^3)" in self.profiles: - var = self.profiles["qbeame(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "beam", c=color) - cont += 1 - if "qione(MW/m^3)" in self.profiles: - var = self.profiles["qione(MW/m^3)"] - ax.plot( - rho, var, lw=lw / 2, ls=lines[cont], label=extralab + "extra", c=color - ) - cont += 1 - if "qohme(MW/m^3)" in self.profiles: - var = self.profiles["qohme(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "ohmic", c=color) - cont += 1 - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - ax.set_title("Electron Power Density") - ax.axhline(y=0, lw=0.5, ls="--", c="k") - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = ax01b - if "varqmom" not in self.__dict__: - self.varqmom = "qmom(N/m^2)" - self.profiles[self.varqmom] = self.profiles["rho(-)"] * 0.0 - - ax.plot(rho, self.profiles[self.varqmom], lw=lw, 
ls="-", c=color) - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel("$N/m^2$, $J/m^3$") - ax.axhline(y=0, lw=0.5, ls="--", c="k") - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - ax.set_title("Momentum Source Density") - - ax = ax21b - ax.plot( - rho, self.derived["qe_MWm2"], lw=lw, ls="-", label=extralab + "qe", c=color - ) - ax.plot( - rho, self.derived["qi_MWm2"], lw=lw, ls="--", label=extralab + "qi", c=color - ) - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel("Heat Flux ($MW/m^2$)") - if legYN: - ax.legend(loc="lower left", fontsize=fs) - ax.set_title("Flux per unit area (gacode: P/V')") - ax.axhline(y=0, lw=0.5, ls="--", c="k") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax21b.twinx() - ax.plot( - rho, - self.derived["ge_10E20m2"], - lw=lw, - ls="-.", - label=extralab + "$\\Gamma_e$", - c=color, - ) - ax.set_ylabel("Particle Flux ($10^{20}/m^2/s$)") - if legYN: - ax.legend(loc="lower right", fontsize=fs) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax20b - varL = "$Q_{rad}$ ($MW/m^3$)" - if "qbrem(MW/m^3)" in self.profiles: - var = self.profiles["qbrem(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls="-", label=extralab + "brem", c=color) - if "qline(MW/m^3)" in self.profiles: - var = self.profiles["qline(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls="--", label=extralab + "line", c=color) - if "qsync(MW/m^3)" in self.profiles: - var = self.profiles["qsync(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=":", label=extralab + "sync", c=color) - - var = self.derived["qrad"] - ax.plot(rho, var, lw=lw * 1.5, ls="-", label=extralab + "Total", c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0); - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - ax.set_title("Radiation Contributions") - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax10b - varL = "$MW/m^3$" - cont = 0 - var = 
self.profiles["qei(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "e->i", c=color) - cont += 1 - if "qrfi(MW/m^3)" in self.profiles: - var = self.profiles["qrfi(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "rf", c=color) - cont += 1 - if "qfusi(MW/m^3)" in self.profiles: - var = self.profiles["qfusi(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "fus", c=color) - cont += 1 - if "qbeami(MW/m^3)" in self.profiles: - var = self.profiles["qbeami(MW/m^3)"] - ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "beam", c=color) - cont += 1 - if "qioni(MW/m^3)" in self.profiles: - var = self.profiles["qioni(MW/m^3)"] - ax.plot( - rho, var, lw=lw / 2, ls=lines[cont], label=extralab + "extra", c=color - ) - cont += 1 - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - ax.set_title("Ion Power Density") - ax.axhline(y=0, lw=0.5, ls="--", c="k") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - """ - Note that in prgen_map_plasmastate, that variable: - expro_qpar_beam(i) = plst_sn_trans(i-1)/dvol - - Note that in prgen_read_plasmastate, that variable: - ! Particle source - err = nf90_inq_varid(ncid,trim('sn_trans'),varid) - err = nf90_get_var(ncid,varid,plst_sn_trans(1:nx-1)) - plst_sn_trans(nx) = 0.0 - - Note that in the plasmastate file, the variable "sn_trans": - - long_name: particle transport (loss) - units: #/sec - component: PLASMA - section: STATE_PROFILES - specification: R|units=#/sec|step*dV sn_trans(~nrho,0:nspec_th) - - So, this means that expro_qpar_beam is in units of #/sec/m^3, meaning that - it is a particle flux DENSITY. It therefore requires volume integral and - divide by surface to produce a flux. - - The units of this qpar_beam column is NOT MW/m^3. In the gacode source codes - they also say that those units are wrong. 
- - """ - - ax = ax11b - cont = 0 - var = self.profiles[self.varqpar] * 1e-20 - ax.plot(rho, var, lw=lw, ls=lines[0], c=color, label=extralab + "beam") - var = self.profiles[self.varqpar2] * 1e-20 - ax.plot(rho, var, lw=lw, ls=lines[1], c=color, label=extralab + "wall") - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0); - ax.axhline(y=0, lw=0.5, ls="--", c="k") - ax.set_ylabel("$10^{20}m^{-3}s^{-1}$") - ax.set_title("Particle Source Density") - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = ax00c - varL = "cos Shape Params" - yl = 0 - cont = 0 - - for i, s in enumerate(self.shape_cos): - if s is not None: - valmax = np.abs(s).max() - if valmax > 1e-10: - lab = f"c{i}" - ax.plot(rho, s, lw=lw, ls=lines[cont], label=lab, c=color) - cont += 1 - - yl = np.max([yl, valmax]) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - - - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - if legYN: - ax.legend(loc="best", fontsize=fs) - - ax = ax01c - varL = "sin Shape Params" - cont = 0 - for i, s in enumerate(self.shape_sin): - if s is not None: - valmax = np.abs(s).max() - if valmax > 1e-10: - lab = f"s{i}" - ax.plot(rho, s, lw=lw, ls=lines[cont], label=lab, c=color) - cont += 1 - - yl = np.max([yl, valmax]) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = ax02c - var = self.profiles["q(-)"] - ax.plot(rho, var, lw=lw, ls="-", c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0) - ax.set_ylabel("q") - - ax.axhline(y=1, ls="--", c="k", lw=1) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0.0) - - - - ax = ax12c - var = self.profiles["polflux(Wb/radian)"] - ax.plot(rho, var, lw=lw, ls="-", c=color) - - ax.set_xlim([0, 1]) - 
ax.set_xlabel("$\\rho$") - ax.set_ylabel("Poloidal $\\psi$ ($Wb/rad$)") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax10c - - var = self.profiles["rho(-)"] - ax.plot(rho, var, "-", lw=lw, c=color) - - ax.set_xlim([0, 1]) - - ax.set_xlabel("$\\rho$") - # ax.set_ylim(bottom=0) - ax.set_ylabel("$\\rho$") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax11c - - var = self.profiles["rmin(m)"] - ax.plot(rho, var, "-", lw=lw, c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylim(bottom=0) - ax.set_ylabel("$r_{min}$") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax20c - - var = self.profiles["rmaj(m)"] - ax.plot(rho, var, "-", lw=lw, c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel("$R_{maj}$") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = ax21c - - var = self.profiles["zmag(m)"] - ax.plot(rho, var, "-", lw=lw, c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - yl = np.max([0.1, np.max(np.abs(var))]) - ax.set_ylim([-yl, yl]) - ax.set_ylabel("$Z_{maj}$") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = ax22c - - var = self.profiles["kappa(-)"] - ax.plot(rho, var, "-", lw=lw, c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel("$\\kappa$") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=1) - - ax = ax03c - - var = self.profiles["delta(-)"] - ax.plot(rho, var, "-", lw=lw, c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel("$\\delta$") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = ax13c - - var = self.profiles["zeta(-)"] - ax.plot(rho, var, "-", lw=lw, c=color) - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel("zeta") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = ax23c - - var = 
self.profiles["johm(MA/m^2)"] - ax.plot(rho, var, "-", lw=lw, c=color, label=extralab + "$J_{OH}$") - var = self.profiles["jbs(MA/m^2)"] - ax.plot(rho, var, "--", lw=lw, c=color, label=extralab + "$J_{BS,par}$") - var = self.profiles["jbstor(MA/m^2)"] - ax.plot(rho, var, "-.", lw=lw, c=color, label=extralab + "$J_{BS,tor}$") - - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylim(bottom=0) - ax.set_ylabel("J ($MA/m^2$)") - if legYN: - ax.legend(loc="best", prop={"size": 7}) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - # Derived - self.plotGradients( - axs4, color=color, lw=lw, lastRho=lastRhoGradients, label=extralab - ) - - # Others - ax = axs6[0] - ax.plot(self.profiles["rho(-)"], self.derived["dw0dr"] * 1e-5, c=color, lw=lw) - ax.set_ylabel("$-d\\omega_0/dr$ (krad/s/cm)") - ax.set_xlabel("$\\rho$") - ax.set_xlim([0, 1]) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - ax.axhline(y=0, lw=1.0, c="k", ls="--") - - ax = axs6[2] - ax.plot(self.profiles["rho(-)"], self.derived["q_fus"], c=color, lw=lw) - ax.set_ylabel("$q_{fus}$ ($MW/m^3$)") - ax.set_xlabel("$\\rho$") - ax.set_xlim([0, 1]) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axs6[3] - ax.plot(self.profiles["rho(-)"], self.derived["q_fus_MWmiller"], c=color, lw=lw) - ax.set_ylabel("$P_{fus}$ ($MW$)") - ax.set_xlim([0, 1]) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axs6[4] - ax.plot(self.profiles["rho(-)"], self.derived["tite"], c=color, lw=lw) - ax.set_ylabel("$T_i/T_e$") - ax.set_xlabel("$\\rho$") - ax.set_xlim([0, 1]) - ax.axhline(y=1, ls="--", lw=1.0, c="k") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = axs6[5] - if "MachNum" in self.derived: - ax.plot(self.profiles["rho(-)"], self.derived["MachNum"], c=color, lw=lw) - ax.set_ylabel("Mach Number") - ax.set_xlabel("$\\rho$") - ax.set_xlim([0, 1]) - ax.axhline(y=0, ls="--", c="k", 
lw=0.5) - ax.axhline(y=1, ls="--", c="k", lw=0.5) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - ax = axs6[6] - safe_division = np.divide( - self.derived["qi_MWm2"], - self.derived["qe_MWm2"], - where=self.derived["qe_MWm2"] != 0, - out=np.full_like(self.derived["qi_MWm2"], np.nan), - ) - ax.plot( - self.profiles["rho(-)"], - safe_division, - c=color, - lw=lw, - label=extralab + "$Q_i/Q_e$", - ) - safe_division = np.divide( - self.derived["qi_aux_MWmiller"], - self.derived["qe_aux_MWmiller"], - where=self.derived["qe_aux_MWmiller"] != 0, - out=np.full_like(self.derived["qi_aux_MWmiller"], np.nan), - ) - ax.plot( - self.profiles["rho(-)"], - safe_division, - c=color, - lw=lw, - ls="--", - label=extralab + "$P_i/P_e$", - ) - ax.set_ylabel("Power ratios") - ax.set_xlabel("$\\rho$") - ax.set_xlim([0, 1]) - ax.axhline(y=1.0, ls="--", c="k", lw=1.0) - GRAPHICStools.addDenseAxis(ax) - # GRAPHICStools.autoscale_y(ax,bottomy=0) - ax.set_ylim(bottom=0) - ax.legend(loc="best", fontsize=fs) - - # Final - if axsFlows is not None: - self.plotBalance( - axs=axsFlows, ls=lsFlows, leg=legFlows, showtexts=showtexts - ) - - # Geometry - ax = axs6[1] - self.plotGeometry(ax=ax, color=color) - - ax.set_xlabel("R (m)") - ax.set_ylabel("Z (m)") - GRAPHICStools.addDenseAxis(ax) - - # Impurities - ax = axsImps[0] - for i in range(len(self.Species)): - var = ( - self.profiles["ni(10^19/m^3)"][:, i] - / self.profiles["ni(10^19/m^3)"][0, i] - ) - ax.plot( - rho, - var, - lw=lw, - ls=lines[i], - c=color, - label=extralab + f"{i + 1} = {self.profiles['name'][i]}", - ) - varL = "$n_i/n_{i,0}$" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axsImps[1] - for i in range(len(self.Species)): - var = self.derived["fi"][:, i] - ax.plot( - rho, - var, - lw=lw, - ls=lines[i], - c=color, - label=extralab + f"{i + 1} = 
{self.profiles['name'][i]}", - ) - varL = "$f_i$" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - ax.set_ylim([0, 1]) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axsImps[2] - - lastRho = 0.9 - - ix = np.argmin(np.abs(self.profiles["rho(-)"] - lastRho)) + 1 - ax.plot( - rho[:ix], self.derived["aLne"][:ix], lw=lw * 3, ls="-", c=color, label="e" - ) - for i in range(len(self.Species)): - var = self.derived["aLni"][:, i] - ax.plot( - rho[:ix], - var[:ix], - lw=lw, - ls=lines[i], - c=color, - label=extralab + f"{i + 1} = {self.profiles['name'][i]}", - ) - varL = "$a/L_{ni}$" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - if legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axsImps[5] - - ax = axsImps[3] - ax.plot(self.profiles["rho(-)"], self.derived["Zeff"], c=color, lw=lw) - ax.set_ylabel("$Z_{eff}$") - ax.set_xlabel("$\\rho$") - ax.set_xlim([0, 1]) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axsImps[4] - cont = 0 - if "vtor(m/s)" in self.profiles: - for i in range(len(self.Species)): - try: # REMOVE FOR FUTURE - var = self.profiles["vtor(m/s)"][:, i] * 1e-3 - ax.plot( - rho, - var, - lw=lw, - ls=lines[cont], - c=color, - label=extralab + f"{i + 1} = {self.profiles['name'][i]}", - ) - cont += 1 - except: - break - varL = "$V_{tor}$ (km/s)" - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - if "vtor(m/s)" in self.profiles and legYN: - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - def plotGradients( - self, - axs4, - color="b", - lw=1.0, - label="", - ls="-o", - lastRho=0.89, - ms=2, - alpha=1.0, - useRoa=False, - RhoLocationsPlot=None, - 
plotImpurity=None, - plotRotation=False, - autoscale=True, - ): - - if RhoLocationsPlot is None: RhoLocationsPlot=[] - - if axs4 is None: - plt.ion() - fig, axs = plt.subplots( - ncols=3 + int(plotImpurity is not None) + int(plotRotation), - nrows=2, - figsize=(12, 5), - ) - - axs4 = [] - for i in range(axs.shape[-1]): - axs4.append(axs[0, i]) - axs4.append(axs[1, i]) - - ix = np.argmin(np.abs(self.profiles["rho(-)"] - lastRho)) + 1 - - xcoord = self.profiles["rho(-)"] if (not useRoa) else self.derived["roa"] - labelx = "$\\rho$" if (not useRoa) else "$r/a$" - - ax = axs4[0] - ax.plot( - xcoord, - self.profiles["te(keV)"], - ls, - c=color, - lw=lw, - label=label, - markersize=ms, - alpha=alpha, - ) - ax = axs4[2] - ax.plot( - xcoord, - self.profiles["ti(keV)"][:, 0], - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - ax = axs4[4] - ax.plot( - xcoord, - self.profiles["ne(10^19/m^3)"] * 1e-1, - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - - if "derived" in self.__dict__: - ax = axs4[1] - ax.plot( - xcoord[:ix], - self.derived["aLTe"][:ix], - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - ax = axs4[3] - ax.plot( - xcoord[:ix], - self.derived["aLTi"][:ix, 0], - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - ax = axs4[5] - ax.plot( - xcoord[:ix], - self.derived["aLne"][:ix], - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - - for ax in axs4: - ax.set_xlim([0, 1]) - - ax = axs4[0] - ax.set_ylabel("$T_e$ (keV)") - ax.set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - ax.legend(loc="best", fontsize=7) - ax = axs4[2] - ax.set_ylabel("$T_i$ (keV)") - ax.set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - ax = axs4[4] - ax.set_ylabel("$n_e$ ($10^{20}m^{-3}$)") - ax.set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axs4[1] - ax.set_ylabel("$a/L_{Te}$") - ax.set_xlabel(labelx) - if autoscale: - 
GRAPHICStools.autoscale_y(ax, bottomy=0) - ax = axs4[3] - ax.set_ylabel("$a/L_{Ti}$") - ax.set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - ax = axs4[5] - ax.set_ylabel("$a/L_{ne}$") - ax.axhline(y=0, ls="--", lw=0.5, c="k") - ax.set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - - cont = 0 - if plotImpurity is not None: - axs4[6 + cont].plot( - xcoord, - self.profiles["ni(10^19/m^3)"][:, plotImpurity] * 1e-1, - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - axs4[6 + cont].set_ylabel("$n_Z$ ($10^{20}m^{-3}$)") - axs4[6].set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - if "derived" in self.__dict__: - axs4[7 + cont].plot( - xcoord[:ix], - self.derived["aLni"][:ix, plotImpurity], - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - axs4[7 + cont].set_ylabel("$a/L_{nZ}$") - axs4[7 + cont].axhline(y=0, ls="--", lw=0.5, c="k") - axs4[7 + cont].set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - cont += 2 - - if plotRotation: - axs4[6 + cont].plot( - xcoord, - self.profiles["w0(rad/s)"] * 1e-3, - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - axs4[6 + cont].set_ylabel("$w_0$ (krad/s)") - axs4[6 + cont].set_xlabel(labelx) - if "derived" in self.__dict__: - axs4[7 + cont].plot( - xcoord[:ix], - self.derived["dw0dr"][:ix] * 1e-5, - ls, - c=color, - lw=lw, - markersize=ms, - alpha=alpha, - ) - axs4[7 + cont].set_ylabel("-$d\\omega_0/dr$ (krad/s/cm)") - axs4[7 + cont].axhline(y=0, ls="--", lw=0.5, c="k") - axs4[7 + cont].set_xlabel(labelx) - if autoscale: - GRAPHICStools.autoscale_y(ax, bottomy=0) - cont += 2 - - for x0 in RhoLocationsPlot: - ix = np.argmin(np.abs(self.profiles["rho(-)"] - x0)) - for ax in axs4: - ax.axvline(x=xcoord[ix], ls="--", lw=0.5, c=color) - - for i in range(len(axs4)): - ax = axs4[i] - GRAPHICStools.addDenseAxis(ax) - - def plotBalance(self, axs=None, limits=None, ls="-", leg=True, 
showtexts=True): - if axs is None: - fig1 = plt.figure() - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - - axs = [ - fig1.add_subplot(grid[0, 0]), - fig1.add_subplot(grid[1, 0]), - fig1.add_subplot(grid[0, 1]), - fig1.add_subplot(grid[0, 2]), - fig1.add_subplot(grid[1, 1]), - fig1.add_subplot(grid[1, 2]), - ] - - # Profiles - - ax = axs[0] - axT = axs[1] - roa = self.profiles["rmin(m)"] / self.profiles["rmin(m)"][-1] - Te = self.profiles["te(keV)"] - ne = self.profiles["ne(10^19/m^3)"] * 1e-1 - ni = self.profiles["ni(10^19/m^3)"] * 1e-1 - niT = np.sum(ni, axis=1) - Ti = self.profiles["ti(keV)"][:, 0] - ax.plot(roa, Te, lw=2, c="r", label="$T_e$" if leg else "", ls=ls) - ax.plot(roa, Ti, lw=2, c="b", label="$T_i$" if leg else "", ls=ls) - axT.plot(roa, ne, lw=2, c="m", label="$n_e$" if leg else "", ls=ls) - axT.plot(roa, niT, lw=2, c="c", label="$\\sum n_i$" if leg else "", ls=ls) - if limits is not None: - [roa_first, roa_last] = limits - ax.plot(roa_last, np.interp(roa_last, roa, Te), "s", c="r", markersize=3) - ax.plot(roa_first, np.interp(roa_first, roa, Te), "s", c="r", markersize=3) - ax.plot(roa_last, np.interp(roa_last, roa, Ti), "s", c="b", markersize=3) - ax.plot(roa_first, np.interp(roa_first, roa, Ti), "s", c="b", markersize=3) - axT.plot(roa_last, np.interp(roa_last, roa, ne), "s", c="m", markersize=3) - axT.plot(roa_first, np.interp(roa_first, roa, ne), "s", c="m", markersize=3) - - ax.set_xlabel("r/a") - ax.set_xlim([0, 1]) - axT.set_xlabel("r/a") - axT.set_xlim([0, 1]) - ax.set_ylabel("$T$ (keV)") - ax.set_ylim(bottom=0) - axT.set_ylabel("$n$ ($10^{20}m^{-3}$)") - axT.set_ylim(bottom=0) - # axT.set_ylim([0,np.max(ne)*1.5]) - ax.legend() - axT.legend() - ax.set_title("Final Temperature profiles") - axT.set_title("Final Density profiles") - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - GRAPHICStools.addDenseAxis(axT) - GRAPHICStools.autoscale_y(axT, bottomy=0) - - if showtexts: - if self.derived["Q"] > 0.005: - 
ax.text( - 0.05, - 0.05, - f"Pfus = {self.derived['Pfus']:.1f}MW, Q = {self.derived['Q']:.2f}", - color="k", - fontsize=10, - fontweight="normal", - horizontalalignment="left", - verticalalignment="bottom", - rotation=0, - transform=ax.transAxes, - ) - - axT.text( - 0.05, - 0.4, - "ne_20 = {0:.1f} (fG = {1:.2f}), Zeff = {2:.1f}".format( - self.derived["ne_vol20"], - self.derived["fG"], - self.derived["Zeff_vol"], - ), - color="k", - fontsize=10, - fontweight="normal", - horizontalalignment="left", - verticalalignment="bottom", - rotation=0, - transform=axT.transAxes, - ) - - # F - ax = axs[2] - P = ( - self.derived["qe_fus_MWmiller"] - + self.derived["qe_aux_MWmiller"] - + -self.derived["qe_rad_MWmiller"] - + -self.derived["qe_exc_MWmiller"] - ) - - ax.plot( - roa, - -self.derived["qe_MWmiller"], - c="g", - lw=2, - label="$P_{e}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qe_fus_MWmiller"], - c="r", - lw=2, - label="$P_{fus,e}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qe_aux_MWmiller"], - c="b", - lw=2, - label="$P_{aux,e}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - -self.derived["qe_exc_MWmiller"], - c="m", - lw=2, - label="$P_{exc,e}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - -self.derived["qe_rad_MWmiller"], - c="c", - lw=2, - label="$P_{rad,e}$" if leg else "", - ls=ls, - ) - ax.plot(roa, -P, lw=1, c="y", label="sum" if leg else "", ls=ls) - - # Pe = self.profiles['te(keV)']*1E3*e_J*self.profiles['ne(10^19/m^3)']*1E-1*1E20 *1E-6 - # ax.plot(roa,Pe,ls='-',lw=3,alpha=0.1,c='k',label='$W_e$ (MJ/m^3)') - - ax.plot( - roa, - -self.derived["ce_MWmiller"], - c="k", - lw=1, - label="($P_{conv,e}$)" if leg else "", - ) - - ax.set_xlabel("r/a") - ax.set_xlim([0, 1]) - ax.set_ylabel("$P$ (MW)") - # ax.set_ylim(bottom=0) - ax.set_title("Electron Thermal Flows") - ax.axhline(y=0.0, lw=0.5, ls="--", c="k") - GRAPHICStools.addLegendApart( - ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" - ) - - 
GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - ax = axs[3] - P = ( - self.derived["qi_fus_MWmiller"] - + self.derived["qi_aux_MWmiller"] - + self.derived["qe_exc_MWmiller"] - ) - - ax.plot( - roa, - -self.derived["qi_MWmiller"], - c="g", - lw=2, - label="$P_{i}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qi_fus_MWmiller"], - c="r", - lw=2, - label="$P_{fus,i}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qi_aux_MWmiller"], - c="b", - lw=2, - label="$P_{aux,i}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qe_exc_MWmiller"], - c="m", - lw=2, - label="$P_{exc,i}$" if leg else "", - ls=ls, - ) - ax.plot(roa, -P, lw=1, c="y", label="sum" if leg else "", ls=ls) - - # Pi = self.profiles['ti(keV)'][:,0]*1E3*e_J*self.profiles['ni(10^19/m^3)'][:,0]*1E-1*1E20 *1E-6 - # ax.plot(roa,Pi,ls='-',lw=3,alpha=0.1,c='k',label='$W_$ (MJ/m^3)') - - ax.set_xlabel("r/a") - ax.set_xlim([0, 1]) - ax.set_ylabel("$P$ (MW)") - # ax.set_ylim(bottom=0) - ax.set_title("Ion Thermal Flows") - ax.axhline(y=0.0, lw=0.5, ls="--", c="k") - GRAPHICStools.addLegendApart( - ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" - ) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - # F - ax = axs[4] - - ax.plot( - roa, - self.derived["ge_10E20miller"], - c="g", - lw=2, - label="$\\Gamma_{e}$" if leg else "", - ls=ls, - ) - # ax.plot(roa,self.profiles['ne(10^19/m^3)']*1E-1,lw=3,alpha=0.1,c='k',label='$n_e$ ($10^{20}/m^3$)' if leg else '',ls=ls) - - ax.set_xlabel("r/a") - ax.set_xlim([0, 1]) - ax.set_ylabel("$\\Gamma$ ($10^{20}/s$)") - ax.set_title("Particle Flows") - ax.axhline(y=0.0, lw=0.5, ls="--", c="k") - GRAPHICStools.addLegendApart( - ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" - ) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - # TOTAL - ax = axs[5] - P = ( - self.derived["qOhm_MWmiller"] - + 
self.derived["qRF_MWmiller"] - + self.derived["qFus_MWmiller"] - + -self.derived["qe_rad_MWmiller"] - + self.derived["qz_MWmiller"] - + self.derived["qBEAM_MWmiller"] - ) - - ax.plot( - roa, - -self.derived["q_MWmiller"], - c="g", - lw=2, - label="$P$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qOhm_MWmiller"], - c="k", - lw=2, - label="$P_{Oh}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qRF_MWmiller"], - c="b", - lw=2, - label="$P_{RF}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qBEAM_MWmiller"], - c="pink", - lw=2, - label="$P_{NBI}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qFus_MWmiller"], - c="r", - lw=2, - label="$P_{fus}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - -self.derived["qe_rad_MWmiller"], - c="c", - lw=2, - label="$P_{rad}$" if leg else "", - ls=ls, - ) - ax.plot( - roa, - self.derived["qz_MWmiller"], - c="orange", - lw=1, - label="$P_{ionz.}$" if leg else "", - ls=ls, - ) - - # P = Pe+Pi - # ax.plot(roa,P,ls='-',lw=3,alpha=0.1,c='k',label='$W$ (MJ)') - - ax.plot(roa, -P, lw=1, c="y", label="sum" if leg else "", ls=ls) - ax.set_xlabel("r/a") - ax.set_xlim([0, 1]) - ax.set_ylabel("$P$ (MW)") - # ax.set_ylim(bottom=0) - ax.set_title("Total Thermal Flows") - - GRAPHICStools.addLegendApart( - ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" - ) - - ax.axhline(y=0.0, lw=0.5, ls="--", c="k") - # GRAPHICStools.drawLineWithTxt(ax,0.0,label='',orientation='vertical',color='k',lw=1,ls='--',alpha=1.0,fontsize=10,fromtop=0.85,fontweight='normal', - # verticalalignment='bottom',horizontalalignment='left',separation=0) - - GRAPHICStools.addDenseAxis(ax) - GRAPHICStools.autoscale_y(ax, bottomy=0) - - def plot_temps(self, ax=None, leg=False, col="b", lw=2, extralab="", fs=10): - if ax is None: - fig, ax = plt.subplots() - - rho = self.profiles["rho(-)"] - - var = self.profiles["te(keV)"] - varL = "$T_e$ , $T_i$ (keV)" - if leg: - lab = extralab + "e" - 
else: - lab = "" - ax.plot(rho, var, lw=lw, ls="-", label=lab, c=col) - var = self.profiles["ti(keV)"][:, 0] - if leg: - lab = extralab + "i" - else: - lab = "" - ax.plot(rho, var, lw=lw, ls="--", label=lab, c=col) - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - GRAPHICStools.autoscale_y(ax, bottomy=0) - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - - def plot_dens(self, ax=None, leg=False, col="b", lw=2, extralab="", fs=10): - if ax is None: - fig, ax = plt.subplots() - - rho = self.profiles["rho(-)"] - - var = self.profiles["ne(10^19/m^3)"] * 1e-1 - varL = "$n_e$ ($10^{20}/m^3$)" - if leg: - lab = extralab + "e" - else: - lab = "" - ax.plot(rho, var, lw=lw, ls="-", label=lab, c=col) - ax.set_xlim([0, 1]) - ax.set_xlabel("$\\rho$") - ax.set_ylabel(varL) - GRAPHICStools.autoscale_y(ax, bottomy=0) - ax.legend(loc="best", fontsize=fs) - - GRAPHICStools.addDenseAxis(ax) - - def plotGeometry(self, ax=None, surfaces_rho=np.linspace(0, 1, 11), color="b", label = '', lw=1.0, lw1=2.0): - if ("R_surface" in self.derived) and (self.derived["R_surface"] is not None): - if ax is None: - plt.ion() - fig, ax = plt.subplots() - provided = False - else: - provided = True - - for rho in surfaces_rho: - ir = np.argmin(np.abs(self.profiles["rho(-)"] - rho)) - - ax.plot( - self.derived["R_surface"][ir, :], - self.derived["Z_surface"][ir, :], - "-", - lw=lw if rho<1.0 else lw1, - c=color, - ) - - ax.axhline(y=0, ls="--", lw=0.2, c="k") - ax.plot( - [self.profiles["rmaj(m)"][0]], - [self.profiles["zmag(m)"][0]], - "o", - markersize=2, - c=color, - label = label - ) - - if not provided: - ax.set_xlabel("R (m)") - ax.set_ylabel("Z (m)") - ax.set_title("Surfaces @ rho=" + str(surfaces_rho), fontsize=8) - ax.set_aspect("equal") - else: - print("\t- Cannot plot flux surface geometry", typeMsg="w") - - def plotPeaking( - self, ax, c="b", marker="*", label="", debugPlot=False, printVals=False - ): - nu_effCGYRO = self.derived["nu_eff"] * 2 / 
self.derived["Zeff_vol"] - ne_peaking = self.derived["ne_peaking0.2"] - ax.scatter([nu_effCGYRO], [ne_peaking], s=400, c=c, marker=marker, label=label) - - if printVals: - print(f"\t- nu_eff = {nu_effCGYRO}, ne_peaking = {ne_peaking}") - - # Extra - r = self.profiles["rmin(m)"] - volp = self.derived["volp_miller"] - ix = np.argmin(np.abs(self.profiles["rho(-)"] - 0.9)) - - if debugPlot: - fig, axq = plt.subplots() - - ne = self.profiles["ne(10^19/m^3)"] - axq.plot(self.profiles["rho(-)"], ne, color="m") - ne_vol = ( - CALCtools.integrateFS(ne * 0.1, r, volp)[-1] / self.derived["volume"] - ) - axq.axhline(y=ne_vol * 10, color="m") - - ne = copy.deepcopy(self.profiles["ne(10^19/m^3)"]) - ne[ix:] = (0,) * len(ne[ix:]) - ne_vol = CALCtools.integrateFS(ne * 0.1, r, volp)[-1] / self.derived["volume"] - ne_peaking0 = ( - ne[np.argmin(np.abs(self.derived["rho_pol"] - 0.2))] * 0.1 / ne_vol - ) - - if debugPlot: - axq.plot(self.profiles["rho(-)"], ne, color="r") - axq.axhline(y=ne_vol * 10, color="r") - - ne = copy.deepcopy(self.profiles["ne(10^19/m^3)"]) - ne[ix:] = (ne[ix],) * len(ne[ix:]) - ne_vol = CALCtools.integrateFS(ne * 0.1, r, volp)[-1] / self.derived["volume"] - ne_peaking1 = ( - ne[np.argmin(np.abs(self.derived["rho_pol"] - 0.2))] * 0.1 / ne_vol - ) - - ne_peaking0 = ne_peaking - - ax.errorbar( - [nu_effCGYRO], - [ne_peaking], - yerr=[[ne_peaking - ne_peaking1], [ne_peaking0 - ne_peaking]], - marker=marker, - c=c, - markersize=16, - capsize=2.0, - fmt="s", - elinewidth=1.0, - capthick=1.0, - ) - - if debugPlot: - axq.plot(self.profiles["rho(-)"], ne, color="b") - axq.axhline(y=ne_vol * 10, color="b") - plt.show() - - # print(f'{ne_peaking0}-{ne_peaking}-{ne_peaking1}') - - return nu_effCGYRO, ne_peaking - - def plotRelevant(self, axs = None, color = 'b', label ='', lw = 1, ms = 1): - - if axs is None: - fig = plt.figure() - axs = fig.subplot_mosaic( - """ - ABCDH - AEFGI - """ - ) - axs = [axs['A'], axs['B'], axs['C'], axs['D'], axs['E'], axs['F'], axs['G'], 
axs['H'], axs['I']] - - # ---------------------------------- - # Equilibria - # ---------------------------------- - - ax = axs[0] - rho = np.linspace(0, 1, 21) - - self.plotGeometry(ax=ax, surfaces_rho=rho, label=label, color=color, lw=lw, lw1=lw*3) - - ax.set_xlabel("R (m)") - ax.set_ylabel("Z (m)") - ax.set_aspect("equal") - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Equilibria") - - # ---------------------------------- - # Kinetic Profiles - # ---------------------------------- - - # T profiles - ax = axs[1] - - ax.plot(self.profiles['rho(-)'], self.profiles['te(keV)'], '-o', markersize=ms, lw = lw, label=label+', e', color=color) - ax.plot(self.profiles['rho(-)'], self.profiles['ti(keV)'][:,0], '--*', markersize=ms, lw = lw, label=label+', i', color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$T$ (keV)") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Temperatures") - - # ne profiles - ax = axs[2] - - ax.plot(self.profiles['rho(-)'], self.profiles['ne(10^19/m^3)']*1E-1, '-o', markersize=ms, lw = lw, label=label, color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$n_e$ ($10^{20}m^{-3}$)") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Electron Density") - - # ---------------------------------- - # Pressure - # ---------------------------------- - - ax = axs[3] - - ax.plot(self.profiles['rho(-)'], self.derived['ptot_manual'], '-o', markersize=ms, lw = lw, label=label, color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$p_{kin}$ (MPa)") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Total Pressure") - - # ---------------------------------- - # Current - # ---------------------------------- - - # q-profile - ax = axs[4] - - ax.plot(self.profiles['rho(-)'], 
self.profiles['q(-)'], '-o', markersize=ms, lw = lw, label=label, color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$q$") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Safety Factor") - - # ---------------------------------- - # Powers - # ---------------------------------- - - # RF - ax = axs[5] - - ax.plot(self.profiles['rho(-)'], self.profiles['qrfe(MW/m^3)'], '-o', markersize=ms, lw = lw, label=label+', e', color=color) - ax.plot(self.profiles['rho(-)'], self.profiles['qrfi(MW/m^3)'], '--*', markersize=ms, lw = lw, label=label+', i', color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$P_{ich}$ (MW/m$^3$)") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("ICH Power Deposition") - - # Ohmic - ax = axs[6] - - ax.plot(self.profiles['rho(-)'], self.profiles['qohme(MW/m^3)'], '-o', markersize=ms, lw = lw, label=label, color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$P_{oh}$ (MW/m$^3$)") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Ohmic Power Deposition") - - # ---------------------------------- - # Heat fluxes - # ---------------------------------- - - ax = axs[7] - - ax.plot(self.profiles['rho(-)'], self.derived['qe_MWm2'], '-o', markersize=ms, lw = lw, label=label+', e', color=color) - ax.plot(self.profiles['rho(-)'], self.derived['qi_MWm2'], '--*', markersize=ms, lw = lw, label=label+', i', color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$Q$ ($MW/m^2$)") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Energy Fluxes") - - # ---------------------------------- - # Dynamic targets - # ---------------------------------- - - ax = axs[8] - - ax.plot(self.profiles['rho(-)'], self.derived['qrad'], '-o', markersize=ms, lw = 
lw, label=label+', rad', color=color) - ax.plot(self.profiles['rho(-)'], self.profiles['qei(MW/m^3)'], '--*', markersize=ms, lw = lw, label=label+', exc', color=color) - if 'qfuse(MW/m^3)' in self.profiles: - ax.plot(self.profiles['rho(-)'], self.profiles['qfuse(MW/m^3)']+self.profiles['qfusi(MW/m^3)'], '-.s', markersize=ms, lw = lw, label=label+', fus', color=color) - - ax.set_xlabel("$\\rho_N$") - ax.set_ylabel("$Q$ ($MW/m^2$)") - #ax.set_ylim(bottom = 0) - ax.set_xlim(0,1) - ax.legend(prop={'size':8}) - GRAPHICStools.addDenseAxis(ax) - ax.set_title("Dynamic Targets") - - - def csv(self, file="input.gacode.xlsx"): - dictExcel = IOtools.OrderedDict() - - for ikey in self.profiles: - print(ikey) - if len(self.profiles[ikey].shape) == 1: - dictExcel[ikey] = self.profiles[ikey] - else: - dictExcel[ikey] = self.profiles[ikey][:, 0] - - IOtools.writeExcel_fromDict(dictExcel, file, fromRow=1) - - def parabolizePlasma(self): - PORTALSinteraction.parabolizePlasma(self) - - def changeRFpower(self, PrfMW=25.0): - PORTALSinteraction.changeRFpower(self, PrfMW=PrfMW) - - def imposeBCtemps(self, TkeV=0.5, rho=0.9, typeEdge="linear", Tesep=0.1, Tisep=0.2): - PORTALSinteraction.imposeBCtemps( - self, TkeV=TkeV, rho=rho, typeEdge=typeEdge, Tesep=Tesep, Tisep=Tisep - ) - - def imposeBCdens(self, n20=2.0, rho=0.9, typeEdge="linear", nedge20=0.5): - PORTALSinteraction.imposeBCdens( - self, n20=n20, rho=rho, typeEdge=typeEdge, nedge20=nedge20 - ) - - def addSawtoothEffectOnOhmic(self, PohTot, mixRadius=None, plotYN=False): - """ - This will implement a flat profile inside the mixRadius to reduce the ohmic power by certain amount - """ - - if mixRadius is None: - mixRadius = self.profiles["rho(-)"][np.where(self.profiles["q(-)"] > 1)][0] - - print(f"\t- Original Ohmic power: {self.derived['qOhm_MWmiller'][-1]:.2f}MW") - Ohmic_old = copy.deepcopy(self.profiles["qohme(MW/m^3)"]) - - dvol = self.derived["volp_miller"] * np.append( - [0], np.diff(self.profiles["rmin(m)"]) - ) - - print( - 
f"\t- Will implement sawtooth ohmic power correction inside rho={mixRadius}" - ) - Psaw = CDFtools.profilePower( - self.profiles["rho(-)"], - dvol, - PohTot - self.derived["qOhm_MWmiller"][-1], - mixRadius, - ) - self.profiles["qohme(MW/m^3)"] += Psaw - self.deriveQuantities() - - print(f"\t- New Ohmic power: {self.derived['qOhm_MWmiller'][-1]:.2f}MW") - Ohmic_new = copy.deepcopy(self.profiles["qohme(MW/m^3)"]) - - if plotYN: - fig, ax = plt.subplots() - ax.plot(self.profiles["rho(-)"], Ohmic_old, "r", lw=2) - ax.plot(self.profiles["rho(-)"], Ohmic_new, "g", lw=2) - plt.show() - - # --------------------------------------------------------------------------------------------------------------------------------------- - # Code conversions - # --------------------------------------------------------------------------------------------------------------------------------------- - - def to_tglf(self, rhos=[0.5], TGLFsettings=1): - - # <> Function to interpolate a curve <> - from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function - - inputsTGLF = {} - for rho in rhos: - - # --------------------------------------------------------------------------------------------------------------------------------------- - # Define interpolator at this rho - # --------------------------------------------------------------------------------------------------------------------------------------- - - def interpolator(y): - return interpolation_function(rho, self.profiles['rho(-)'],y).item() - - TGLFinput, TGLFoptions, label = GACODEdefaults.addTGLFcontrol(TGLFsettings) - - # --------------------------------------------------------------------------------------------------------------------------------------- - # Controls come from options - # --------------------------------------------------------------------------------------------------------------------------------------- - - controls = TGLFoptions - - # 
--------------------------------------------------------------------------------------------------------------------------------------- - # Species come from profiles - # --------------------------------------------------------------------------------------------------------------------------------------- - - #mass_ref = self.derived["mi_ref"] - # input.gacode uses the deuterium mass as reference already (https://github.com/gafusion/gacode/issues/398), so this should be 2.0 - mass_ref = 2.0 - - mass_e = 0.000272445 * mass_ref - - species = { - 1: { - 'ZS': -1.0, - 'MASS': mass_e/mass_ref, - 'RLNS': interpolator(self.derived['aLne']), - 'RLTS': interpolator(self.derived['aLTe']), - 'TAUS': 1.0, - 'AS': 1.0, - 'VPAR': interpolator(self.derived['vpar']), - 'VPAR_SHEAR': interpolator(self.derived['vpar_shear']), - 'VNS_SHEAR': 0.0, - 'VTS_SHEAR': 0.0}, - } - - for i in range(len(self.Species)): - species[i+2] = { - 'ZS': self.Species[i]['Z'], - 'MASS': self.Species[i]['A']/mass_ref, - 'RLNS': interpolator(self.derived['aLni'][:,i]), - 'RLTS': interpolator(self.derived['aLTi'][:,0] if self.Species[i]['S'] == 'therm' else self.derived["aLTi"][:,i]), - 'TAUS': interpolator(self.derived["tite_all"][:,i]), - 'AS': interpolator(self.derived['fi'][:,i]), - 'VPAR': interpolator(self.derived['vpar']), - 'VPAR_SHEAR': interpolator(self.derived['vpar_shear']), - 'VNS_SHEAR': 0.0, - 'VTS_SHEAR': 0.0 - } - - # --------------------------------------------------------------------------------------------------------------------------------------- - # Plasma comes from profiles - # --------------------------------------------------------------------------------------------------------------------------------------- - - plasma = { - 'NS': len(species)+1, - 'SIGN_BT': -1.0, - 'SIGN_IT': -1.0, - 'VEXB': 0.0, - 'VEXB_SHEAR': interpolator(self.derived['vexb_shear']), - 'BETAE': interpolator(self.derived['betae']), - 'XNUE': interpolator(self.derived['xnue']), - 'ZEFF': 
interpolator(self.derived['Zeff']), - 'DEBYE': interpolator(self.derived['debye']), - } - - # --------------------------------------------------------------------------------------------------------------------------------------- - # Geometry comes from profiles - # --------------------------------------------------------------------------------------------------------------------------------------- - - parameters = { - 'RMIN_LOC': self.derived['roa'], - 'RMAJ_LOC': self.derived['Rmajoa'], - 'ZMAJ_LOC': self.derived["Zmagoa"], - 'DRMINDX_LOC': self.derived['drmin/dr'], - 'DRMAJDX_LOC': self.derived['dRmaj/dr'], - 'DZMAJDX_LOC': self.derived['dZmaj/dr'], - 'Q_LOC': self.profiles["q(-)"], - 'KAPPA_LOC': self.profiles["kappa(-)"], - 'S_KAPPA_LOC': self.derived['s_kappa'], - 'DELTA_LOC': self.profiles["delta(-)"], - 'S_DELTA_LOC': self.derived['s_delta'], - 'ZETA_LOC': self.profiles["zeta(-)"], - 'S_ZETA_LOC': self.derived['s_zeta'], - 'P_PRIME_LOC': self.derived['pprime'], - 'Q_PRIME_LOC': self.derived['s_q'], - } - - geom = {} - for k in parameters: - par = torch.nan_to_num(torch.from_numpy(parameters[k]) if type(parameters[k]) is np.ndarray else parameters[k], nan=0.0, posinf=1E10, neginf=-1E10) - geom[k] = interpolator(par) - - geom['BETA_LOC'] = 0.0 - geom['KX0_LOC'] = 0.0 - - # --------------------------------------------------------------------------------------------------------------------------------------- - # Merging - # --------------------------------------------------------------------------------------------------------------------------------------- - - input_dict = {**controls, **plasma, **geom} - - for i in range(len(species)): - for k in species[i+1]: - input_dict[f'{k}_{i+1}'] = species[i+1][k] - - inputsTGLF[rho] = input_dict - - return inputsTGLF - - def to_transp(self, folder = '~/scratch/', shot = '12345', runid = 'P01', times = [0.0,1.0], Vsurf = 0.0): - - print("\t- Converting to TRANSP") - folder = IOtools.expandPath(folder) - 
folder.mkdir(parents=True, exist_ok=True) - - transp = TRANSPhelpers.transp_run(folder, shot, runid) - for time in times: - transp.populate_time.from_profiles(time,self, Vsurf = Vsurf) - - transp.write_ufiles() - - return transp - - def to_eped(self, ped_rho = 0.95): - - neped_19 = np.interp(ped_rho, self.profiles['rho(-)'], self.profiles['ne(10^19/m^3)']) - - eped_evaluation = { - 'Ip': np.abs(self.profiles['current(MA)'][0]), - 'Bt': np.abs(self.profiles['bcentr(T)'][0]), - 'R': np.abs(self.profiles['rcentr(m)'][0]), - 'a': np.abs(self.derived['a']), - 'kappa995': np.abs(self.derived['kappa995']), - 'delta995': np.abs(self.derived['delta995']), - 'neped': np.abs(neped_19), - 'betan': np.abs(self.derived['BetaN_engineering']), - 'zeff': np.abs(self.derived['Zeff_vol']), - 'tesep': np.abs(self.profiles['te(keV)'][-1])*1E3, - 'nesep_ratio': np.abs(self.profiles['ne(10^19/m^3)'][-1] / neped_19), - } - - return eped_evaluation - -class DataTable: - def __init__(self, variables=None): - - if variables is not None: - self.variables = variables - else: - - # Default for confinement mode access studies (JWH 03/2024) - self.variables = { - "Rgeo": ["rcentr(m)", "pos_0", "profiles", ".2f", 1, "m"], - "ageo": ["a", None, "derived", ".2f", 1, "m"], - "volume": ["volume", None, "derived", ".2f", 1, "m"], - "kappa @psi=0.95": ["kappa(-)", "psi_0.95", "profiles", ".2f", 1, None], - "delta @psi=0.95": ["delta(-)", "psi_0.95", "profiles", ".2f", 1, None], - "Bt": ["bcentr(T)", "pos_0", "profiles", ".1f", 1, "T"], - "Ip": ["current(MA)", "pos_0", "profiles", ".1f", 1, "MA"], - "Pin": ["qIn", None, "derived", ".1f", 1, "MW"], - "Te @rho=0.9": ["te(keV)", "rho_0.90", "profiles", ".2f", 1, "keV"], - "Ti/Te @rho=0.9": ["tite", "rho_0.90", "derived", ".2f", 1, None], - "ne @rho=0.9": [ - "ne(10^19/m^3)", - "rho_0.90", - "profiles", - ".2f", - 0.1, - "E20m-3", - ], - "ptot @rho=0.9": [ - "ptot_manual", - "rho_0.90", - "derived", - ".1f", - 1e3, - "kPa", - ], - "Zeff": ["Zeff_vol", None, 
"derived", ".1f", 1, None], - "fDT": ["fmain", None, "derived", ".2f", 1, None], - "H89p": ["H89", None, "derived", ".2f", 1, None], - "H98y2": ["H98", None, "derived", ".2f", 1, None], - "ne (vol avg)": ["ne_vol20", None, "derived", ".2f", 1, "E20m-3"], - "Ptop": ["ptop", None, "derived", ".1f", 1, "Pa"], - "fG": ["fG", None, "derived", ".2f", 1, None], - "Pfus": ["Pfus", None, "derived", ".1f", 1, "MW"], - "Prad": ["Prad", None, "derived", ".1f", 1, "MW"], - "Q": ["Q", None, "derived", ".2f", 1, None], - "Pnet @rho=0.9": ["qTr", "rho_0.90", "derived", ".1f", 1, "MW"], - "Qi/Qe @rho=0.9": ["QiQe", "rho_0.90", "derived", ".2f", 1, None], - } - - self.data = [] - - def export_to_csv(self, filename, title=None): - - title_data = [""] - for key in self.variables: - if self.variables[key][5] is None: - title_data.append(f"{key}") - else: - title_data.append(f"{key} ({self.variables[key][5]})") - - # Open a file with the given filename in write mode - with open(filename, mode="w", newline="") as file: - writer = csv.writer(file) - - # Write the title row first if it is provided - if title: - writer.writerow([title] + [""] * (len(self.data[0]) - 1)) - - writer.writerow(title_data) - - # Write each row in self.data to the CSV file - for row in self.data: - writer.writerow(row) - -def plotAll(profiles_list, figs=None, extralabs=None, lastRhoGradients=0.89): - if figs is not None: - figProf_1, figProf_2, figProf_3, figProf_4, figFlows, figProf_6, fig7 = figs - fn = None - else: - from mitim_tools.misc_tools.GUItools import FigureNotebook - - fn = FigureNotebook("Profiles", geometry="1800x900") - figProf_1, figProf_2, figProf_3, figProf_4, figFlows, figProf_6, fig7 = add_figures(fn) - - grid = plt.GridSpec(3, 3, hspace=0.3, wspace=0.3) - axsProf_1 = [ - figProf_1.add_subplot(grid[0, 0]), - figProf_1.add_subplot(grid[1, 0]), - figProf_1.add_subplot(grid[2, 0]), - figProf_1.add_subplot(grid[0, 1]), - figProf_1.add_subplot(grid[1, 1]), - figProf_1.add_subplot(grid[2, 1]), - 
figProf_1.add_subplot(grid[0, 2]), - figProf_1.add_subplot(grid[1, 2]), - figProf_1.add_subplot(grid[2, 2]), - ] - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsProf_2 = [ - figProf_2.add_subplot(grid[0, 0]), - figProf_2.add_subplot(grid[0, 1]), - figProf_2.add_subplot(grid[1, 0]), - figProf_2.add_subplot(grid[1, 1]), - figProf_2.add_subplot(grid[0, 2]), - figProf_2.add_subplot(grid[1, 2]), - ] - grid = plt.GridSpec(3, 4, hspace=0.3, wspace=0.3) - ax00c = figProf_3.add_subplot(grid[0, 0]) - axsProf_3 = [ - ax00c, - figProf_3.add_subplot(grid[1, 0], sharex=ax00c), - figProf_3.add_subplot(grid[2, 0]), - figProf_3.add_subplot(grid[0, 1]), - figProf_3.add_subplot(grid[1, 1]), - figProf_3.add_subplot(grid[2, 1]), - figProf_3.add_subplot(grid[0, 2]), - figProf_3.add_subplot(grid[1, 2]), - figProf_3.add_subplot(grid[2, 2]), - figProf_3.add_subplot(grid[0, 3]), - figProf_3.add_subplot(grid[1, 3]), - figProf_3.add_subplot(grid[2, 3]), - ] - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsProf_4 = [ - figProf_4.add_subplot(grid[0, 0]), - figProf_4.add_subplot(grid[1, 0]), - figProf_4.add_subplot(grid[0, 1]), - figProf_4.add_subplot(grid[1, 1]), - figProf_4.add_subplot(grid[0, 2]), - figProf_4.add_subplot(grid[1, 2]), - ] - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsFlows = [ - figFlows.add_subplot(grid[0, 0]), - figFlows.add_subplot(grid[1, 0]), - figFlows.add_subplot(grid[0, 1]), - figFlows.add_subplot(grid[0, 2]), - figFlows.add_subplot(grid[1, 1]), - figFlows.add_subplot(grid[1, 2]), - ] - - grid = plt.GridSpec(2, 4, hspace=0.3, wspace=0.3) - axsProf_6 = [ - figProf_6.add_subplot(grid[0, 0]), - figProf_6.add_subplot(grid[:, 1]), - figProf_6.add_subplot(grid[0, 2]), - figProf_6.add_subplot(grid[1, 0]), - figProf_6.add_subplot(grid[1, 2]), - figProf_6.add_subplot(grid[0, 3]), - figProf_6.add_subplot(grid[1, 3]), - ] - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsImps = [ - fig7.add_subplot(grid[0, 0]), - fig7.add_subplot(grid[0, 
1]), - fig7.add_subplot(grid[0, 2]), - fig7.add_subplot(grid[1, 0]), - fig7.add_subplot(grid[1, 1]), - fig7.add_subplot(grid[1, 2]), - ] - - ls = GRAPHICStools.listLS() - colors = GRAPHICStools.listColors() - for i, profiles in enumerate(profiles_list): - if extralabs is None: - extralab = f"#{i}, " - else: - extralab = f"{extralabs[i]}, " - profiles.plot( - axs1=axsProf_1, - axs2=axsProf_2, - axs3=axsProf_3, - axs4=axsProf_4, - axsFlows=axsFlows, - axs6=axsProf_6, - axsImps=axsImps, - color=colors[i], - legYN=True, - extralab=extralab, - lsFlows=ls[i], - legFlows=i == 0, - showtexts=False, - lastRhoGradients=lastRhoGradients, - ) - - return fn - - -def readTGYRO_profile_extra(file, varLabel="B_unit (T)"): - with open(file) as f: - aux = f.readlines() - - lenn = int(aux[36].split()[-1]) - - i = 38 - allVec = [] - while i < len(aux): - vec = np.array([float(j) for j in aux[i : i + lenn]]) - i += lenn - allVec.append(vec) - allVec = np.array(allVec) - - dictL = OrderedDict() - for line in aux[2:35]: - lab = line.split("(:)")[-1].split("\n")[0] - try: - dictL[lab] = int(line.split()[1]) - except: - dictL[lab] = [int(j) for j in line.split()[1].split("-")] - - for i in dictL: - if i.strip(" ") == varLabel: - val = allVec[dictL[i] - 1] - break - - return val - - -def aLT(r, p): - return ( - r[-1] - * CALCtools.produceGradient( - torch.from_numpy(r).to(torch.double), torch.from_numpy(p).to(torch.double) - ) - .cpu() - .cpu().numpy() - ) - - -def grad(r, p): - return MATHtools.deriv(torch.from_numpy(r), torch.from_numpy(p), array=False) - - -def ionName(Z, A): - # Based on Z - if Z == 2: - return "He" - elif Z == 9: - return "F" - elif Z == 6: - return "C" - elif Z == 11: - return "Na" - elif Z == 30: - return "Zn" - elif Z == 31: - return "Ga" - - # # Based on Mass (this is the correct way, since the radiation needs to be calculated with the full element) - # if A in [3,4]: return 'He' - # elif A == 18: return 'F' - # elif A == 12: return 'C' - # elif A == 22: return 
'Na' - # elif A == 60: return 'Zn' - # elif A == 69: return 'Ga' - - -def gradientsMerger(p0, p_true, roa=0.46, blending=0.1): - p = copy.deepcopy(p0) - - aLTe_true = np.interp( - p.derived["roa"], p_true.derived["roa"], p_true.derived["aLTe"] - ) - aLTi_true = np.interp( - p.derived["roa"], p_true.derived["roa"], p_true.derived["aLTi"][:, 0] - ) - aLne_true = np.interp( - p.derived["roa"], p_true.derived["roa"], p_true.derived["aLne"] - ) - - ix1 = np.argmin(np.abs(p.derived["roa"] - roa + blending)) - ix2 = np.argmin(np.abs(p.derived["roa"] - roa)) - - aLT0 = aLTe_true[: ix1 + 1] - aLT2 = p.derived["aLTe"][ix2:] - aLT1 = np.interp( - p.derived["roa"][ix1 : ix2 + 1], - [p.derived["roa"][ix1], p.derived["roa"][ix2]], - [aLT0[-1], aLT2[0]], - )[1:-1] - - aLTe = np.append(np.append(aLT0, aLT1), aLT2) - Te = ( - CALCtools.integrateGradient( - torch.from_numpy(p.derived["roa"]).unsqueeze(0), - torch.Tensor(aLTe).unsqueeze(0), - p.profiles["te(keV)"][-1], - ) - .cpu() - .cpu().numpy()[0] - ) - - aLT0 = aLTi_true[: ix1 + 1] - aLT2 = p.derived["aLTi"][ix2:, 0] - aLT1 = np.interp( - p.derived["roa"][ix1 : ix2 + 1], - [p.derived["roa"][ix1], p.derived["roa"][ix2]], - [aLT0[-1], aLT2[0]], - )[1:-1] - - aLTi = np.append(np.append(aLT0, aLT1), aLT2) - Ti = ( - CALCtools.integrateGradient( - torch.from_numpy(p.derived["roa"]).unsqueeze(0), - torch.Tensor(aLTi).unsqueeze(0), - p.profiles["ti(keV)"][-1, 0], - ) - .cpu() - .cpu().numpy()[0] - ) - - aLT0 = aLne_true[: ix1 + 1] - aLT2 = p.derived["aLne"][ix2:] - aLT1 = np.interp( - p.derived["roa"][ix1 : ix2 + 1], - [p.derived["roa"][ix1], p.derived["roa"][ix2]], - [aLT0[-1], aLT2[0]], - )[1:-1] - - aLne = np.append(np.append(aLT0, aLT1), aLT2) - ne = ( - CALCtools.integrateGradient( - torch.from_numpy(p.derived["roa"]).unsqueeze(0), - torch.Tensor(aLne).unsqueeze(0), - p.profiles["ne(10^19/m^3)"][-1], - ) - .cpu() - .cpu().numpy()[0] - ) - - p.profiles["te(keV)"] = Te - p.profiles["ti(keV)"][:, 0] = Ti - p.profiles["ne(10^19/m^3)"] 
= ne - - p.deriveQuantities() - - return p - -def add_figures(fn, fnlab='', fnlab_pre='', tab_color=None): - - figProf_1 = fn.add_figure(label= fnlab_pre + "Profiles" + fnlab, tab_color=tab_color) - figProf_2 = fn.add_figure(label= fnlab_pre + "Powers" + fnlab, tab_color=tab_color) - figProf_3 = fn.add_figure(label= fnlab_pre + "Geometry" + fnlab, tab_color=tab_color) - figProf_4 = fn.add_figure(label= fnlab_pre + "Gradients" + fnlab, tab_color=tab_color) - figFlows = fn.add_figure(label= fnlab_pre + "Flows" + fnlab, tab_color=tab_color) - figProf_6 = fn.add_figure(label= fnlab_pre + "Other" + fnlab, tab_color=tab_color) - fig7 = fn.add_figure(label= fnlab_pre + "Impurities" + fnlab, tab_color=tab_color) - figs = [figProf_1, figProf_2, figProf_3, figProf_4, figFlows, figProf_6, fig7] + return geo_volume_prime, geo_surf, geo_fluxsurfave_grad_r, geo_fluxsurfave_bp2, geo_fluxsurfave_bt2, bt_geo0 - return figs +def xsec_area_RZ(R,Z): + # calculates the cross-sectional area of the plasma for each flux surface + xsec_area = [] + for i in range(R.shape[0]): + R0 = np.max(R[i,:]) - np.min(R[i,:]) + Z0 = np.max(Z[i,:]) - np.min(Z[i,:]) + xsec_area.append(np.trapz(R[i], Z[i])) -def impurity_location(profiles, impurity_of_interest): + xsec_area = np.array(xsec_area) - position_of_impurity = None - for i in range(len(profiles.Species)): - if profiles.Species[i]["N"] == impurity_of_interest: - if position_of_impurity is not None: - raise ValueError(f"[MITIM] Species {impurity_of_interest} found at positions {position_of_impurity} and {i}") - position_of_impurity = i - if position_of_impurity is None: - raise ValueError(f"[MITIM] Species {impurity_of_interest} not found in profiles") + return xsec_area - return position_of_impurity \ No newline at end of file diff --git a/src/mitim_tools/gacode_tools/TGLFtools.py b/src/mitim_tools/gacode_tools/TGLFtools.py index ee6c47aa..66bd75f6 100644 --- a/src/mitim_tools/gacode_tools/TGLFtools.py +++ 
b/src/mitim_tools/gacode_tools/TGLFtools.py @@ -4,13 +4,16 @@ import numpy as np import xarray as xr import matplotlib.pyplot as plt -from mitim_tools.gacode_tools import TGYROtools, PROFILEStools +from mitim_tools import __version__ as mitim_version +from mitim_tools import __mitimroot__ +from mitim_tools.gacode_tools import TGYROtools from mitim_tools.misc_tools import ( IOtools, GRAPHICStools, PLASMAtools, GUItools, ) +from mitim_tools.plasmastate_tools.utils import state_plotting from mitim_tools.gacode_tools.utils import ( NORMtools, GACODEinterpret, @@ -18,14 +21,15 @@ GACODEplotting, GACODErun, ) +from mitim_tools.simulation_tools import SIMtools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed -mi_D = 2.01355 +from mitim_tools.misc_tools.PLASMAtools import md_u MAX_TGLF_SPECIES = 6 -class TGLF: +class TGLF(SIMtools.mitim_simulation): def __init__( self, rhos=[0.4, 0.6], # rho locations of interest @@ -34,97 +38,50 @@ def __init__( avTime=0.0, # Averaging window to extract CDF file alreadyRun=None, # Option2: Do more stuff with a class that has already been created and store ): - """ - TGLF class that manages the run and the results. - - The philosophy of this is that a single 'tglf' class will handle the tglf simulation and results - at one time slice but with possibility of several radii at once. - - It can also handle different TGLF settings, running them one by one, storing results in folders and then - grabbing them. - - Scans can also be run. At several radii at once if wanted. - - *Note* - The 'run' command does not require label. When performing a 'read', the results extracted from - the specified folder will be saved with the label indicated in 'read', in the "results" or "scans" - dictionaries. Plotting then can happen with more than one label of the same category. 
- - *Note* - The 'run' command uses input.tglf from the specified folder, but one can change the TGLFsettings presets, - extraOptions and multipliers. The modified inputs is not rewritten in the actual folder, it is only written - in the tmp folder on which the simulation takes place. - - *Note* - After a 'prep' command, the class can be detached from the file system, as it stores the input tglf file - to run later with different options. It also stores the Normalizations, since the runs are expected - to only change dimensionless parameteres. - - ************************************** - ***** Example use for standalone ***** - ************************************** - - # Initialize class, by specifying where the inputs to TGLF come from (TRANSP cdf) - tglf = TGLF(cdf='~/testTGLF/12345B12.CDF',time=1.45,avTime=0.1,rhos=[0.4,0.6]) - - # Prepare TGLF (this will create input.tglf in the specified folder) - cdf = tglf.prep('~/testTGLF/') - - # Run standalone TGLF (this will find the input.tglf in the previous folder, - # and then copy to this specify TGLF run, and run it there) - tglf.run(subFolderTGLF='tglf1/',TGLFsettings=1,extraOptions={'NS':3}) - # Read results - tglf.read(label='run1',folder='~/testTGLF/tglf1/') + super().__init__(rhos=rhos) - # Plot - plt.ion(); tglf.plot(labels=['run1']) + def code_call(folder, p, n = 1, additional_command="", **kwargs): + return f"tglf -e {folder} -n {n} -p {p} {additional_command}" - ********************************* - ***** Example use for scans ***** - ********************************* + def code_slurm_settings(name, minutes, total_cores_required, cores_per_code_call, type_of_submission, array_list=None, **kwargs_slurm): - # Initialize class, by specifying where the inputs to TGLF come from (TRANSP cdf) - tglf = TGLF(cdf='~/testTGLF/12345B12.CDF',time=1.45,avTime=0.1,rhos=[0.4,0.6]) - - # Prepare TGLF (this will create input.tglf in the specified folder) - cdf = tglf.prep('~/testTGLF/') - - # Run - 
tglf.runScan('scan1/',TGLFsettings=1,varUpDown=np.linspace(0.5,2.0,20),variable='RLTS_2') - - # Read scan - tglf.readScan(label='scan1',variable='RLTS_2') - - # Plot - plt.ion(); tglf.plotScan(labels=['scan1'],variableLabel='RLTS_2') - - **************************** - ***** Special analysis ***** - **************************** - - Following the prep phase, we can run "runAnalysis()" and select among the different options: - - Chi_inc - - D and V for trace impurity - Then, plotAnalysis() with the right option for different labels too - - **************************** - ***** Do more stuff with a class that has already been created and store - **************************** - - tglf = TGLF(alreadyRun=previousClass) - tglf.FolderGACODE = '~/testTGLF/' + slurm_settings = { + "name": name, + "minutes": minutes, + 'job_array_limit': None, # Limit to this number at most running jobs at the same time? + } - ** Modify the class as wish, and do run,read, etc ** - ** Because normalizations are stored in the prep phase, that's all ready ** - """ - print( - "\n-----------------------------------------------------------------------------------------" - ) + if type_of_submission == "slurm_standard": + + slurm_settings['ntasks'] = total_cores_required // cores_per_code_call + slurm_settings['cpuspertask'] = cores_per_code_call + + elif type_of_submission == "slurm_array": + + slurm_settings['ntasks'] = 1 + slurm_settings['cpuspertask'] = cores_per_code_call + slurm_settings['job_array'] = ",".join(array_list) + + return slurm_settings + + self.run_specifications = { + 'code': 'tglf', + 'input_file': 'input.tglf', + 'code_call': code_call, + 'code_slurm_settings': code_slurm_settings, + 'control_function': GACODEdefaults.addTGLFcontrol, + 'controls_file': 'input.tglf.controls', + 'state_converter': 'to_tglf', + 'input_class': TGLFinput, + 'complete_variation': completeVariation_TGLF, + 'default_cores': 4, # Default cores to use in the simulation + 'output_class': TGLFoutput, + } + + 
print("\n-----------------------------------------------------------------------------------------") print("\t\t\t TGLF class module") - print( - "-----------------------------------------------------------------------------------------\n" - ) + print("-----------------------------------------------------------------------------------------\n") if alreadyRun is not None: # For the case in which I have run TGLF somewhere else, not using to plot and modify the class @@ -132,9 +89,13 @@ def __init__( self.__dict__ = alreadyRun.__dict__ print("* Readying previously-run TGLF class", typeMsg="i") else: - self.ResultsFiles = [ - "out.tglf.run", + + self.ResultsFiles_minimal = [ "out.tglf.gbflux", + ] + + self.ResultsFiles = self.ResultsFiles_minimal + [ + "out.tglf.run", "out.tglf.eigenvalue_spectrum", "out.tglf.sum_flux_spectrum", "out.tglf.ky_spectrum", @@ -161,17 +122,9 @@ def __init__( self.LocationCDF = cdf if self.LocationCDF is not None: _, self.nameRunid = IOtools.getLocInfo(self.LocationCDF) - else: - self.nameRunid = "0" self.time, self.avTime = time, avTime - self.rhos = np.array(rhos) - ( - self.results, - self.scans, - self.tgyro, - self.ky_single, - ) = ({}, {}, None, None) + self.tgyro,self.ky_single = None, None self.NormalizationSets = { "TRANSP": None, @@ -182,6 +135,133 @@ def __init__( "SELECTED": None, } + # This is redefined (from parent) because it has the option of producing WaveForms (very TGLF specific) + def run( + self, + subfolder, + runWaveForms=None, # e.g. 
runWaveForms = [0.3,1.0] + forceClosestUnstableWF=True, # Look at the growth rate spectrum and run exactly the ky of the closest unstable + **kwargs_generic_run + ): + + code_executor_full = super().run(subfolder, **kwargs_generic_run) + + kwargs_generic_run['runWaveForms'] = runWaveForms + kwargs_generic_run['forceClosestUnstableWF'] = forceClosestUnstableWF + self._helper_wf(code_executor_full, **kwargs_generic_run) + + # This is redefined (from parent) because it has the option of producing WaveForms (very TGLF specific) + def run_scan( + self, + subfolder, + **kwargs, + ): + + code_executor_full = super().run_scan(subfolder,**kwargs) + + self._helper_wf(code_executor_full, **kwargs) + + def _run_wf(self, kys, code_executor, forceClosestUnstableWF=True, **kwargs_TGLFrun): + """ + extraOptions and multipliers are not being grabbed from kwargs_TGLFrun, but from code_executor + """ + + if kwargs_TGLFrun.get("only_minimal_files", False): + raise Exception('[MITIM] Option to run WF with only minimal files is not available yet') + + if "runWaveForms" in kwargs_TGLFrun: + del kwargs_TGLFrun["runWaveForms"] + + # Grab these from code_executor + if "extraOptions" in kwargs_TGLFrun: + del kwargs_TGLFrun["extraOptions"] + if "multipliers" in kwargs_TGLFrun: + del kwargs_TGLFrun["multipliers"] + + self.ky_single = kys + ResultsFiles = copy.deepcopy(self.ResultsFiles) + self.ResultsFiles = copy.deepcopy(self.ResultsFiles_WF) + + self.FoldersTGLF_WF = {} + if self.ky_single is not None: + + code_executorWF = {} + for ky_single0 in self.ky_single: + print(f"> Running TGLF waveform analysis, ky~{ky_single0}") + + self.FoldersTGLF_WF[f"ky{ky_single0}"] = {} + for subfolder in code_executor: + + ky_single_orig = copy.deepcopy(ky_single0) + + FolderTGLF_old = code_executor[subfolder][list(code_executor[subfolder].keys())[0]]["folder"] + + self.ky_single = None + self.read(label=f"ky{ky_single0}", folder=FolderTGLF_old, cold_startWF = False) + self.ky_single = kys + + 
self.FoldersTGLF_WF[f"ky{ky_single0}"][ + FolderTGLF_old + ] = FolderTGLF_old / f"ky{ky_single0}" + + ky_singles = [] + for i, ir in enumerate(self.rhos): + # -------- Get the closest unstable mode to the one requested + if forceClosestUnstableWF: + + # Only unstable ones + kys_n = [] + for j in range(len(self.results[f"ky{ky_single0}"]["output"][i].ky)): + if self.results[f"ky{ky_single0}"]["output"][i].g[0, j] > 0.0: + kys_n.append(self.results[f"ky{ky_single0}"]["output"][i].ky[j]) + kys_n = np.array(kys_n) + # ---- + + closest_ky = kys_n[np.argmin(np.abs(kys_n - ky_single_orig))] + print(f"\t- rho = {ir:.3f}, requested ky={ky_single_orig:.3f}, & closest unstable ky based on previous run: ky={closest_ky:.3f}",typeMsg="i",) + ky_single = closest_ky + else: + ky_single = ky_single0 + + ky_singles.append(ky_single) + # ------------------------------------------------------------ + + kwargs_TGLFrun0 = copy.deepcopy(kwargs_TGLFrun) + if "extraOptions" in kwargs_TGLFrun: + extraOptions_WF = copy.deepcopy(kwargs_TGLFrun["extraOptions"]) + del kwargs_TGLFrun0["extraOptions"] + + else: + extraOptions_WF = {} + + extraOptions_WF = copy.deepcopy(code_executor[subfolder][list(code_executor[subfolder].keys())[0]]["extraOptions"]) + multipliers_WF = copy.deepcopy(code_executor[subfolder][list(code_executor[subfolder].keys())[0]]["multipliers"]) + + extraOptions_WF["USE_TRANSPORT_MODEL"] = "F" + extraOptions_WF["WRITE_WAVEFUNCTION_FLAG"] = 1 + extraOptions_WF["KY"] = ky_singles + extraOptions_WF["VEXB_SHEAR"] = 0.0 # See email from G. 
Staebler on 05/16/2021 + + code_executorWF, _ = self._run_prepare( + (FolderTGLF_old / f"ky{ky_single0}").relative_to(FolderTGLF_old.parent), + code_executor=code_executorWF, + extraOptions=extraOptions_WF, + multipliers=multipliers_WF, + **kwargs_TGLFrun0, + ) + + # Run them all + self._run( + code_executorWF, + runWaveForms=[], + **kwargs_TGLFrun0, + ) + + # Recover previous stuff + self.ResultsFiles_WF = copy.deepcopy(self.ResultsFiles) + self.ResultsFiles = ResultsFiles + # ----------- + def prepare_for_save_TGLF(self): """ This is a function that will be called when saving the class as pickle. @@ -198,6 +278,8 @@ def prepare_for_save_TGLF(self): if "convolution_fun_fluct" in tglf_copy.results[label]: tglf_copy.results[label]["convolution_fun_fluct"] = None + del tglf_copy.run_specifications + return tglf_copy def save_pkl(self, file): @@ -210,12 +292,12 @@ def save_pkl(self, file): with open(file, "wb") as handle: pickle.dump(tglf_copy, handle, protocol=4) - def prep( + def prep_using_tgyro( self, FolderGACODE, # Main folder where all caculations happen (runs will be in subfolders) cold_start=False, # If True, do not use what it potentially inside the folder, run again - onlyThermal_TGYRO=False, # Ignore fast particles in TGYRO - recalculatePTOT=True, # Recalculate PTOT in TGYRO + remove_fast=False, # Ignore fast particles in TGYRO + recalculate_ptot=True, # Recalculate PTOT in TGYRO cdf_open=None, # Grab normalizations from CDF file that is open as transp_output class inputgacode=None, # *NOTE BELOW* specificInputs=None, # *NOTE BELOW* @@ -232,11 +314,19 @@ def prep( # PROFILES class. 
- profiles = ( - PROFILEStools.PROFILES_GACODE(inputgacode) - if inputgacode is not None - else None - ) + if inputgacode is not None: + + if isinstance(inputgacode, str) or isinstance(inputgacode, Path): + + from mitim_tools.gacode_tools import PROFILEStools + profiles = PROFILEStools.gacode_state(inputgacode) + + else: + + # If inputgacode is already a PROFILEStools object, just use it + profiles = inputgacode + else: + profiles = None # TGYRO class. It checks existence and creates input.profiles/input.gacode @@ -272,15 +362,10 @@ def prep( inp = TGLFinput(fii) exists = exists and not inp.onlyControl else: - print( - f"\t\t- Running scans because it does not exist file {IOtools.clipstr(fii)}" - ) + print(f"\t\t- Running scans because it does not exist file {IOtools.clipstr(fii)}") exists = False if exists: - print( - "\t\t- All input files to TGLF exist, not running scans", - typeMsg="i", - ) + print("\t\t- All input files to TGLF exist, not running scans",typeMsg="i",) """ Sometimes, if I'm running TGLF only from input.tglf file, I may not need to run the entire TGYRO workflow @@ -294,8 +379,8 @@ def prep( self.tgyro_results = self.tgyro.run_tglf_scan( rhos=self.rhos, cold_start=not exists, - onlyThermal=onlyThermal_TGYRO, - recalculatePTOT=recalculatePTOT, + onlyThermal=remove_fast, + recalculate_ptot=recalculate_ptot, donotrun=donotrun, ) @@ -305,18 +390,18 @@ def prep( print("\t- Creating dictionary with all input files generated by TGLF_scans") - self.inputsTGLF = {} + self.inputs_files = {} for cont, rho in enumerate(self.rhos): fileN = self.FolderGACODE / f"input.tglf_{rho:.4f}" inputclass = TGLFinput(file=fileN) - self.inputsTGLF[rho] = inputclass + self.inputs_files[rho] = inputclass # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Initialize by taking directly the inputs # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else: - self.inputsTGLF = specificInputs + self.inputs_files = specificInputs 
self.tgyro_results = tgyro_results """ @@ -336,7 +421,7 @@ def prep( print("> Setting up normalizations") print("\t- Using mass of deuterium to unnormalize TGLF (not necesarily the first ion)",typeMsg="i") - self.tgyro.profiles.deriveQuantities(mi_ref=mi_D) + self.tgyro.profiles.derive_quantities(mi_ref=md_u) self.NormalizationSets, cdf = NORMtools.normalizations( self.tgyro.profiles, @@ -349,504 +434,49 @@ def prep( return cdf - def prep_direct_tglf( - self, - FolderGACODE, # Main folder where all caculations happen (runs will be in subfolders) - cold_start=False, # If True, do not use what it potentially inside the folder, run again - onlyThermal_TGYRO=False, # Ignore fast particles in TGYRO - recalculatePTOT=True, # Recalculate PTOT in TGYRO - cdf_open=None, # Grab normalizations from CDF file that is open as transp_output class - inputgacode=None, # *NOTE BELOW* - specificInputs=None, # *NOTE BELOW* - tgyro_results=None, # *NOTE BELOW* - forceIfcold_start=False, # Extra flag - ): - """ - * Note on inputgacode, specificInputs and tgyro_results: - If I don't want to prepare, I can provide inputgacode and specificInputs, but I have to make sure they are consistent with one another! - Optionally, I can give tgyro_results for further info in such a case - """ - - print("> Preparation of TGLF run") - - # PROFILES class. - - self.profiles = ( - PROFILEStools.PROFILES_GACODE(inputgacode) - if inputgacode is not None - else None - ) - - if self.profiles is None: - - # TGYRO class. 
It checks existence and creates input.profiles/input.gacode - - self.tgyro = TGYROtools.TGYRO( - cdf=self.LocationCDF, time=self.time, avTime=self.avTime - ) - self.tgyro.prep( - FolderGACODE, - cold_start=cold_start, - remove_tmp=True, - subfolder="tmp_tgyro_prep", - profilesclass_custom=self.profiles, - forceIfcold_start=forceIfcold_start, - ) - - self.profiles = self.tgyro.profiles - - self.profiles.deriveQuantities(mi_ref=mi_D) - - self.profiles.correct(options={'recompute_ptot':recalculatePTOT,'removeFast':onlyThermal_TGYRO}) - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Initialize by preparing a tgyro class and running for -1 iterations - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - if specificInputs is None: - - self.inputsTGLF = self.profiles.to_tglf(rhos=self.rhos) - - for rho in self.inputsTGLF: - self.inputsTGLF[rho] = TGLFinput.initialize_in_memory(self.inputsTGLF[rho]) - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Initialize by taking directly the inputs - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - else: - self.inputsTGLF = specificInputs - - self.tgyro_results = tgyro_results - - self.FolderGACODE = IOtools.expandPath(FolderGACODE) - - if cold_start or not self.FolderGACODE.exists(): - IOtools.askNewFolder(self.FolderGACODE, force=forceIfcold_start) - - for rho in self.inputsTGLF: - self.inputsTGLF[rho].file = self.FolderGACODE / f'input.tglf_{rho:.4f}' - self.inputsTGLF[rho].writeCurrentStatus() - - """ - ~~~~~ Create Normalizations ~~~~~ - - Only input.gacode needed - - I can also give TRANSP CDF for complement. It is used in prep anyway, so good to store here - and have the values for plotting the experimental fluxes. - - I can also give TGYRO class for complement. It is used in prep anyway, so good to store here - for plotting and check grid conversions. 
- - Note about the TGLF normalization: - What matters is what's the mass used to normalized the MASS_X. - If TGYRO was used to generate the input.tglf file, then the normalization mass is deuterium and all - must be normalized to deuterium - """ - - print("> Setting up normalizations") - - print( - "\t- Using mass of deuterium to normalize things (not necesarily the first ion)", - typeMsg="w", - ) - self.profiles.deriveQuantities(mi_ref=mi_D) - - self.NormalizationSets, cdf = NORMtools.normalizations( - self.profiles, - LocationCDF=self.LocationCDF, - time=self.time, - avTime=self.avTime, - cdf_open=cdf_open, - tgyro=self.tgyro_results, - ) - - return cdf - - - - def prep_from_tglf( + def prep_from_file( self, FolderGACODE, # Main folder where all caculations happen (runs will be in subfolders) input_tglf_file, # input.tglf file to start with - input_gacode=None, - ): - print("> Preparation of TGLF class directly from input.tglf") - - # Main folder where things are - self.FolderGACODE = IOtools.expandPath(FolderGACODE) - - # Main folder where things are - self.NormalizationSets, _ = NORMtools.normalizations( - PROFILEStools.PROFILES_GACODE(input_gacode) - if input_gacode is not None - else None - ) - - # input_tglf_file - inputclass = TGLFinput(file=input_tglf_file) - - roa = inputclass.geom["RMIN_LOC"] - print(f"\t- This file correspond to r/a={roa} according to RMIN_LOC") - - if self.NormalizationSets["input_gacode"] is not None: - rho = np.interp( - roa, - self.NormalizationSets["input_gacode"].derived["roa"], - self.NormalizationSets["input_gacode"].profiles["rho(-)"], - ) - print(f"\t\t- rho={rho:.4f}, using input.gacode for conversion") - else: - print( - "\t\t- No input.gacode for conversion, assuming rho=r/a, EXTREME CAUTION PLEASE", - typeMsg="w", - ) - rho = roa - - self.rhos = [rho] - - self.inputsTGLF = {self.rhos[0]: inputclass} - - def run( - self, - subFolderTGLF, # 'tglf1/', - TGLFsettings=None, - extraOptions={}, - multipliers={}, - 
runWaveForms=None, # e.g. runWaveForms = [0.3,1.0] - forceClosestUnstableWF=True, # Look at the growth rate spectrum and run exactly the ky of the closest unstable - ApplyCorrections=True, # Removing ions with too low density and that are fast species - Quasineutral=False, # Ensures quasineutrality. By default is False because I may want to run the file directly - launchSlurm=True, - cold_start=False, - forceIfcold_start=False, - extra_name="exe", - anticipate_problems=True, - slurm_setup={ - "cores": 4, - "minutes": 5, - }, # Cores per TGLF call (so, when running nR radii -> nR*4) - attempts_execution=1, - only_minimal_files=False, - ): - - if runWaveForms is None: runWaveForms = [] - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Prepare inputs - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - tglf_executor, tglf_executor_full, folderlast = self._prepare_run_radii( - subFolderTGLF, - tglf_executor={}, - tglf_executor_full={}, - TGLFsettings=TGLFsettings, - extraOptions=extraOptions, - multipliers=multipliers, - runWaveForms=runWaveForms, - forceClosestUnstableWF=forceClosestUnstableWF, - ApplyCorrections=ApplyCorrections, - Quasineutral=Quasineutral, - launchSlurm=launchSlurm, - cold_start=cold_start, - forceIfcold_start=forceIfcold_start, - extra_name=extra_name, - slurm_setup=slurm_setup, - anticipate_problems=anticipate_problems, - attempts_execution=attempts_execution, - only_minimal_files=only_minimal_files, - ) - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Run TGLF - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - self._run( - tglf_executor, - tglf_executor_full=tglf_executor_full, - TGLFsettings=TGLFsettings, - runWaveForms=runWaveForms, - forceClosestUnstableWF=forceClosestUnstableWF, - ApplyCorrections=ApplyCorrections, - Quasineutral=Quasineutral, - launchSlurm=launchSlurm, - cold_start=cold_start, - forceIfcold_start=forceIfcold_start, - extra_name=extra_name, - slurm_setup=slurm_setup, - only_minimal_files=only_minimal_files, - ) - - self.FolderTGLFlast = folderlast - - def 
_run( - self, - tglf_executor, - tglf_executor_full={}, - **kwargs_TGLFrun - ): - """ - extraOptions and multipliers are not being grabbed from kwargs_TGLFrun, but from tglf_executor for WF - """ - - print("\n> Run TGLF") - - if kwargs_TGLFrun.get("only_minimal_files", False): - filesToRetrieve = ["out.tglf.gbflux"] - else: - filesToRetrieve = self.ResultsFiles - - c = 0 - for subFolderTGLF in tglf_executor: - c += len(tglf_executor[subFolderTGLF]) - - if c > 0: - GACODErun.runTGLF( - self.FolderGACODE, - tglf_executor, - filesToRetrieve=filesToRetrieve, - minutes=kwargs_TGLFrun.get("slurm_setup", {}).get("minutes", 5), - cores_tglf=kwargs_TGLFrun.get("slurm_setup", {}).get("cores", 4), - name=f"tglf_{self.nameRunid}{kwargs_TGLFrun.get('extra_name', '')}", - launchSlurm=kwargs_TGLFrun.get("launchSlurm", True), - attempts_execution=kwargs_TGLFrun.get("attempts_execution", 1), - ) - else: - print("\t- TGLF not run because all results files found (please ensure consistency!)",typeMsg="i") - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Waveform if requested - # Cannot be in parallel to the previous run, because it needs the results of unstable ky - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - if "runWaveForms" in kwargs_TGLFrun and kwargs_TGLFrun["runWaveForms"] is not None and len(kwargs_TGLFrun["runWaveForms"]) > 0: - self._run_wf(kwargs_TGLFrun["runWaveForms"], tglf_executor_full, **kwargs_TGLFrun) - - def _prepare_run_radii( - self, - subFolderTGLF, # 'tglf1/', - rhos=None, - tglf_executor={}, - tglf_executor_full={}, - TGLFsettings=None, - extraOptions={}, - multipliers={}, - ApplyCorrections=True, # Removing ions with too low density and that are fast species - Quasineutral=False, # Ensures quasineutrality. 
By default is False because I may want to run the file directly - launchSlurm=True, - cold_start=False, - forceIfcold_start=False, - anticipate_problems=True, - slurm_setup={ - "cores": 4, - "minutes": 5, - }, # Cores per TGLF call (so, when running nR radii -> nR*4) - only_minimal_files=False, - **kwargs): - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Prepare for run - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - if rhos is None: - rhos = self.rhos - - inputs = copy.deepcopy(self.inputsTGLF) - FolderTGLF = self.FolderGACODE / subFolderTGLF - - ResultsFiles_new = [] - for i in self.ResultsFiles: - if "mitim.out" not in i: - ResultsFiles_new.append(i) - self.ResultsFiles = ResultsFiles_new - - if only_minimal_files: - filesToRetrieve = ["out.tglf.gbflux"] - else: - filesToRetrieve = self.ResultsFiles - - # Do I need to run all radii? - rhosEvaluate = cold_start_checker( - rhos, - filesToRetrieve, - FolderTGLF, - cold_start=cold_start, - ) - - if len(rhosEvaluate) == len(rhos): - # All radii need to be evaluated - IOtools.askNewFolder(FolderTGLF, force=forceIfcold_start) - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Change this specific run of TGLF - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - ( - latest_inputsFileTGLF, - latest_inputsFileTGLFDict, - ) = changeANDwrite_TGLF( - rhos, - inputs, - FolderTGLF, - TGLFsettings=TGLFsettings, - extraOptions=extraOptions, - multipliers=multipliers, - ApplyCorrections=ApplyCorrections, - Quasineutral=Quasineutral, - ) - - tglf_executor_full[subFolderTGLF] = {} - tglf_executor[subFolderTGLF] = {} - for irho in self.rhos: - tglf_executor_full[subFolderTGLF][irho] = { - "folder": FolderTGLF, - "dictionary": latest_inputsFileTGLFDict[irho], - "inputs": latest_inputsFileTGLF[irho], - "extraOptions": extraOptions, - "multipliers": multipliers, - } - if irho in rhosEvaluate: - tglf_executor[subFolderTGLF][irho] = tglf_executor_full[subFolderTGLF][ - irho - ] - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Stop if I expect problems - # 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - if anticipate_problems: - anticipate_problems_func( - latest_inputsFileTGLFDict, rhosEvaluate, slurm_setup, launchSlurm - ) - - return tglf_executor, tglf_executor_full, FolderTGLF - - def _run_wf(self, kys, tglf_executor, **kwargs_TGLFrun): - """ - extraOptions and multipliers are not being grabbed from kwargs_TGLFrun, but from tglf_executor - """ - - if kwargs_TGLFrun.get("only_minimal_files", False): - raise Exception('[MITIM] Option to run WF with only minimal files is not available yet') - - if "runWaveForms" in kwargs_TGLFrun: - del kwargs_TGLFrun["runWaveForms"] - - # Grab these from tglf_executor - if "extraOptions" in kwargs_TGLFrun: - del kwargs_TGLFrun["extraOptions"] - if "multipliers" in kwargs_TGLFrun: - del kwargs_TGLFrun["multipliers"] - - self.ky_single = kys - ResultsFiles = copy.deepcopy(self.ResultsFiles) - self.ResultsFiles = copy.deepcopy(self.ResultsFiles_WF) - - self.FoldersTGLF_WF = {} - if self.ky_single is not None: - - tglf_executorWF = {} - for ky_single0 in self.ky_single: - print(f"> Running TGLF waveform analysis, ky~{ky_single0}") - - self.FoldersTGLF_WF[f"ky{ky_single0}"] = {} - for subFolderTGLF in tglf_executor: - - ky_single_orig = copy.deepcopy(ky_single0) - - FolderTGLF_old = tglf_executor[subFolderTGLF][ - list(tglf_executor[subFolderTGLF].keys())[0] - ]["folder"] - - self.ky_single = None - self.read( - label=f"ky{ky_single0}", folder=FolderTGLF_old, cold_startWF = False) - self.ky_single = kys - - self.FoldersTGLF_WF[f"ky{ky_single0}"][ - FolderTGLF_old - ] = FolderTGLF_old / f"ky{ky_single0}" - - ky_singles = [] - for i, ir in enumerate(self.rhos): - # -------- Get the closest unstable mode to the one requested - if ( - kwargs_TGLFrun["forceClosestUnstableWF"] - if "forceClosestUnstableWF" in kwargs_TGLFrun - else True - ): - # Only unstable ones - kys_n = [] - for j in range( - len(self.results[f"ky{ky_single0}"]["TGLFout"][i].ky) - ): - if ( - 
self.results[f"ky{ky_single0}"]["TGLFout"][i].g[ - 0, j - ] - > 0.0 - ): - kys_n.append( - self.results[f"ky{ky_single0}"]["TGLFout"][ - i - ].ky[j] - ) - kys_n = np.array(kys_n) - # ---- - - closest_ky = kys_n[ - np.argmin(np.abs(kys_n - ky_single_orig)) - ] - print( - f"\t- rho = {ir:.3f}, requested ky={ky_single_orig:.3f}, & closest unstable ky based on previous run: ky={closest_ky:.3f}", - typeMsg="i", - ) - ky_single = closest_ky - else: - ky_single = ky_single0 - - ky_singles.append(ky_single) - # ------------------------------------------------------------ - - kwargs_TGLFrun0 = copy.deepcopy(kwargs_TGLFrun) - if "extraOptions" in kwargs_TGLFrun: - extraOptions_WF = copy.deepcopy(kwargs_TGLFrun["extraOptions"]) - del kwargs_TGLFrun0["extraOptions"] + input_gacode=None, + ): + print("> Preparation of TGLF class directly from input.tglf") - else: - extraOptions_WF = {} + # Main folder where things are + self.FolderGACODE = IOtools.expandPath(FolderGACODE) - extraOptions_WF = copy.deepcopy(tglf_executor[subFolderTGLF][ - list(tglf_executor[subFolderTGLF].keys())[0] - ]["extraOptions"]) - multipliers_WF = copy.deepcopy(tglf_executor[subFolderTGLF][ - list(tglf_executor[subFolderTGLF].keys())[0] - ]["multipliers"]) + # Main folder where things are + from mitim_tools.gacode_tools import PROFILEStools + self.NormalizationSets, _ = NORMtools.normalizations( + PROFILEStools.gacode_state(input_gacode) + if input_gacode is not None + else None + ) - extraOptions_WF["USE_TRANSPORT_MODEL"] = "F" - extraOptions_WF["WRITE_WAVEFUNCTION_FLAG"] = 1 - extraOptions_WF["KY"] = ky_singles - extraOptions_WF["VEXB_SHEAR"] = ( - 0.0 # See email from G. 
Staebler on 05/16/2021 - ) + # input_tglf_file + inputclass = TGLFinput(file=input_tglf_file) - tglf_executorWF, _, _ = self._prepare_run_radii( - (FolderTGLF_old / f"ky{ky_single0}").relative_to(FolderTGLF_old.parent), - tglf_executor=tglf_executorWF, - extraOptions=extraOptions_WF, - multipliers=multipliers_WF, - **kwargs_TGLFrun0, - ) + roa = inputclass.plasma["RMIN_LOC"] + print(f"\t- This file correspond to r/a={roa} according to RMIN_LOC") - # Run them all - self._run( - tglf_executorWF, - runWaveForms=[], - **kwargs_TGLFrun0, + if self.NormalizationSets["input_gacode"] is not None: + rho = np.interp( + roa, + self.NormalizationSets["input_gacode"].derived["roa"], + self.NormalizationSets["input_gacode"].profiles["rho(-)"], + ) + print(f"\t\t- rho={rho:.4f}, using input.gacode for conversion") + else: + print( + "\t\t- No input.gacode for conversion, assuming rho=r/a, EXTREME CAUTION PLEASE", + typeMsg="w", ) + rho = roa + + self.rhos = [rho] + + self.inputs_files = {self.rhos[0]: inputclass} - # Recover previous stuff - self.ResultsFiles_WF = copy.deepcopy(self.ResultsFiles) - self.ResultsFiles = ResultsFiles - # ----------- def read( self, @@ -886,7 +516,7 @@ def read( # If no specified folder, check the last one if folder is None: - folder = self.FolderTGLFlast + folder = self.FolderSimLast # ----------------------------------------- @@ -1021,20 +651,18 @@ def plot( for irho in range(len(self.rhos)): successful_normalization = ( successful_normalization - and self.results[label]["TGLFout"][irho].unnormalization_successful + and self.results[label]["output"][irho].unnormalization_successful ) max_num_species = np.max( - [max_num_species, self.results[label]["TGLFout"][irho].num_species] + [max_num_species, self.results[label]["output"][irho].num_species] ) - for il in self.results[label]["TGLFout"][irho].fields: + for il in self.results[label]["output"][irho].fields: if il not in max_fields: max_fields.append(il) if fn is None: - self.fn = 
GUItools.FigureNotebook( - "TGLF MITIM Notebook", geometry="1700x900", vertical=True - ) + self.fn = GUItools.FigureNotebook("TGLF MITIM Notebook", geometry="1700x900", vertical=True) else: self.fn = fn @@ -1248,87 +876,9 @@ def plot( axFluc02Sym = axFluc02.twinx() if plotGACODE: - grid = plt.GridSpec(3, 3, hspace=0.3, wspace=0.3) - axsProf_1 = [ - figProf_1.add_subplot(grid[0, 0]), - figProf_1.add_subplot(grid[1, 0]), - figProf_1.add_subplot(grid[2, 0]), - figProf_1.add_subplot(grid[0, 1]), - figProf_1.add_subplot(grid[1, 1]), - figProf_1.add_subplot(grid[2, 1]), - figProf_1.add_subplot(grid[0, 2]), - figProf_1.add_subplot(grid[1, 2]), - figProf_1.add_subplot(grid[2, 2]), - ] - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsProf_2 = [ - figProf_2.add_subplot(grid[0, 0]), - figProf_2.add_subplot(grid[0, 1]), - figProf_2.add_subplot(grid[1, 0]), - figProf_2.add_subplot(grid[1, 1]), - figProf_2.add_subplot(grid[0, 2]), - figProf_2.add_subplot(grid[1, 2]), - ] - grid = plt.GridSpec(3, 4, hspace=0.3, wspace=0.5) - ax00c = figProf_3.add_subplot(grid[0, 0]) - axsProf_3 = [ - ax00c, - figProf_3.add_subplot(grid[1, 0], sharex=ax00c), - figProf_3.add_subplot(grid[2, 0], sharex=ax00c), - figProf_3.add_subplot(grid[0, 1], sharex=ax00c), - figProf_3.add_subplot(grid[1, 1], sharex=ax00c), - figProf_3.add_subplot(grid[2, 1], sharex=ax00c), - figProf_3.add_subplot(grid[0, 2], sharex=ax00c), - figProf_3.add_subplot(grid[1, 2], sharex=ax00c), - figProf_3.add_subplot(grid[2, 2], sharex=ax00c), - figProf_3.add_subplot(grid[0, 3], sharex=ax00c), - figProf_3.add_subplot(grid[1, 3], sharex=ax00c), - figProf_3.add_subplot(grid[2, 3], sharex=ax00c), - ] - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsProf_4 = [ - figProf_4.add_subplot(grid[0, 0]), - figProf_4.add_subplot(grid[0, 1]), - figProf_4.add_subplot(grid[0, 2]), - figProf_4.add_subplot(grid[1, 0]), - figProf_4.add_subplot(grid[1, 1]), - figProf_4.add_subplot(grid[1, 2]), - ] - - grid = plt.GridSpec(2, 3, 
hspace=0.3, wspace=0.3) - - axsProf_5 = [ - figProf_5.add_subplot(grid[0, 0]), - figProf_5.add_subplot(grid[1, 0]), - figProf_5.add_subplot(grid[0, 1]), - figProf_5.add_subplot(grid[0, 2]), - figProf_5.add_subplot(grid[1, 1]), - figProf_5.add_subplot(grid[1, 2]), - ] - - grid = plt.GridSpec(2, 4, hspace=0.3, wspace=0.3) - axsProf_6 = [ - figProf_6.add_subplot(grid[0, 0]), - figProf_6.add_subplot(grid[:, 1]), - figProf_6.add_subplot(grid[0, 2]), - figProf_6.add_subplot(grid[1, 0]), - figProf_6.add_subplot(grid[1, 2]), - figProf_6.add_subplot(grid[0, 3]), - figProf_6.add_subplot(grid[1, 3]), - ] - - grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) - axsProf_7 = [ - figProf_7.add_subplot(grid[0, 0]), - figProf_7.add_subplot(grid[0, 1]), - figProf_7.add_subplot(grid[0, 2]), - figProf_7.add_subplot(grid[1, 0]), - figProf_7.add_subplot(grid[1, 1]), - figProf_7.add_subplot(grid[1, 2]), - ] - + axsProf_1, axsProf_2, axsProf_3, axsProf_4, axsProf_5, axsProf_6, axsProf_7 = state_plotting.add_axes([figProf_1, figProf_2, figProf_3, figProf_4, figProf_5, figProf_6, figProf_7]) + + grid = plt.GridSpec(2, 4, hspace=0.2, wspace=0.6) if plotNormalizations: @@ -1375,10 +925,10 @@ def plot( # -------------------------------- # Plot Raw TGLF (normalized) # -------------------------------- - self.results[label]["TGLFout"][irho].plotTGLF_Summary( + self.results[label]["output"][irho].plotTGLF_Summary( c=colors[cont], label=full_label, axs=axsTGLF1, irho_cont=irho_cont ) - self.results[label]["TGLFout"][irho].plotTGLF_Contributors( + self.results[label]["output"][irho].plotTGLF_Contributors( c=colors[cont], label=full_label, axs=axsTGLF2, @@ -1386,11 +936,11 @@ def plot( title_legend=title_legend, cont=cont, ) - self.results[label]["TGLFout"][irho].plotTGLF_Model( + self.results[label]["output"][irho].plotTGLF_Model( axs=axsTGLF3, c=colors[cont], label=full_label ) - self.results[label]["TGLFout"][irho].plotTGLF_Fluctuations( + self.results[label]["output"][irho].plotTGLF_Fluctuations( 
axs=axsTGLF_flucts, c=colors[cont], label=full_label, @@ -1399,7 +949,7 @@ def plot( cont=cont, ) - self.results[label]["TGLFout"][irho].plotTGLF_Field( + self.results[label]["output"][irho].plotTGLF_Field( quantity="phi", c=colors[cont], label=full_label, @@ -1410,7 +960,7 @@ def plot( ) if "a_par" in max_fields: - self.results[label]["TGLFout"][irho].plotTGLF_Field( + self.results[label]["output"][irho].plotTGLF_Field( quantity="a_par", c=colors[cont], label=full_label, @@ -1420,7 +970,7 @@ def plot( cont=cont, ) if "a_per" in max_fields: - self.results[label]["TGLFout"][irho].plotTGLF_Field( + self.results[label]["output"][irho].plotTGLF_Field( quantity="a_per", c=colors[cont], label=full_label, @@ -1453,9 +1003,9 @@ def plot( if successful_normalization: GACODEplotting.plotTGLFspectrum( [axS00, axS10], - self.results[label]["TGLFout"][irho].ky, - self.results[label]["TGLFout"][irho].g[0, :], - freq=self.results[label]["TGLFout"][irho].f[0, :], + self.results[label]["output"][irho].ky, + self.results[label]["output"][irho].g[0, :], + freq=self.results[label]["output"][irho].f[0, :], coeff=0.0, c=colors[cont], ls="-", @@ -1483,22 +1033,22 @@ def plot( for irho_cont in range(len(self.rhos)): irho = np.where(self.results[label]["x"] == self.rhos[irho_cont])[0][0] - if self.results[label]["TGLFout"][irho].unnormalization_successful: - Qe.append(self.results[label]["TGLFout"][irho].Qe_unn) - Qi.append(self.results[label]["TGLFout"][irho].Qi_unn) - Ge.append(self.results[label]["TGLFout"][irho].Ge_unn) + if self.results[label]["output"][irho].unnormalization_successful: + Qe.append(self.results[label]["output"][irho].Qe_unn) + Qi.append(self.results[label]["output"][irho].Qi_unn) + Ge.append(self.results[label]["output"][irho].Ge_unn) TeF.append( - self.results[label]["TGLFout"][irho].AmplitudeSpectrum_Te_level + self.results[label]["output"][irho].AmplitudeSpectrum_Te_level ) - neTe.append(self.results[label]["TGLFout"][irho].neTeSpectrum_level) + 
neTe.append(self.results[label]["output"][irho].neTeSpectrum_level) - roas.append(self.results[label]["TGLFout"][irho].roa) + roas.append(self.results[label]["output"][irho].roa) - QeGB.append(self.results[label]["TGLFout"][irho].Qe) - QiGB.append(self.results[label]["TGLFout"][irho].Qi) - GeGB.append(self.results[label]["TGLFout"][irho].Ge) + QeGB.append(self.results[label]["output"][irho].Qe) + QiGB.append(self.results[label]["output"][irho].Qi) + GeGB.append(self.results[label]["output"][irho].Ge) - if self.results[label]["TGLFout"][irho].unnormalization_successful: + if self.results[label]["output"][irho].unnormalization_successful: axT2.plot(self.rhos, Qe, "-o", c=colorLab[0], lw=2, label=full_label) axS01.plot(self.rhos, Qe, "-o", c=colorLab[0], lw=2, label=full_label) @@ -1779,7 +1329,7 @@ def plot( a = normalization["rmin"][-1] * 100 rhosa = rho_s / a - kys = self.results[label]["TGLFout"][irho].ky / rho_s + kys = self.results[label]["output"][irho].ky / rho_s xP = np.linspace(0, kys[-1], 1000) @@ -1800,7 +1350,7 @@ def plot( yP = np.ones(len(xP)) ax = axFluc00 - fluct = self.results[label]["TGLFout"][irho].AmplitudeSpectrum_Te + fluct = self.results[label]["output"][irho].AmplitudeSpectrum_Te ylabel = "$A_{T_e}(k_y)$" GACODEplotting.plotTGLFfluctuations( ax, @@ -1824,7 +1374,7 @@ def plot( axFluc00Sym.plot(xP, yP, ls="-.", lw=0.5, color=colors[cont]) ax = axFluc10e - fluct = self.results[label]["TGLFout"][ + fluct = self.results[label]["output"][ irho ].AmplitudeSpectrum_Te * np.interp(kys, xP, yP) ylabel = "$A_{T_e}(k_y)$*W" @@ -1847,7 +1397,7 @@ def plot( ax.plot(kysPlot, fluctPlot, "--", lw=0.3, color=colors[cont]) ax = axFluc01 - fluct = self.results[label]["TGLFout"][irho].AmplitudeSpectrum_ne + fluct = self.results[label]["output"][irho].AmplitudeSpectrum_ne ylabel = "$A_{n_e}(k_y)$" GACODEplotting.plotTGLFfluctuations( ax, @@ -1871,7 +1421,7 @@ def plot( axFluc01Sym.plot(xP, yP, ls="-.", lw=0.5, color=colors[cont]) ax = axFluc11e - fluct = 
self.results[label]["TGLFout"][ + fluct = self.results[label]["output"][ irho ].AmplitudeSpectrum_ne * np.interp(kys, xP, yP) ylabel = "$A_{n_e}(k_y)$*W" @@ -1896,10 +1446,10 @@ def plot( # --- for inmode in range( - self.results[label]["TGLFout"][irho].num_nmodes + self.results[label]["output"][irho].num_nmodes ): ax = axFluc02 - fluct = self.results[label]["TGLFout"][irho].neTeSpectrum[ + fluct = self.results[label]["output"][irho].neTeSpectrum[ inmode, : ] ylabel = "$n_eT_e(k_y)$" @@ -1937,7 +1487,7 @@ def plot( ) ax = axFluc12e - fluct = self.results[label]["TGLFout"][irho].neTeSpectrum[ + fluct = self.results[label]["output"][irho].neTeSpectrum[ inmode, : ] * np.interp(kys, xP, yP) ylabel = "$n_eT_e(k_y)$*W" @@ -2016,12 +1566,12 @@ def plot( ][0] T.append( - self.results[label]["TGLFout"][irho].AmplitudeSpectrum_Te_level + self.results[label]["output"][irho].AmplitudeSpectrum_Te_level ) N.append( - self.results[label]["TGLFout"][irho].AmplitudeSpectrum_ne_level + self.results[label]["output"][irho].AmplitudeSpectrum_ne_level ) - NT.append(self.results[label]["TGLFout"][irho].neTeSpectrum_level) + NT.append(self.results[label]["output"][irho].neTeSpectrum_level) TL.append(f"{labZX}$\\rho_N={self.rhos[irho_cont]:.4f}$") C.append(colors[cont]) cont += 1 @@ -2192,15 +1742,15 @@ def plot( # all eigenvalues ax00.plot( - self.results[label]["TGLFout"][irho_cont].ky, - self.results[label]["TGLFout"][irho_cont].g[0], + self.results[label]["output"][irho_cont].ky, + self.results[label]["output"][irho_cont].g[0], "-s", markersize=3, color=colors[cont], ) ax10.plot( - self.results[label]["TGLFout"][irho_cont].ky, - self.results[label]["TGLFout"][irho_cont].f[0], + self.results[label]["output"][irho_cont].ky, + self.results[label]["output"][irho_cont].f[0], "-s", markersize=3, color=colors[cont], @@ -2226,15 +1776,15 @@ def plot( # all eigenvalues ax00.plot( - self.results[label]["TGLFout"][irho_cont].ky, - self.results[label]["TGLFout"][irho_cont].g[i + 1], + 
self.results[label]["output"][irho_cont].ky, + self.results[label]["output"][irho_cont].g[i + 1], "-s", markersize=1, color=colors[cont], ) ax10.plot( - self.results[label]["TGLFout"][irho_cont].ky, - self.results[label]["TGLFout"][irho_cont].f[i + 1], + self.results[label]["output"][irho_cont].ky, + self.results[label]["output"][irho_cont].f[i + 1], "-s", markersize=1, color=colors[cont], @@ -2320,250 +1870,104 @@ def plot( legYN=contLab == 0, ) - # ~~~~~~~~~~~~~~ Scan options - - def runScan( + def _helper_wf( self, - subFolderTGLF, # 'scan1', - multipliers={}, - variable="RLTS_1", - varUpDown=[0.5, 1.0, 1.5], - variables_scanTogether=[], - relativeChanges=True, - **kwargs_TGLFrun, + code_executor_full, + **kwargs ): + + runWaveForms = kwargs['runWaveForms'] if 'runWaveForms' in kwargs else None + forceClosestUnstableWF = kwargs['forceClosestUnstableWF'] if 'forceClosestUnstableWF' in kwargs else True - # ------------------------------------- - # Add baseline - # ------------------------------------- - if (1.0 not in varUpDown) and relativeChanges: - print("\n* Since variations vector did not include base case, I am adding it",typeMsg="i",) - varUpDown_new = [] - added = False - for i in varUpDown: - if i > 1.0 and not added: - varUpDown_new.append(1.0) - added = True - varUpDown_new.append(i) - else: - varUpDown_new = varUpDown - - - tglf_executor, tglf_executor_full, folders, varUpDown_new = self._prepare_scan( - subFolderTGLF, - multipliers=multipliers, - variable=variable, - varUpDown=varUpDown_new, - variables_scanTogether=variables_scanTogether, - relativeChanges=relativeChanges, - **kwargs_TGLFrun, - ) + if 'runWaveForms' in kwargs: + del kwargs['runWaveForms'] + if 'forceClosestUnstableWF' in kwargs: + del kwargs['forceClosestUnstableWF'] - # Run them all - self._run( - tglf_executor, - tglf_executor_full=tglf_executor_full, - **kwargs_TGLFrun, - ) + # ********************************************************************************************* + # 
Waveform if requested (not in parallel because it needs the results of unstable ky) + # ********************************************************************************************* - # Read results - for cont_mult, mult in enumerate(varUpDown_new): - name = f"{variable}_{mult}" - self.read( - label=f"{self.subFolderTGLF_scan}_{name}", - folder=folders[cont_mult], - cold_startWF = False, - require_all_files=not kwargs_TGLFrun.get("only_minimal_files",False), - ) + if runWaveForms is not None and len(runWaveForms) > 0: - def _prepare_scan( + # Keep the same folder as before + self.keep_folder = copy.deepcopy(self.FolderSimLast) + + # Run WF + self._run_wf(runWaveForms, code_executor_full, forceClosestUnstableWF=forceClosestUnstableWF, **kwargs) + + # Get back to it + self.FolderSimLast = self.keep_folder + + #TODO #TOREMOVE + def runScan(self,subfolder,**kwargs): + self.run_scan(subfolder, **kwargs) + def readScan(self, **kwargs): + self.read_scan(**kwargs) + def plotScan(self,**kwargs): + self.plot_scan(**kwargs) + + def read_scan( self, - subFolderTGLF, # 'scan1', - multipliers={}, + label="scan1", + subfolder=None, variable="RLTS_1", - varUpDown=[0.5, 1.0, 1.5], - variables_scanTogether=[], - relativeChanges=True, - **kwargs_TGLFrun, - ): - """ - Multipliers will be modified by adding the scaning variables, but I don't want to modify the original - multipliers, as they may be passed to the next scan - - Set relativeChanges=False if varUpDown contains the exact values to change, not multipleiers - """ - multipliers_mod = copy.deepcopy(multipliers) - - self.subFolderTGLF_scan = subFolderTGLF - - if relativeChanges: - for i in range(len(varUpDown)): - varUpDown[i] = round(varUpDown[i], 6) - - print(f"\n- Proceeding to scan {variable}{' together with '+', '.join(variables_scanTogether) if len(variables_scanTogether)>0 else ''}:") - - tglf_executor = {} - tglf_executor_full = {} - folders = [] - for cont_mult, mult in enumerate(varUpDown): - mult = round(mult, 6) - - 
if relativeChanges: - print(f"\n + Multiplier: {mult} -----------------------------------------------------------------------------------------------------------") - else: - print(f"\n + Value: {mult} ----------------------------------------------------------------------------------------------------------------") - - multipliers_mod[variable] = mult - - for variable_scanTogether in variables_scanTogether: - multipliers_mod[variable_scanTogether] = mult - - name = f"{variable}_{mult}" - - species = self.inputsTGLF[self.rhos[0]] # Any rho will do - - multipliers_mod = completeVariation(multipliers_mod, species) - - if not relativeChanges: - for ikey in multipliers_mod: - kwargs_TGLFrun["extraOptions"][ikey] = multipliers_mod[ikey] - multipliers_mod = {} - - # Force ensure quasineutrality if the - if variable in ["AS_3", "AS_4", "AS_5", "AS_6"]: - kwargs_TGLFrun["Quasineutral"] = True - - # Only ask the cold_start in the first round - kwargs_TGLFrun["forceIfcold_start"] = cont_mult > 0 or ( - "forceIfcold_start" in kwargs_TGLFrun and kwargs_TGLFrun["forceIfcold_start"] - ) - - tglf_executor, tglf_executor_full, folderlast = self._prepare_run_radii( - f"{self.subFolderTGLF_scan}_{name}", - tglf_executor=tglf_executor, - tglf_executor_full=tglf_executor_full, - multipliers=multipliers_mod, - **kwargs_TGLFrun, - ) - - folders.append(copy.deepcopy(folderlast)) - - return tglf_executor, tglf_executor_full, folders, varUpDown - - def readScan( - self, label="scan1", subFolderTGLF=None, variable="RLTS_1", positionIon=2 + positionIon=2 ): + + output_object = "output" + + variable_mapping = { + 'scanned_variable': ["parsed", variable, None], + 'Qe_gb': [output_object, 'Qe', None], + 'Qi_gb': [output_object, 'Qi', None], + 'Ge_gb': [output_object, 'Ge', None], + 'Gi_gb': [output_object, 'GiAll', positionIon - 2], + 'Mt_gb': [output_object, 'Mt', None], + 'S_gb': [output_object, 'Se', None], + 'ky': [output_object, 'ky', None], + 'g': [output_object, 'g', None], + 'f': 
[output_object, 'f', None], + 'Qifast_gb': [output_object, 'Qifast', None], + 'eta_ITGETG': [output_object, 'eta_ITGETG', None], + 'eta_ITGTEM': [output_object, 'eta_ITGTEM', None], + 'g_lowk_max': [output_object, 'g_lowk_max', None], + 'f_lowk_max': [output_object, 'f_lowk_max', None], + 'k_lowk_max': [output_object, 'k_lowk_max', None], + 'g_ITG_max': [output_object, 'g_ITG_max', None], + 'g_ETG_max': [output_object, 'g_ETG_max', None], + 'g_TEM_max': [output_object, 'g_TEM_max', None], + } + + variable_mapping_unn = { + 'Qe': [output_object, 'Qe_unn', None], + 'Qi': [output_object, 'Qi_unn', None], + 'Ge': [output_object, 'Ge_unn', None], + 'Gi': [output_object, 'GiAll_unn', positionIon - 2], + 'Mt': [output_object, 'Mt_unn', None], + 'S': [output_object, 'Se_unn', None], + 'Qifast': [output_object, 'Qifast_unn', None], + } + + super().read_scan( + label=label, + subfolder=subfolder, + variable=variable, + positionIon=positionIon, + variable_mapping=variable_mapping, + variable_mapping_unn=variable_mapping_unn + ) - if subFolderTGLF is None: - subFolderTGLF = self.subFolderTGLF_scan - - self.scans[label] = {} - self.scans[label]["variable"] = variable - self.scans[label]["positionBase"] = None - self.scans[label]["unnormalization_successful"] = True - self.scans[label]["results_tags"] = [] - - self.positionIon_scan = positionIon - - # ---- - x, Qe, Qi, Ge, Gi, ky, g, f, eta1, eta2, itg, tem, etg = [],[],[],[],[],[],[],[],[],[],[],[],[] - Qe_gb, Qi_gb, Ge_gb, Gi_gb = [], [], [], [] - etalow_g, etalow_f, etalow_k = [], [], [] - cont = 0 - for ikey in self.results: - isThisTheRightReadResults = (subFolderTGLF in ikey) and (variable== "_".join(ikey.split("_")[:-1]).split(subFolderTGLF + "_")[-1]) - - if isThisTheRightReadResults: - - self.scans[label]["results_tags"].append(ikey) - x0, Qe0, Qi0, Ge0, Gi0, ky0, g0, f0, eta10, eta20, itg0, tem0, etg0 = [],[],[],[],[],[],[],[],[],[],[],[],[] - Qe_gb0, Qi_gb0, Ge_gb0, Gi_gb0 = [], [], [], [] - etalow_g0, etalow_f0, 
etalow_k0 = [], [], [] - for irho_cont in range(len(self.rhos)): - irho = np.where(self.results[ikey]["x"] == self.rhos[irho_cont])[0][0] - - # Unnormalized - x0.append(self.results[ikey]["parsed"][irho][variable]) - Qe_gb0.append(self.results[ikey]["TGLFout"][irho].Qe) - Qi_gb0.append(self.results[ikey]["TGLFout"][irho].Qi) - Ge_gb0.append(self.results[ikey]["TGLFout"][irho].Ge) - Gi_gb0.append(self.results[ikey]["TGLFout"][irho].GiAll[self.positionIon_scan - 2]) - ky0.append(self.results[ikey]["TGLFout"][irho].ky) - g0.append(self.results[ikey]["TGLFout"][irho].g) - f0.append(self.results[ikey]["TGLFout"][irho].f) - eta10.append(self.results[ikey]["TGLFout"][irho].etas["metrics"]["eta_ITGTEM"]) - eta20.append(self.results[ikey]["TGLFout"][irho].etas["metrics"]["eta_ITGETG"]) - etalow_g0.append(self.results[ikey]["TGLFout"][irho].etas["metrics"]["g_lowk_max"]) - etalow_k0.append(self.results[ikey]["TGLFout"][irho].etas["metrics"]["k_lowk_max"]) - etalow_f0.append(self.results[ikey]["TGLFout"][irho].etas["metrics"]["f_lowk_max"]) - itg0.append(self.results[ikey]["TGLFout"][irho].etas["ITG"]["g_max"]) - tem0.append(self.results[ikey]["TGLFout"][irho].etas["TEM"]["g_max"]) - etg0.append(self.results[ikey]["TGLFout"][irho].etas["ETG"]["g_max"]) - - if self.results[ikey]["TGLFout"][irho].unnormalization_successful: - Qe0.append(self.results[ikey]["TGLFout"][irho].Qe_unn) - Qi0.append(self.results[ikey]["TGLFout"][irho].Qi_unn) - Ge0.append(self.results[ikey]["TGLFout"][irho].Ge_unn) - Gi0.append(self.results[ikey]["TGLFout"][irho].GiAll_unn[self.positionIon_scan - 2]) - else: - self.scans[label]["unnormalization_successful"] = False - - x.append(x0) - Qe.append(Qe0) - Qi.append(Qi0) - Ge.append(Ge0) - Qe_gb.append(Qe_gb0) - Qi_gb.append(Qi_gb0) - Ge_gb.append(Ge_gb0) - Gi_gb.append(Gi_gb0) - Gi.append(Gi0) - ky.append(ky0) - g.append(g0) - f.append(f0) - eta1.append(eta10) - eta2.append(eta20) - etalow_g.append(etalow_g0) - etalow_f.append(etalow_f0) - 
etalow_k.append(etalow_k0) - itg.append(itg0) - tem.append(tem0) - etg.append(etg0) - - if float(ikey.split('_')[-1]) == 1.0: - self.scans[label]["positionBase"] = cont - cont += 1 - - self.scans[label]["x"] = np.array(self.rhos) - self.scans[label]["xV"] = np.atleast_2d(np.transpose(x)) - self.scans[label]["Qe_gb"] = np.atleast_2d(np.transpose(Qe_gb)) - self.scans[label]["Qi_gb"] = np.atleast_2d(np.transpose(Qi_gb)) - self.scans[label]["Ge_gb"] = np.atleast_2d(np.transpose(Ge_gb)) - self.scans[label]["Gi_gb"] = np.atleast_2d(np.transpose(Gi_gb)) - self.scans[label]["Qe"] = np.atleast_2d(np.transpose(Qe)) - self.scans[label]["Qi"] = np.atleast_2d(np.transpose(Qi)) - self.scans[label]["Ge"] = np.atleast_2d(np.transpose(Ge)) - self.scans[label]["Gi"] = np.atleast_2d(np.transpose(Gi)) - self.scans[label]["eta1"] = np.atleast_2d(np.transpose(eta1)) - self.scans[label]["eta2"] = np.atleast_2d(np.transpose(eta2)) - self.scans[label]["itg"] = np.atleast_2d(np.transpose(itg)) - self.scans[label]["tem"] = np.atleast_2d(np.transpose(tem)) - self.scans[label]["etg"] = np.atleast_2d(np.transpose(etg)) - self.scans[label]["g_lowk_max"] = np.atleast_2d(np.transpose(etalow_g)) - self.scans[label]["f_lowk_max"] = np.atleast_2d(np.transpose(etalow_f)) - self.scans[label]["k_lowk_max"] = np.atleast_2d(np.transpose(etalow_k)) - self.scans[label]["ky"] = np.array(ky) - self.scans[label]["g"] = np.array(g) - self.scans[label]["f"] = np.array(f) - if len(self.scans[label]["ky"].shape) == 2: - self.scans[label]["ky"] = self.scans[label]["ky"].reshape((1, self.scans[label]["ky"].shape[0], self.scans[label]["ky"].shape[1])) - self.scans[label]["g"] = self.scans[label]["g"].reshape((1, self.scans[label]["g"].shape[0], self.scans[label]["g"].shape[1])) - self.scans[label]["f"] = self.scans[label]["f"].reshape((1, self.scans[label]["f"].shape[0], self.scans[label]["f"].shape[1])) - else: - self.scans[label]["ky"] = np.transpose(self.scans[label]["ky"], axes=[1, 0, 2]) - self.scans[label]["g"] 
= np.transpose(self.scans[label]["g"], axes=[1, 0, 2, 3]) - self.scans[label]["f"] = np.transpose(self.scans[label]["f"], axes=[1, 0, 2, 3]) + varS = ['ky', 'g', 'f'] + for var in varS: + if len(self.scans[label][var].shape) == 3: + axes_swap = [1, 2, 0] # [rho, scan, ky] + elif len(self.scans[label][var].shape) == 4: + axes_swap = [2, 3, 1, 0] # [rho, scan, nmode, ky] + + self.scans[label][var] = np.transpose(self.scans[label][var], axes=axes_swap) - def plotScan( + def plot_scan( self, labels=["scan1"], figs=None, @@ -2574,17 +1978,13 @@ def plotScan( forceXposition=None, plotTGLFs=True, ): + unnormalization_successful = True for label in labels: - unnormalization_successful = ( - unnormalization_successful - and self.scans[label]["unnormalization_successful"] - ) + unnormalization_successful = unnormalization_successful and self.scans[label]["unnormalization_successful"] if figs is None: - self.fn = GUItools.FigureNotebook( - "TGLF Scan MITIM Notebook", geometry="1500x900", vertical=True - ) + self.fn = GUItools.FigureNotebook("TGLF Scan MITIM Notebook", geometry="1500x900", vertical=True) if unnormalization_successful: fig1 = self.fn.add_figure(label="Fluxes") fig1e = self.fn.add_figure(label="Fluxes (GB)") @@ -2646,7 +2046,7 @@ def plotScan( positionBase = self.scans[label]["positionBase"] - x = self.scans[label]["xV"] + x = self.scans[label]["scanned_variable"] if relativeX: xbase = x[:, positionBase : positionBase + 1] x = (x - xbase) / xbase * 100.0 @@ -2663,11 +2063,11 @@ def plotScan( self.scans[label]["Ge_gb"], self.scans[label]["Gi_gb"], ) - eta1, eta2 = self.scans[label]["eta1"], self.scans[label]["eta2"] + eta1, eta2 = self.scans[label]["eta_ITGETG"], self.scans[label]["eta_ITGTEM"] itg, tem, etg = ( - self.scans[label]["itg"], - self.scans[label]["tem"], - self.scans[label]["etg"], + self.scans[label]["g_ITG_max"], + self.scans[label]["g_TEM_max"], + self.scans[label]["g_ETG_max"], ) ky, g, f = ( self.scans[label]["ky"], @@ -3121,16 +2521,20 @@ def 
plotScan( def runScanTurbulenceDrives( self, - subFolderTGLF="drives1", + subfolder="drives1", varUpDown = None, # This setting supercedes the resolutionPoints and variation resolutionPoints=5, variation=0.5, add_baseline_to = 'none', # 'all' or 'first' or 'none' - add_also_baseline_to_first = True, variablesDrives=["RLTS_1", "RLTS_2", "RLNS_1", "XNUE", "TAUS_2"], + minimum_delta_abs={}, positionIon=2, **kwargs_TGLFrun, ): + + ''' + positionIon is the index in the input.tglf file... so if you want for ion RLNS_5, positionIon=5 + ''' self.variablesDrives = variablesDrives @@ -3148,24 +2552,23 @@ def runScanTurbulenceDrives( # Prepare all scans # ------------------------------------------ - tglf_executor, tglf_executor_full, folders = {}, {}, [] + code_executor, code_executor_full, folders = {}, {}, [] for cont, variable in enumerate(self.variablesDrives): # Only ask the cold_start in the first round - kwargs_TGLFrun["forceIfcold_start"] = cont > 0 or ( - "forceIfcold_start" in kwargs_TGLFrun and kwargs_TGLFrun["forceIfcold_start"] - ) + kwargs_TGLFrun["forceIfcold_start"] = cont > 0 or ("forceIfcold_start" in kwargs_TGLFrun and kwargs_TGLFrun["forceIfcold_start"]) - scan_name = f"{subFolderTGLF}_{variable}" # e.g. turbDrives_RLTS_1 + scan_name = f"{subfolder}_{variable}" # e.g. 
turbDrives_RLTS_1 - tglf_executor0, tglf_executor_full0, folders0, _ = self._prepare_scan( + code_executor0, code_executor_full0, folders0, _ = self._prepare_scan( scan_name, variable=variable, varUpDown=varUpDown_dict[variable], + minimum_delta_abs=minimum_delta_abs, **kwargs_TGLFrun, ) - tglf_executor = tglf_executor | tglf_executor0 - tglf_executor_full = tglf_executor_full | tglf_executor_full0 + code_executor = code_executor | code_executor0 + code_executor_full = code_executor_full | code_executor_full0 folders += folders0 # ------------------------------------------ @@ -3173,8 +2576,8 @@ def runScanTurbulenceDrives( # ------------------------------------------ self._run( - tglf_executor, - tglf_executor_full=tglf_executor_full, + code_executor, + code_executor_full=code_executor_full, **kwargs_TGLFrun, ) @@ -3187,16 +2590,16 @@ def runScanTurbulenceDrives( for mult in varUpDown_dict[variable]: name = f"{variable}_{mult}" self.read( - label=f"{self.subFolderTGLF_scan}_{name}", + label=f"{self.subfolder_scan}_{name}", folder=folders[cont], cold_startWF = False, require_all_files=not kwargs_TGLFrun.get("only_minimal_files", False), ) cont += 1 - scan_name = f"{subFolderTGLF}_{variable}" # e.g. turbDrives_RLTS_1 + scan_name = f"{subfolder}_{variable}" # e.g. 
turbDrives_RLTS_1 - self.readScan(label=scan_name, variable=variable,positionIon=positionIon) + self.read_scan(label=scan_name, variable=variable,positionIon=positionIon) def plotScanTurbulenceDrives( self, label="drives1", figs=None, **kwargs_TGLFscanPlot @@ -3228,7 +2631,7 @@ def plotScanTurbulenceDrives( kwargs_TGLFscanPlot.pop("figs", None) - self.plotScan( + self.plot_scan( labels=labels, figs=figs1, variableLabel="X", @@ -3237,13 +2640,13 @@ def plotScanTurbulenceDrives( ) kwargs_TGLFscanPlot["plotTGLFs"] = False - self.plotScan( + self.plot_scan( labels=labels, figs=figs2, variableLabel="X", **kwargs_TGLFscanPlot ) def runAnalysis( self, - subFolderTGLF="analysis1", + subfolder="analysis1", label="analysis1", analysisType="chi_e", trace=[50.0, 174.0], @@ -3280,14 +2683,14 @@ def runAnalysis( np.linspace(1, 1 + variation / 2, 6)[1:], ) - self.runScan( - subFolderTGLF, + self.run_scan( + subfolder, varUpDown=varUpDown, variable=self.variable, **kwargs_TGLFrun, ) - self.readScan(label=label, variable=self.variable) + self.read_scan(label=label, variable=self.variable) if analysisType == "chi_e": Te_prof = self.NormalizationSets["SELECTED"]["Te_keV"] @@ -3304,7 +2707,7 @@ def runAnalysis( rho = self.NormalizationSets["SELECTED"]["rho"] a = self.NormalizationSets["SELECTED"]["rmin"][-1] - x = self.scans[label]["xV"] + x = self.scans[label]["scanned_variable"] yV = self.scans[label][self.variable_y] self.scans[label]["chi_inc"] = [] @@ -3349,27 +2752,27 @@ def runAnalysis( print(f"*** Running D and V analysis for trace ({fimp:.1e}) species with Z={trace[0]:.1f}, A={trace[1]:.1f}") - self.inputsTGLF_orig = copy.deepcopy(self.inputsTGLF) + self.inputs_files_orig = copy.deepcopy(self.inputs_files) # ------------------------ # Add trace impurity # ------------------------ - for irho in self.inputsTGLF: - position = self.inputsTGLF[irho].addTraceSpecie(Z, A, AS=fimp) + for irho in self.inputs_files: + position = self.inputs_files[irho].addTraceSpecie(Z, A, AS=fimp) 
self.variable = f"RLNS_{position}" - self.runScan( - subFolderTGLF, + self.run_scan( + subfolder, varUpDown=varUpDown, variable=self.variable, **kwargs_TGLFrun, ) - self.readScan(label=label, variable=self.variable, positionIon=position) + self.read_scan(label=label, variable=self.variable, positionIon=position) - x = self.scans[label]["xV"] + x = self.scans[label]["scanned_variable"] yV = self.scans[label]["Gi"] self.variable_y = "Gi" @@ -3401,7 +2804,7 @@ def runAnalysis( self.scans[label]["VoD"].append(V / D) # Back to original (not trace) - self.inputsTGLF = self.inputsTGLF_orig + self.inputs_files = self.inputs_files_orig def plotAnalysis(self, labels=["analysis1"], analysisType="chi_e", figs=None): if figs is None: @@ -3419,7 +2822,7 @@ def plotAnalysis(self, labels=["analysis1"], analysisType="chi_e", figs=None): variableLabel = "RLTS_2" elif analysisType == "Z": variableLabel = self.variable - self.plotScan( + self.plot_scan( labels=labels, figs=[fig2, fig2e, fig3], variableLabel=variableLabel ) @@ -3461,11 +2864,11 @@ def plotAnalysis(self, labels=["analysis1"], analysisType="chi_e", figs=None): ) ) - xV = self.scans[label]["xV"][irho] + xV = self.scans[label]["scanned_variable"][irho] Qe = self.scans[label][self.scans[label]["var_y"]][irho] xgrid = self.scans[label]["x_grid"][irho] ygrid = np.array(self.scans[label]["y_grid"][irho]) - xba = self.scans[label]["xV"][irho][ + xba = self.scans[label]["scanned_variable"][irho][ self.scans[label]["positionBase"] ] yba = np.interp(xba, xV, Qe) @@ -3613,7 +3016,7 @@ def plotAnalysis(self, labels=["analysis1"], analysisType="chi_e", figs=None): ax = ax00 ax.plot( - self.scans[label]["xV"][irho], + self.scans[label]["scanned_variable"][irho], np.array(self.scans[label]["Gi"][irho]), "o-", c=col, @@ -3626,7 +3029,7 @@ def plotAnalysis(self, labels=["analysis1"], analysisType="chi_e", figs=None): lw=0.5, c=col, ) - # ax.axvline(x=self.scans[label]['xV'][irho][self.scans[label]['positionBase']],ls='--',c=col,lw=1.) 
+ # ax.axvline(x=self.scans[label]['scanned_variable'][irho][self.scans[label]['positionBase']],ls='--',c=col,lw=1.) cont += 1 @@ -3654,10 +3057,10 @@ def plotAnalysis(self, labels=["analysis1"], analysisType="chi_e", figs=None): rho_mod = np.append([0], rho) aLn = np.append([0], y) import torch - from mitim_modules.powertorch.physics import CALCtools + from mitim_modules.powertorch.utils import CALCtools BC = 1.0 - T = CALCtools.integrateGradient( + T = CALCtools.integration_Lx( torch.from_numpy(rho_mod).unsqueeze(0), torch.from_numpy(aLn).unsqueeze(0), BC, @@ -3718,10 +3121,8 @@ def updateConvolution(self): ) else: for i in self.latest_inputsFileTGLFDict: - if "DRMAJDX_LOC" in self.latest_inputsFileTGLFDict[i].geom: - self.DRMAJDX_LOC[i] = self.latest_inputsFileTGLFDict[i].geom[ - "DRMAJDX_LOC" - ] + if "DRMAJDX_LOC" in self.latest_inputsFileTGLFDict[i].plasma: + self.DRMAJDX_LOC[i] = self.latest_inputsFileTGLFDict[i].plasma["DRMAJDX_LOC"] else: self.DRMAJDX_LOC[i] = 0.0 print( @@ -3737,7 +3138,7 @@ def updateConvolution(self): ) = GACODEdefaults.convolution_CECE(self.d_perp_dict, dRdx=self.DRMAJDX_LOC) -def completeVariation(setVariations, species): +def completeVariation_TGLF(setVariations, species): ions_info = species.ions_info setVariations_new = copy.deepcopy(setVariations) @@ -3776,82 +3177,11 @@ def completeVariation(setVariations, species): return setVariations_new - -# ~~~~~~~~~ Input class - - -def changeANDwrite_TGLF( - rhos, - inputs0, - FolderTGLF, - TGLFsettings=None, - extraOptions={}, - multipliers={}, - ApplyCorrections=True, - Quasineutral=False, -): - """ - Received inputs classes and gives text. 
- ApplyCorrections refer to removing ions with too low density and that are fast species - """ - - inputs = copy.deepcopy(inputs0) - - modInputTGLF = {} - ns_max = [] - for i, rho in enumerate(rhos): - print(f"\t- Changing input file for rho={rho:.4f}") - NS = inputs[rho].plasma["NS"] - inputTGLF_rho = GACODErun.modifyInputs( - inputs[rho], - Settings=TGLFsettings, - extraOptions=extraOptions, - multipliers=multipliers, - position_change=i, - addControlFunction=GACODEdefaults.addTGLFcontrol, - NS=NS, - ) - - newfile = FolderTGLF / f"input.tglf_{rho:.4f}" - - if TGLFsettings is not None: - # Apply corrections - if ApplyCorrections: - print("\t- Applying corrections") - inputTGLF_rho.removeLowDensitySpecie() - inputTGLF_rho.removeFast() - - # Ensure that plasma to run is quasineutral - if Quasineutral: - inputTGLF_rho.ensureQuasineutrality() - else: - print('\t- Not applying corrections nor quasineutrality because "TGLFsettings" is None') - - inputTGLF_rho.writeCurrentStatus(file=newfile) - - modInputTGLF[rho] = inputTGLF_rho - - ns_max.append(inputs[rho].plasma["NS"]) - - # Convert back to a string because that's how runTGLFproduction operates - inputFileTGLF = inputToVariable(FolderTGLF, rhos) - - if (np.diff(ns_max) > 0).any(): - print( - "> Each radial location has its own number of species... probably because of removal of fast or low density...", - typeMsg="w", - ) - print( - "\t * Reading of TGLF results will fail... 
consider doing something before launching run", - typeMsg="q", - ) - - return inputFileTGLF, modInputTGLF - - def reduceToControls(dict_all): - controls, plasma, geom = {}, {}, {} + controls, plasma = {}, {} for ikey in dict_all: + + # Plasma if ikey in [ "VEXB", "VEXB_SHEAR", @@ -3865,41 +3195,30 @@ def reduceToControls(dict_all): ]: plasma[ikey] = dict_all[ikey] - elif (len(ikey.split("_")) > 1) and (ikey.split("_")[-1] in ["SA", "LOC"]): - geom[ikey] = dict_all[ikey] + # Geometry + elif (len(ikey.split("_")) > 1) and ( (ikey.split("_")[-1] in ["SA", "LOC"]) or (ikey.split("_")[0] in ["SHAPE"]) ): + plasma[ikey] = dict_all[ikey] + # Controls else: controls[ikey] = dict_all[ikey] - return controls, plasma, geom - + return controls, plasma -class TGLFinput: +class TGLFinput(SIMtools.GACODEinput): def __init__(self, file=None): - self.file = IOtools.expandPath(file) if isinstance(file, (str, Path)) else None - - if self.file is not None and self.file.exists(): - with open(self.file, "r") as f: - lines = f.readlines() - file_txt = "".join(lines) - else: - file_txt = "" - input_dict = GACODErun.buildDictFromInput(file_txt) - - self.process(input_dict) - - @classmethod - def initialize_in_memory(cls, input_dict): - instance = cls() - instance.process(input_dict) - return instance + super().__init__( + file=file, + controls_file= __mitimroot__ / "templates" / "input.tglf.controls", + code = 'TGLF', + n_species = 'NS' + ) def process(self, input_dict): # Get number of recorded species - self.num_recorded = 0 - if "NS" in input_dict: - self.num_recorded = int(input_dict["NS"]) + if self.n_species in input_dict: + self.num_recorded = int(input_dict[self.n_species]) # Species ----------- self.species = {} @@ -3928,7 +3247,7 @@ def process(self, input_dict): specie[var] = 0.0 self.species[i + 1] = specie - self.controls, self.plasma, self.geom = reduceToControls(controls_all) + self.controls, self.plasma = reduceToControls(controls_all) self.processSpecies() def 
processSpecies(self, MinMultiplierToBeFast=2.0): @@ -3953,6 +3272,7 @@ def processSpecies(self, MinMultiplierToBeFast=2.0): } thermal_indeces = [1, 2] + fast_indeces = [] for i in range(len(self.species) - 2): TiTe = self.species[3 + i]["TAUS"] if TiTe < thrTemperatureRatio: @@ -3960,19 +3280,34 @@ def processSpecies(self, MinMultiplierToBeFast=2.0): thermal_indeces.append(3 + i) else: self.ions_info[3 + i] = {"type": "fast"} + fast_indeces.append(3 + i) self.ions_info["thermal_list"] = thermal_indeces - self.ions_info["thermal_list_extras"] = thermal_indeces[ - 2: - ] # remove electrons and mains - + self.ions_info["thermal_list_extras"] = thermal_indeces[2:] # remove electrons and mains + + self.ions_info["fast_list"] = fast_indeces + self.onlyControl = False + else: - print( - "\t- No species in this input.tglf (it is either a controls-only file or there was a problem generating it)" - ) + print("\t- No species in this input.tglf (it is either a controls-only file or there was a problem generating it)") self.onlyControl = True + def anticipate_problems(self): + + threshold = 1e-10 + + minn = [] + for cont, ip in enumerate(self.species): + if (cont <= self.num_recorded) and ( + self.species[ip]["AS"] < threshold + ): + minn.append(ip) + + if len(minn) > 0: + print(f"* Ions in positions {ip} have a relative density lower than {threshold}, which can cause problems",typeMsg="q") + + def isThePlasmaDT(self): """ First two ions are D and T? 
@@ -3986,7 +3321,7 @@ def isThePlasmaDT(self): return np.abs(mrat - 1.5) < 0.01 - def removeFast(self): + def remove_fast(self): self.processSpecies() i = 1 while i <= len(self.species): @@ -4043,19 +3378,11 @@ def ensureQuasineutrality(self): diff = self.calcualteQuasineutralityError() print(f"\t- Oiriginal quasineutrality error: {diff:.1e}", typeMsg="i") - print( - f"\t- Modifying species {speciesMod} to ensure quasineutrality", - typeMsg="i", - ) + print(f"\t- Modifying species {speciesMod} to ensure quasineutrality", typeMsg="i") for i in speciesMod: self.species[i]["AS"] -= diff / self.species[i]["ZS"] / len(speciesMod) self.processSpecies() - print( - "\t- New quasineutrality error: {0:.1e}".format( - self.calcualteQuasineutralityError() - ), - typeMsg="i", - ) + print("\t- New quasineutrality error: {0:.1e}".format(self.calcualteQuasineutralityError()), typeMsg="i") def calcualteQuasineutralityError(self): fiZi = 0 @@ -4099,56 +3426,64 @@ def addTraceSpecie(self, ZS, MASS, AS=1e-6, position=None, increaseNS=True, posi return position - def writeCurrentStatus(self, file=None): - print("\t- Writting TGLF input file") + def write_state(self, file=None): maxSpeciesTGLF = 6 # TGLF cannot handle more than 6 species if file is None: file = self.file + # Local formatter: floats -> 6 significant figures in exponential (uppercase), + # ints stay as ints, bools as 0/1, sequences space-separated with same rule. 
+ def _fmt_num(x): + import numpy as _np + if isinstance(x, (bool, _np.bool_)): + return "True" if x else "False" + if isinstance(x, (_np.floating, float)): + # 6 significant figures in exponential => 5 digits after decimal + return f"{float(x):.5E}" + if isinstance(x, (_np.integer, int)): + return f"{int(x)}" + return str(x) + + def _fmt_value(val): + import numpy as _np + if isinstance(val, (list, tuple, _np.ndarray)): + # Flatten numpy arrays but keep ordering; join with spaces + if isinstance(val, _np.ndarray): + flat = val.flatten().tolist() + else: + flat = list(val) + return " ".join(_fmt_num(v) for v in flat) + return _fmt_num(val) + with open(file, "w") as f: - f.write( - "#-------------------------------------------------------------------------\n" - ) - f.write( - "# TGLF input file modified by MITIM framework (Rodriguez-Fernandez, 2020)\n" - ) - f.write( - "#-------------------------------------------------------------------------" - ) + f.write("#-------------------------------------------------------------------------\n") + f.write(f"# {self.code} input file modified by MITIM {mitim_version}\n") + f.write("#-------------------------------------------------------------------------") f.write("\n\n# Control parameters\n") f.write("# ------------------\n\n") for ikey in self.controls: var = self.controls[ikey] - f.write(f"{ikey.ljust(23)} = {var}\n") + f.write(f"{ikey.ljust(23)} = {_fmt_value(var)}\n") - f.write("\n\n# Geometry parameters\n") - f.write("# ------------------\n\n") - for ikey in self.geom: - var = self.geom[ikey] - f.write(f"{ikey.ljust(23)} = {var}\n") - - f.write("\n\n# Plasma parameters\n") + f.write("\n\n# Plasma/Geometry parameters\n") f.write("# ------------------\n\n") for ikey in self.plasma: - if ikey == "NS": + if ikey == self.n_species: var = np.min([self.plasma[ikey], maxSpeciesTGLF]) + if var < self.plasma[ikey]: + print(f"\t- Maximum number of species in TGLF reached, not considering after {maxSpeciesTGLF} 
species",typeMsg="w",) else: var = self.plasma[ikey] - f.write(f"{ikey.ljust(23)} = {var}\n") + f.write(f"{ikey.ljust(23)} = {_fmt_value(var)}\n") f.write("\n\n# Species\n") f.write("# -------\n") for ikey in self.species: if ikey > maxSpeciesTGLF: - print( - "\t- Maximum number of species in TGLF reached, not considering after {0} species".format( - maxSpeciesTGLF - ), - typeMsg="w", - ) + print(f"\t- Maximum number of species in TGLF reached, not considering after {maxSpeciesTGLF} species",typeMsg="w",) break if ikey == 1: extralab = " (electrons)" @@ -4159,7 +3494,7 @@ def writeCurrentStatus(self, file=None): f.write(f"\n# Specie #{ikey}{extralab}\n") for ivar in self.species[ikey]: ikar = f"{ivar}_{ikey}" - f.write(f"{ikar.ljust(12)} = {self.species[ikey][ivar]}\n") + f.write(f"{ikar.ljust(12)} = {_fmt_value(self.species[ikey][ivar])}\n") print(f"\t\t~ File {IOtools.clipstr(file)} written") @@ -4344,19 +3679,8 @@ def plotPlasma(self, axs=None, color="b", legends=True): ax.plot(x, y, "-o", lw=1, color=color) ax.set_xticks(x) ax.set_xticklabels(x, rotation=90, fontsize=6) - ax.set_ylabel("PLASMA") - - ax = axs[1] + ax.set_ylabel("PLASMA & GEOMETRY") - x, y = [], [] - for i in self.geom: - x.append(i) - y.append(self.geom[i]) - x, y = np.array(x), np.array(y) - ax.plot(x, y, "-o", lw=1, color=color) - ax.set_xticks(x) - ax.set_xticklabels(x, rotation=90, fontsize=6) - ax.set_ylabel("GEOMETRY") def plotControls(self, axs=None, color="b", markersize=5): if axs is None: @@ -4371,7 +3695,7 @@ def plotControls(self, axs=None, color="b", markersize=5): cont = 0 x, y = x1, y1 - dicts = [self.controls] # ,self.geom,self.plasma] + dicts = [self.controls] for dictT in dicts: for i in dictT: @@ -4424,25 +3748,6 @@ def identifySpecie(dict_species, dict_find): return found_index -# From file to dict - - -def inputToVariable(finalFolder, rhos): - """ - Entire text file to variable - """ - - inputFilesTGLF = {} - for cont, rho in enumerate(rhos): - fileN = finalFolder / 
f"input.tglf_{rho:.4f}" - - with open(fileN, "r") as f: - lines = f.readlines() - inputFilesTGLF[rho] = "".join(lines) - - return inputFilesTGLF - - # ~~~~~~~~~~~~~ Functions to handle results @@ -4459,51 +3764,49 @@ def readTGLFresults( for rho in rhos: # Read full folder - TGLFout = TGLFoutput( + output = TGLFoutput( FolderGACODE_tmp, suffix=f"_{rho:.4f}" if suffix is None else suffix, require_all_files=require_all_files, ) # Unnormalize - TGLFout.unnormalize( + output.unnormalize( NormalizationSets["SELECTED"], rho=rho, convolution_fun_fluct=convolution_fun_fluct, factorTot_to_Perp=factorTot_to_Perp, ) - TGLFstd_TGLFout.append(TGLFout) - inputclasses.append(TGLFout.inputclass) + TGLFstd_TGLFout.append(output) + inputclasses.append(output.inputclass) - parse = GACODErun.buildDictFromInput(TGLFout.inputFileTGLF) + parse = SIMtools.buildDictFromInput(output.inputFile) parsed.append(parse) results = { "inputclasses": inputclasses, "parsed": parsed, - "TGLFout": TGLFstd_TGLFout, + "output": TGLFstd_TGLFout, "x": np.array(rhos), } return results -class TGLFoutput: - def __init__(self, FolderGACODE, suffix="",require_all_files=True): +class TGLFoutput(SIMtools.GACODEoutput): + def __init__(self, FolderGACODE, suffix="", require_all_files=True): + super().__init__() + self.FolderGACODE, self.suffix = FolderGACODE, suffix - if suffix == "": - print( - f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} without suffix" - ) + if self.suffix == "": + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} without suffix") else: - print( - f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} with suffix {suffix}" - ) + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} with suffix {self.suffix}") self.inputclass = TGLFinput(file=self.FolderGACODE / f"input.tglf{self.suffix}") - self.roa = self.inputclass.geom["RMIN_LOC"] + self.roa = self.inputclass.plasma["RMIN_LOC"] 
self.read(require_all_files=require_all_files) self.postprocess() @@ -4512,7 +3815,7 @@ def __init__(self, FolderGACODE, suffix="",require_all_files=True): def postprocess(self): coeff, klow = 0.0, 0.8 - self.etas = processGrowthRates( + etas = processGrowthRates( self.ky, self.g[0, :], self.f[0, :], @@ -4522,6 +3825,16 @@ def postprocess(self): coeff=coeff, ) + self.eta_ITGETG = etas["metrics"]["eta_ITGETG"] + self.eta_ITGTEM = etas["metrics"]["eta_ITGTEM"] + self.g_lowk_max = etas["metrics"]["g_lowk_max"] + self.f_lowk_max = etas["metrics"]["f_lowk_max"] + self.k_lowk_max = etas["metrics"]["k_lowk_max"] + + self.g_ITG_max = etas["ITG"]["g_max"] + self.g_TEM_max = etas["TEM"]["g_max"] + self.g_ETG_max = etas["ETG"]["g_max"] + self.QeES = np.sum(self.SumFlux_Qe_phi) self.QeEM = np.sum(self.SumFlux_Qe_a) self.QiES = np.sum(self.SumFlux_Qi_phi) @@ -4529,17 +3842,16 @@ def postprocess(self): self.GeES = np.sum(self.SumFlux_Ge_phi) self.GeEM = np.sum(self.SumFlux_Ge_a) + # Redefined because of very specific TGLF stuff def read(self,require_all_files=True): # -------------------------------------------------------------------------------- # Ions to include? e.g. 
IncludeExtraIonsInQi = [2,3,4] -> This will sum to ion 1 # -------------------------------------------------------------------------------- - IncludeExtraIonsInQi = ( - [i - 1 for i in self.inputclass.ions_info["thermal_list_extras"]] - if self.inputclass is not None - else [] - ) + IncludeExtraIonsInQi = [i - 1 for i in self.inputclass.ions_info["thermal_list_extras"]] if self.inputclass is not None else [] self.ions_included = (1,) + tuple(IncludeExtraIonsInQi) + + self.fast_included = tuple([i-1 for i in self.inputclass.ions_info["fast_list"]]) if self.inputclass is not None else () # ------------------------------------------------------------------------ # Fluxes @@ -4555,16 +3867,35 @@ def read(self,require_all_files=True): self.Ge = data[0, 0] self.Qe = data[1, 0] + self.Me = data[2, 0] + self.Se = data[3, 0] self.GiAll = data[0, 1:] self.QiAll = data[1, 1:] + self.MiAll = data[2, 1:] + self.SiAll = data[3, 1:] - print( - f"\t\t- For Qi, summing contributions from ions {self.ions_included} (#0 is e-)", - typeMsg="i", - ) + print(f"\t\t- For Qi, summing contributions from ions {self.ions_included} (#0 is e-)",typeMsg="i",) self.Gi = data[0, self.ions_included].sum() self.Qi = data[1, self.ions_included].sum() + + if len(self.fast_included)>0: + print(f"\t\t- For Qifast, summing contributions from fast ions {self.fast_included} (#0 is e-)",typeMsg="i",) + self.Qifast = data[1, self.fast_included].sum() + else: + print(f"\t\t- No fast ions included",typeMsg="i",) + self.Qifast = 0.0 + + signMt = - self.inputclass.plasma['SIGN_IT'] # Following tgyro_flux.f90 + print(f"\t\t- Sign of Mt given by toroidal current direction (SIGN_IT={-signMt}): {signMt}",typeMsg="i",) + self.Me *= signMt + self.MiAll *= signMt + + print("\t\t- For Mt, summing all species contributions",typeMsg="i",) + self.Mt = self.Me + self.MiAll.sum() + + print("\t\t- For St, summing all ion species contributions",typeMsg="i",) + self.Si = self.SiAll.sum() if require_all_files: @@ -5010,16 
+4341,16 @@ def read(self,require_all_files=True): with open(self.FolderGACODE / ("input.tglf" + self.suffix), "r") as fi: lines = fi.readlines() - self.inputFileTGLF = "".join(lines) + self.inputFile = "".join(lines) - def unnormalize( - self, normalization, rho=None, convolution_fun_fluct=None, factorTot_to_Perp=1.0 - ): + def unnormalize(self, normalization, rho=None, convolution_fun_fluct=None, factorTot_to_Perp=1.0): if normalization is not None: rho_x = normalization["rho"] roa_x = normalization["roa"] q_gb = normalization["q_gb"] g_gb = normalization["g_gb"] + pi_gb = normalization["pi_gb"] + s_gb = normalization["s_gb"] rho_s = normalization["rho_s"] a = normalization["rmin"][-1] @@ -5036,9 +4367,14 @@ def unnormalize( self.Qe_unn = self.Qe * q_gb[ir] self.Qi_unn = self.Qi * q_gb[ir] + self.Qifast_unn = self.Qifast * q_gb[ir] + self.QiAll_unn = self.QiAll * q_gb[ir] self.Ge_unn = self.Ge * g_gb[ir] self.GiAll_unn = self.GiAll * g_gb[ir] + self.Mt_unn = self.Mt * pi_gb[ir] + self.Se_unn = self.Se * s_gb[ir] + self.AmplitudeSpectrum_Te_level = GACODErun.obtainFluctuationLevel( self.ky, self.AmplitudeSpectrum_Te, @@ -5210,7 +4546,7 @@ def plotTGLF_Summary(self, c="b", label="", axs=None, irho_cont=0): try: gammaExB = np.abs( - self.inputsTGLF[self.rhos[irho_cont]].plasma["VEXB_SHEAR"] + self.inputs_files[self.rhos[irho_cont]].plasma["VEXB_SHEAR"] ) if gammaExB > 1e-5: GRAPHICStools.drawLineWithTxt( @@ -6382,83 +5718,3 @@ def createCombinedRuns(tglfs=(), new_names=(), results_names=(), isItScan=False) normalizations[new_names[i]] = tglfs[i].NormalizationSets return tglf, normalizations - - -def cold_start_checker( - rhos, - ResultsFiles, - FolderTGLF, - cold_start=False, - print_each_time=False, -): - """ - This function checks if the TGLF inputs are already in the folder. 
If they are, it returns True - """ - cont_each = 0 - if cold_start: - rhosEvaluate = rhos - else: - rhosEvaluate = [] - for ir in rhos: - existsRho = True - for j in ResultsFiles: - ffi = FolderTGLF / f"{j}_{ir:.4f}" - existsThis = ffi.exists() - existsRho = existsRho and existsThis - if not existsThis: - if print_each_time: - print(f"\t* {ffi} does not exist") - else: - cont_each += 1 - if not existsRho: - rhosEvaluate.append(ir) - - if not print_each_time and cont_each > 0: - print(f'\t* {cont_each} files from expected set are missing') - - if len(rhosEvaluate) < len(rhos) and len(rhosEvaluate) > 0: - print( - "~ Not all radii are found, but not removing folder and running only those that are needed", - typeMsg="i", - ) - - return rhosEvaluate - - -def anticipate_problems_func( - latest_inputsFileTGLFDict, rhosEvaluate, slurm_setup, launchSlurm -): - - # ----------------------------------- - # ------ Check density for problems - # ----------------------------------- - - threshold = 1e-10 - - minn = [] - for irho in latest_inputsFileTGLFDict: - for cont, ip in enumerate(latest_inputsFileTGLFDict[irho].species): - if (cont <= latest_inputsFileTGLFDict[irho].plasma["NS"]) and ( - latest_inputsFileTGLFDict[irho].species[ip]["AS"] < threshold - ): - minn.append([irho, ip]) - - if len(minn) > 0: - print( - f"* Ions in positions [rho,pos] {minn} have a relative density lower than {threshold}, which can cause problems", - typeMsg="q", - ) - - # ----------------------------------- - # ------ Check cores problem - # ----------------------------------- - - expected_allocated_cores = int(len(rhosEvaluate) * slurm_setup["cores"]) - - warning = 32 * 2 - - if launchSlurm: - print( - f'\t- Slurm job will be submitted with {expected_allocated_cores} cores ({len(rhosEvaluate)} radii x {slurm_setup["cores"]} cores/radius)', - typeMsg="" if expected_allocated_cores < warning else "q", - ) diff --git a/src/mitim_tools/gacode_tools/TGYROtools.py 
b/src/mitim_tools/gacode_tools/TGYROtools.py index 139ec674..9dfd1676 100644 --- a/src/mitim_tools/gacode_tools/TGYROtools.py +++ b/src/mitim_tools/gacode_tools/TGYROtools.py @@ -8,19 +8,11 @@ GRAPHICStools, PLASMAtools, ) -from mitim_tools.gacode_tools import TGLFtools, PROFILEStools +from mitim_tools.gacode_tools import TGLFtools from mitim_tools.gacode_tools.utils import GACODEinterpret, GACODEdefaults, GACODErun +from mitim_tools.simulation_tools import SIMtools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed -import time - -try: - from mitim_tools.gacode_tools.utils import PORTALSinteraction -except: - print( - "- I could not import PORTALSinteraction, likely a consequence of botorch incompatbility", - typeMsg="w", - ) """ Same philosophy as the TGLFtools @@ -149,7 +141,8 @@ def prep( ) self.file_input_profiles = self.FolderGACODE / "input.gacode" - self.profiles = PROFILEStools.PROFILES_GACODE(self.file_input_profiles) + from mitim_tools.gacode_tools import PROFILEStools + self.profiles = PROFILEStools.gacode_state(self.file_input_profiles) if correctPROFILES: self.profiles.correct(write=True) @@ -258,9 +251,7 @@ def run( Tepred, Tipred, nepred = PredictionSet self.FolderTGYRO = IOtools.expandPath(self.FolderGACODE / subFolderTGYRO) - self.FolderTGYRO_tmp = ( - self.FolderTGYRO / "tmp_tgyro_run" - ) # Folder to run TGYRO on (or to retrieve the raw outputs from a cluster) + self.FolderTGYRO_tmp = self.FolderTGYRO / "tmp_tgyro_run" # Folder to run TGYRO on (or to retrieve the raw outputs from a cluster) inputclass_TGYRO = TGYROinput( input_profiles=self.profiles, @@ -375,20 +366,19 @@ def run( f"\t\t- Creating only-controls input.tglf file in {IOtools.clipstr(str(self.FolderTGYRO_tmp.resolve()))}input.tglf" ) inputclass_TGLF = TGLFtools.TGLFinput() - inputclass_TGLF = GACODErun.modifyInputs( + inputclass_TGLF = SIMtools.modifyInputs( inputclass_TGLF, - Settings=TGLFsettings, + code_settings=TGLFsettings, 
extraOptions=extraOptionsTGLF, addControlFunction=GACODEdefaults.addTGLFcontrol, NS=self.loc_n_ion + 1, ) - inputclass_TGLF.writeCurrentStatus(file=self.FolderTGYRO_tmp / "input.tglf") + inputclass_TGLF.write_state(file=self.FolderTGYRO_tmp / "input.tglf") # ----------------------------------- # ------ Write input profiles # ----------------------------------- - print(f"\t\t- Using input.profiles from {IOtools.clipstr(self.profiles.file)}") fil = "input.gacode" if self.profiles.profiles['rho(-)'][0] > 0.0: @@ -399,7 +389,7 @@ def run( if "z_eff(-)" not in self.profiles.profiles: self.profiles.profiles["z_eff(-)"] = self.profiles.derived["Zeff"] - self.profiles.writeCurrentStatus(file=self.FolderTGYRO_tmp / f"{fil}") + self.profiles.write_state(file=self.FolderTGYRO_tmp / f"{fil}") # ----------------------------------- # ------ Create TGYRO file @@ -418,7 +408,7 @@ def run( special_radii=special_radii_mod, ) - inputclass_TGYRO.writeCurrentStatus(file=self.FolderTGYRO_tmp / "input.tgyro") + inputclass_TGYRO.write_state(file=self.FolderTGYRO_tmp / "input.tgyro") # ----------------------------------- # ------ Check density for problems @@ -468,14 +458,10 @@ def run( ------------------------------------------------------------------------------------------------------------------------ """ if modify_inputgacodenew: - print( - "\t- It was requested that input.gacode.new is modified according to what TypeTarget was", - typeMsg="i", - ) + print("\t- It was requested that input.gacode.new is modified according to what TypeTarget was",typeMsg="i",) - inputgacode_new = PROFILEStools.PROFILES_GACODE( - self.FolderTGYRO_tmp / "input.gacode.new" - ) + from mitim_tools.gacode_tools import PROFILEStools + inputgacode_new = PROFILEStools.gacode_state(self.FolderTGYRO_tmp / "input.gacode.new") if TGYRO_physics_options["TypeTarget"] < 3: for ikey in [ @@ -511,7 +497,7 @@ def run( inputgacode_new.profiles["rho(-)"] * 0.0 ) - inputgacode_new.writeCurrentStatus() + 
inputgacode_new.write_state() # ------------------------------------------------------------------------------------------------------------------------ # Copy those files that I'm interested in, plus the extra file, into the main folder @@ -549,7 +535,8 @@ def read(self, label="tgyro1", folder=None, file_input_profiles=None): else: prof = self.profiles else: - prof = PROFILEStools.PROFILES_GACODE(file_input_profiles) + from mitim_tools.gacode_tools import PROFILEStools + prof = PROFILEStools.gacode_state(file_input_profiles) self.results[label] = TGYROoutput(folder, profiles=prof) @@ -646,7 +633,7 @@ def grab_tglf_objects(self, subfolder="tglf_runs", fromlabel="tgyro1", rhos=None inputsTGLF[rho] = inputclass tglf = TGLFtools.TGLF(rhos=rhos) - tglf.prep( + tglf.prep_using_tgyro( self.FolderGACODE / subfolder, specificInputs=inputsTGLF, inputgacode=self.FolderTGYRO / "input.gacode", @@ -672,7 +659,7 @@ def runTGLF(self, fromlabel="tgyro1", rhos=None, cold_start=False): label = f"{self.nameRuns_default}_tglf1" self.tglf[fromlabel].run( - subFolderTGLF=f"{label}", + subfolder=f"{label}", TGLFsettings=None, ApplyCorrections=False, cold_start=cold_start, @@ -701,7 +688,7 @@ def runTGLFsensitivities(self, fromlabel="tgyro1", rho=0.5, cold_start=False): ) self.tglf[fromlabel].runScanTurbulenceDrives( - subFolderTGLF=f"{self.nameRuns_default}_tglf", + subfolder=f"{self.nameRuns_default}_tglf", TGLFsettings=None, ApplyCorrections=False, cold_start=cold_start, @@ -719,7 +706,7 @@ def run_tglf_scan( cold_start=False, label="tgyro1", donotrun=False, - recalculatePTOT=True, + recalculate_ptot=True, ): """ onlyThermal will remove from the TGYRO run the fast species, so the resulting input.tglf files will not have @@ -751,7 +738,7 @@ def run_tglf_scan( "onlyThermal": onlyThermal, "quasineutrality": quasineutrality, "neoclassical": 0, # Do not run or check NEOTGYRO canno - "PtotType": int(not recalculatePTOT), # Recalculate Ptot or use what's there + "PtotType": int(not 
recalculate_ptot), # Recalculate Ptot or use what's there } # ------------------------------------------------------------ @@ -1059,9 +1046,7 @@ def plot(self, fn=None, labels=["tgyro1"], doNotShow=False, fn_color=None): ax.plot(res.roa[-1], res.Qi_sim[-1], "-o", c="b", markersize=3) axE.plot(res.roa[-1], res.Qi_res[-1], "-o", c="b", markersize=3) - ax.plot( - res.roa[-1], res.Ce_tar[-1], "--o", c="m", label="Qconv", markersize=3 - ) + ax.plot(res.roa[-1], res.Ce_tar[-1], "--o", c="m", label="Qconv", markersize=3) ax.plot(res.roa[-1], res.Ce_sim[-1], "-o", c="m", markersize=3) axE.plot(res.roa[-1], np.abs(res.Ce_res[-1]), "-o", c="m", markersize=3) @@ -1212,10 +1197,9 @@ class TGYROoutput: def __init__(self, FolderTGYRO, profiles=None): self.FolderTGYRO = FolderTGYRO - if (profiles is None) and (FolderTGYRO / f"input.gacode").exists(): - profiles = PROFILEStools.PROFILES_GACODE( - FolderTGYRO / f"input.gacode", calculateDerived=False - ) + from mitim_tools.gacode_tools import PROFILEStools + if (profiles is None) and (FolderTGYRO / "input.gacode").exists(): + profiles = PROFILEStools.gacode_state(FolderTGYRO / f"input.gacode", derive_quantities=False) self.profiles = profiles @@ -1227,12 +1211,9 @@ def __init__(self, FolderTGYRO, profiles=None): self.readNu() self.readProfiles() - calculateDerived = True + derive_quantities = True try: - self.profiles_final = PROFILEStools.PROFILES_GACODE( - self.FolderTGYRO / f"input.gacode.new", - calculateDerived=calculateDerived, - ) + self.profiles_final = PROFILEStools.gacode_state(self.FolderTGYRO / "input.gacode.new",derive_quantities=derive_quantities,) except: self.profiles_final = None @@ -1441,14 +1422,14 @@ def readFluxes(self): # Errors - Constructed outside of TGYRO call (e.g. 
powerstate) # *************************************************************** - if not (self.FolderTGYRO / f"out.tgyro.flux_e_stds").exists(): + if not (self.FolderTGYRO / "out.tgyro.flux_e_stds").exists(): self.tgyro_stds = False else: print("\t- Errors in TGYRO fluxes and targets found, adding to class") self.tgyro_stds = True - file = self.FolderTGYRO / f"out.tgyro.flux_e_stds" + file = self.FolderTGYRO / "out.tgyro.flux_e_stds" ( _, self.GeGB_sim_neo_stds, @@ -2100,9 +2081,7 @@ def derived(self): Note: This is only valid in the converged case??????????????? """ - if (self.profiles_final is not None) and ( - "derived" in self.profiles_final.__dict__ - ): + if (self.profiles_final is not None) and ("derived" in self.profiles_final.__dict__): prof = self.profiles_final elif (self.profiles is not None) and ("derived" in self.profiles.__dict__): prof = self.profiles @@ -2116,9 +2095,7 @@ def derived(self): self.Q_better = self.P_fusT_tgyro / self.P_inT - if (self.profiles_final is not None) and ( - "derived" in self.profiles_final.__dict__ - ): + if (self.profiles_final is not None) and ("derived" in self.profiles_final.__dict__): self.Q_best = self.profiles_final.derived["Q"] """ @@ -2170,10 +2147,7 @@ def useFineGridTargets(self, impurityPosition=1): ) # Profiles do not include ion fluxes for j in range(self.Gi_tar.shape[0]): - self.Gi_tar[j, i, :], self.Ci_tar[j, i, :] = ( - self.Ce_tar[i, :] * 0.0, - self.Ce_tar[i, :] * 0.0, - ) + self.Gi_tar[j, i, :], self.Ci_tar[j, i, :] = self.Ce_tar[i, :] * 0.0, self.Ce_tar[i, :] * 0.0 self.Mt_tar[i, :] = np.interp( rho_coarse, rho_fine, self.profiles_final.derived["mt_Jm2"] @@ -2185,9 +2159,6 @@ def useFineGridTargets(self, impurityPosition=1): self.Ge_tarMW = self.Ge_tar * self.dvoldr self.Ce_tarMW = self.Ce_tar * self.dvoldr - def TGYROmodeledVariables(self, *args, **kwargs): - return PORTALSinteraction.TGYROmodeledVariables(self, *args, **kwargs) - def plot(self, fn=None, label="", prelabel="", fn_color=None): if fn is 
None: from mitim_tools.misc_tools.GUItools import FigureNotebook @@ -3666,7 +3637,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles.derived["qe_fus_MWmiller"] + P = self.profiles.derived["qe_fus_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: @@ -3674,7 +3645,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles_final.derived["qe_fus_MWmiller"] + P = self.profiles_final.derived["qe_fus_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) GRAPHICStools.addDenseAxis(ax) @@ -3707,7 +3678,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles.derived["qe_aux_MWmiller"] + P = self.profiles.derived["qe_aux_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: @@ -3715,7 +3686,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles_final.derived["qe_aux_MWmiller"] + P = self.profiles_final.derived["qe_aux_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) GRAPHICStools.addDenseAxis(ax) @@ -3788,7 +3759,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = -1 * sign * self.profiles.derived["qe_rad_MWmiller"] + P = -1 * sign * self.profiles.derived["qe_rad_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: @@ -3796,7 +3767,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): 
self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles_final.derived["qe_rad_MWmiller"] + P = self.profiles_final.derived["qe_rad_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) ax.legend(prop={"size": 6}) @@ -3860,14 +3831,14 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = -self.profiles.derived["qe_exc_MWmiller"] + P = -self.profiles.derived["qe_exc_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: roa = ( self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = -self.profiles_final.derived["qe_exc_MWmiller"] + P = -self.profiles_final.derived["qe_exc_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) ax.legend(prop={"size": 6}) @@ -3905,14 +3876,14 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles.derived["qe_MWmiller"] + P = self.profiles.derived["qe_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: roa = ( self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles_final.derived["qe_MWmiller"] + P = self.profiles_final.derived["qe_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) ax.legend(prop={"size": 6}) @@ -3946,14 +3917,14 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles.derived["qi_fus_MWmiller"] + P = self.profiles.derived["qi_fus_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: roa = ( self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = 
self.profiles_final.derived["qi_fus_MWmiller"] + P = self.profiles_final.derived["qi_fus_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) GRAPHICStools.addDenseAxis(ax) @@ -3995,20 +3966,20 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles.derived["qi_aux_MWmiller"] + P = self.profiles.derived["qi_aux_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: roa = ( self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles_final.derived["qi_aux_MWmiller"] + P = self.profiles_final.derived["qi_aux_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) ax.plot( roa, - self.profiles_final.derived["qi_aux_MWmiller"] - + self.profiles_final.derived["qe_aux_MWmiller"], + self.profiles_final.derived["qi_aux_MW"] + + self.profiles_final.derived["qe_aux_MW"], "-.", c="y", lw=0.5, @@ -4034,8 +4005,8 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): / self.profiles.profiles["rmin(m)"][-1] ) P = ( - self.profiles.derived["qe_fus_MWmiller"] - + self.profiles.derived["qi_fus_MWmiller"] + self.profiles.derived["qe_fus_MW"] + + self.profiles.derived["qi_fus_MW"] ) ax.plot(roa, 5 * P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: @@ -4044,8 +4015,8 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): / self.profiles_final.profiles["rmin(m)"][-1] ) P = ( - self.profiles_final.derived["qe_fus_MWmiller"] - + self.profiles_final.derived["qi_fus_MWmiller"] + self.profiles_final.derived["qe_fus_MW"] + + self.profiles_final.derived["qi_fus_MW"] ) ax.plot(roa, 5 * P, "--", c="k", label="profiles_new (miller)", lw=1.0) @@ -4085,7 +4056,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) 
- P = self.profiles.derived["qe_exc_MWmiller"] + P = self.profiles.derived["qe_exc_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: @@ -4093,7 +4064,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles_final.derived["qe_exc_MWmiller"] + P = self.profiles_final.derived["qe_exc_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) GRAPHICStools.addDenseAxis(ax) @@ -4119,7 +4090,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles.derived["qi_MWmiller"] + P = self.profiles.derived["qi_MW"] ax.plot(roa, P, "--", c="green", label="profiles (miller)", lw=1.0) if self.profiles_final is not None: @@ -4127,7 +4098,7 @@ def plot(self, fn=None, label="", prelabel="", fn_color=None): self.profiles_final.profiles["rmin(m)"] / self.profiles.profiles["rmin(m)"][-1] ) - P = self.profiles_final.derived["qi_MWmiller"] + P = self.profiles_final.derived["qi_MW"] ax.plot(roa, P, "--", c="k", label="profiles_new (miller)", lw=1.0) GRAPHICStools.addDenseAxis(ax) @@ -4366,7 +4337,7 @@ def plotBalance(self, fig=None): axs.append(fig.add_subplot(grid[1, 1])) axs.append(fig.add_subplot(grid[1, 2])) - self.profiles_final.plotBalance( + self.profiles_final.plot_flows( axs=axs, limits=[self.roa[-1, 1], self.roa[-1, -1]] ) @@ -4439,20 +4410,12 @@ def plotConvergence(self, fig1=None): ) GRAPHICStools.addDenseAxis(ax) - # GRAPHICStools.autoscale_y(ax) - GRAPHICStools.addLegendApart( - ax, ratio=0.9, withleg=False, extraPad=0, loc="center left", size=6 - ) - # GRAPHICStools.addLegendApart(ax2,ratio=0.9,withleg=False,extraPad=0,loc='center left',size=6) + GRAPHICStools.addLegendApart(ax, ratio=0.9, withleg=False, extraPad=0, loc="center left", size=6) ax = ax10 - colsE = ( - 
GRAPHICStools.listColors() - ) # GRAPHICStools.colorTableFade(self.radii-1,startcolor='b',endcolor='b',alphalims=[0.3,1.0]) - colsI = ( - GRAPHICStools.listColors() - ) # GRAPHICStools.colorTableFade(self.radii-1,startcolor='r',endcolor='r',alphalims=[0.3,1.0]) + colsE = GRAPHICStools.listColors() + colsI = GRAPHICStools.listColors() for i in range(self.radii - 1): label = f"r/a={self.roa[0, i + 1]:.4f}" @@ -4501,26 +4464,17 @@ def plotConvergence(self, fig1=None): ax.set_xlim(left=0) ax.set_ylabel("Individual Residuals (GB)") ax.set_yscale("log") - # ax.legend(loc='best',prop={'size':5}) - GRAPHICStools.addLegendApart( - ax, ratio=0.9, withleg=True, extraPad=0, loc="center left", size=6 - ) - # ax2 = GRAPHICStools.addXaxis(ax,self.iterations,self.calls_solver,label='Calls to transport solver',whichticks=whichticks) - + GRAPHICStools.addLegendApart(ax, ratio=0.9, withleg=True, extraPad=0, loc="center left", size=6) GRAPHICStools.addDenseAxis(ax) - # GRAPHICStools.autoscale_y(ax) ax = ax01 - ax.plot( - self.iterations, self.residual_manual_real, "-s", color="b", markersize=5 - ) + ax.plot(self.iterations, self.residual_manual_real, "-s", color="b", markersize=5) ax.set_xlabel("Iterations") ax.set_xlim(left=0) ax.set_ylabel("Residual (real)") ax.set_yscale("log") GRAPHICStools.addDenseAxis(ax) - # GRAPHICStools.autoscale_y(ax) _ = GRAPHICStools.addXaxis( ax, @@ -4564,11 +4518,7 @@ def plotConvergence(self, fig1=None): ax.set_ylabel("Individual Residuals (real)") ax.set_yscale("log") - # ax2 = GRAPHICStools.addXaxis(ax,self.iterations,self.calls_solver,label='Calls to transport solver',whichticks=whichticks) - GRAPHICStools.addDenseAxis(ax) - # GRAPHICStools.autoscale_y(ax) - def plotAll(TGYROoutputs, labels=None, fn=None): if fn is None: @@ -4596,7 +4546,7 @@ def __init__(self, input_profiles, file=None, onlyThermal=False, limitSpecies=10 else: self.file_txt = "" - self.input_dict = GACODErun.buildDictFromInput(self.file_txt) + self.input_dict = 
SIMtools.buildDictFromInput(self.file_txt) # Species self.species = input_profiles.Species @@ -4611,7 +4561,7 @@ def __init__(self, input_profiles, file=None, onlyThermal=False, limitSpecies=10 ) self.loc_n_ion = spec["LOC_N_ION"] - def writeCurrentStatus(self, file=None): + def write_state(self, file=None): print("\t- Writting TGYRO input file") if file is None: diff --git a/src/mitim_tools/gacode_tools/scripts/compare_MXH3.py b/src/mitim_tools/gacode_tools/scripts/compare_MXH3.py index 227d45fa..b8928a99 100644 --- a/src/mitim_tools/gacode_tools/scripts/compare_MXH3.py +++ b/src/mitim_tools/gacode_tools/scripts/compare_MXH3.py @@ -12,7 +12,7 @@ """ file_input_gacode = sys.argv[1] -p = PROFILEStools.PROFILES_GACODE(file_input_gacode) +p = PROFILEStools.gacode_state(file_input_gacode) file_geq = sys.argv[2] g = GEQtools.MITIMgeqdsk(file_geq) @@ -25,7 +25,7 @@ g.plotFluxSurfaces(ax=ax, fluxes=ff, rhoPol=False, sqrt=True, color="r", plot1=False) -p.plotGeometry(ax=ax, surfaces_rho=ff, color="b") +p.plot_state_flux_surfaces(ax=ax, surfaces_rho=ff, color="b") ax.set_xlabel("R (m)") ax.set_ylabel("Z (m)") diff --git a/src/mitim_tools/gacode_tools/scripts/read_cgyro.py b/src/mitim_tools/gacode_tools/scripts/read_cgyro.py index facde86b..e908af39 100644 --- a/src/mitim_tools/gacode_tools/scripts/read_cgyro.py +++ b/src/mitim_tools/gacode_tools/scripts/read_cgyro.py @@ -1,32 +1,120 @@ import argparse +import pickle +from mitim_tools.gacode_tools.utils.CGYROutils import CGYROoutput +from xml.etree.ElementInclude import include +import matplotlib.pyplot as plt from IPython import embed -from mitim_tools.misc_tools import IOtools from mitim_tools.gacode_tools import CGYROtools +import os """ -e.g. plot_cgyro.py folder +e.g. 
read_cgyro.py folder """ def main(): parser = argparse.ArgumentParser() parser.add_argument("folders", type=str, nargs="*") + parser.add_argument("--suffixes", required=False, type=str, nargs="*", default=None) + parser.add_argument("--two", action="store_true", help="Include 2D plots") + parser.add_argument("--linear", action="store_true", help="Just a plot of the linear spectra") + parser.add_argument("--tmin", type=float, nargs="*", default=None, help="Minimum time to calculate mean and std") + parser.add_argument("--scan_subfolder_id" , type=str, nargs="*", default="KY", help="If reading a linear scan, the subfolders contain this common identifier") + parser.add_argument("--noplot", action="store_true", help="If set, it will not plot anything, just read the data.") + parser.add_argument("--pickle", action="store_true", help="If set, it will save the read data in a pickle file for faster reading next time.") + parser.add_argument("--minimal", action="store_true") + args = parser.parse_args() folders = args.folders + linear = args.linear + tmin = args.tmin + include_2D = args.two + skip_plotting = args.noplot + pkl = args.pickle + minimal = args.minimal + suffixes = args.suffixes + + scan_subfolder_id = args.scan_subfolder_id + + if isinstance(scan_subfolder_id, str): + scan_subfolder_id = [scan_subfolder_id for _ in range(len(folders))] + + if suffixes is None: + suffixes = ["" for _ in range(len(folders))] + + for i in range(len(suffixes)): + if suffixes[i] == "_": + suffixes[i] = "" + + if tmin is None: + tmin = [0.0] * len(folders) + last_tmin_for_linear = True + else: + last_tmin_for_linear = False + # Read c = CGYROtools.CGYRO() labels = [] + output_pickle = {} for i, folder in enumerate(folders): - labels.append(f"{IOtools.reducePathLevel(folder)[-1]}") - c.read(label=labels[-1], folder=folder) + labels.append(f"case {i + 1}") + + if linear: + c.read_linear_scan( + label=labels[-1], + folder=folder, + suffix=suffixes[i], + preffix=scan_subfolder_id[i], + 
minimal=minimal + ) + elif include_2D: + c.read( + label=labels[-1], + folder=folder, + tmin=tmin[i], + last_tmin_for_linear=last_tmin_for_linear, + suffix=suffixes[i], + preffix=scan_subfolder_id[i], + minimal=minimal + ) + else: + c.read( + label=labels[-1], + folder=folder, + tmin=tmin[i], + last_tmin_for_linear=last_tmin_for_linear, + suffix=suffixes[i], + preffix=scan_subfolder_id[i], + minimal=minimal + ) + + if pkl: + print("Pickling data...") + print(c.results[labels[-1]]['output']) + folder_abs = os.path.abspath(folder) + simname = folder_abs.rstrip("/").split("/")[-1] + print(f"Pickling to {simname}.pkl") + + with open(f"{folder}/{simname}_data.pkl", "wb") as f: + pickle.dump(c.results[labels[-1]]['output'], f) + print("Pickling done.") + + if not skip_plotting: + if linear: + # Plot linear spectrum + c.plot_quick_linear(labels=labels) + plt.show() + else: + c.plot(labels=labels, include_2D=include_2D, common_colorbar=True) + c.fn.show() + + embed() + - c.plot(labels=labels) - c.fn.show() - embed() if __name__ == "__main__": main() diff --git a/src/mitim_tools/gacode_tools/scripts/read_gacode.py b/src/mitim_tools/gacode_tools/scripts/read_gacode.py index 9c994607..86c2e508 100644 --- a/src/mitim_tools/gacode_tools/scripts/read_gacode.py +++ b/src/mitim_tools/gacode_tools/scripts/read_gacode.py @@ -1,4 +1,5 @@ import argparse +from mitim_tools.plasmastate_tools.utils import state_plotting from mitim_tools.gacode_tools import PROFILEStools """ @@ -11,27 +12,29 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("files", type=str, nargs="*") - parser.add_argument( - "--rho", type=float, required=False, default=0.89 - ) # Last rho for gradients plot + parser.add_argument("--rho", type=float, required=False, default=0.89) # Last rho for gradients plot + parser.add_argument("--print", required=False, default=False, action="store_true") # Last rho for gradients plot args = parser.parse_args() files = args.files rho = args.rho + print_only = 
args.print # Read profs = [] for file in files: - p = PROFILEStools.PROFILES_GACODE(file) + p = PROFILEStools.gacode_state(file) profs.append(p) p.printInfo() # Plot - fn = PROFILEStools.plotAll(profs, lastRhoGradients=rho) + if not print_only: - fn.show() + fn = state_plotting.plotAll(profs, lastRhoGradients=rho) + + fn.show() # Import IPython and embed an interactive session from IPython import embed diff --git a/src/mitim_tools/gacode_tools/scripts/read_tglf.py b/src/mitim_tools/gacode_tools/scripts/read_tglf.py index 09086fc4..54cb497d 100644 --- a/src/mitim_tools/gacode_tools/scripts/read_tglf.py +++ b/src/mitim_tools/gacode_tools/scripts/read_tglf.py @@ -31,7 +31,7 @@ def main(): suffixes[i] = "" tglf = TGLFtools.TGLF() - tglf.prep_from_tglf( + tglf.prep_from_file( folders[0], folders[0] / f"input.tglf{suffixes[0]}", input_gacode=input_gacode ) for i, folder in enumerate(folders): diff --git a/src/mitim_tools/gacode_tools/scripts/read_tgyro.py b/src/mitim_tools/gacode_tools/scripts/read_tgyro.py index 679b050f..25856a60 100644 --- a/src/mitim_tools/gacode_tools/scripts/read_tgyro.py +++ b/src/mitim_tools/gacode_tools/scripts/read_tgyro.py @@ -21,7 +21,7 @@ def main(): tgyros = [] for folder in folders: prof_file = folder / "input.gacode" - prof = PROFILEStools.PROFILES_GACODE(prof_file) + prof = PROFILEStools.gacode_state(prof_file) p = TGYROtools.TGYROoutput(folder, profiles=prof) tgyros.append(p) diff --git a/src/mitim_tools/gacode_tools/scripts/run_tglf.py b/src/mitim_tools/gacode_tools/scripts/run_tglf.py index 0e5f3c55..c535ff8f 100644 --- a/src/mitim_tools/gacode_tools/scripts/run_tglf.py +++ b/src/mitim_tools/gacode_tools/scripts/run_tglf.py @@ -28,9 +28,7 @@ def main(): parser.add_argument("--gacode", required=False, type=str, default=None) parser.add_argument("--scan", required=False, type=str, default=None) parser.add_argument("--drives", required=False, default=False, action="store_true") - parser.add_argument( - "--cold_start", "-r", 
required=False, default=False, action="store_true" - ) + parser.add_argument("--cold_start", "-r", required=False, default=False, action="store_true") args = parser.parse_args() @@ -47,29 +45,29 @@ def main(): # ------------------------------------------------------------------------------ tglf = TGLFtools.TGLF() - tglf.prep_from_tglf(folder, input_tglf, input_gacode=input_gacode) + tglf.prep_from_file(folder, input_tglf, input_gacode=input_gacode) # ------------------------------------------------------------------------------ # Workflow # ------------------------------------------------------------------------------ if drives: - tglf.runScanTurbulenceDrives(subFolderTGLF="scan_turb", TGLFsettings=None) + tglf.runScanTurbulenceDrives(subfolder="scan_turb", code_settings=None) tglf.plotScanTurbulenceDrives(label="scan_turb") elif scan is not None: tglf.runScan( - subFolderTGLF="scan1", + subfolder="scan1", variable=scan, varUpDown=np.linspace(0.2, 2.0, 5), - TGLFsettings=None, + code_settings=None, cold_start=cold_start, ) tglf.readScan(label="scan1", variable=scan) tglf.plotScan(labels=["scan1"], variableLabel=scan) else: - tglf.run(subFolderTGLF="run1", TGLFsettings=None, cold_start=cold_start) + tglf.run(subfolder="run1", code_settings=None, cold_start=cold_start) tglf.read(label="run1") tglf.plot(labels=["run1"]) diff --git a/src/mitim_tools/gacode_tools/utils/CGYROutils.py b/src/mitim_tools/gacode_tools/utils/CGYROutils.py new file mode 100644 index 00000000..960df58d --- /dev/null +++ b/src/mitim_tools/gacode_tools/utils/CGYROutils.py @@ -0,0 +1,780 @@ +import os +import scipy +import numpy as np +from pathlib import Path +import statsmodels.api as sm +import matplotlib.pyplot as plt +from mitim_tools.misc_tools import IOtools +from mitim_tools.simulation_tools import SIMtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +try: + from pygacode.cgyro.data_plot import cgyrodata_plot + from pygacode import gacodefuncs +except 
ModuleNotFoundError: + print("\t- Could not find pygacode module in this environment. Please install it if you need CGYRO capabilities", typeMsg='w') +from IPython import embed +import pandas as pd + +class CGYROlinear_scan: + def __init__(self, labels, results): + + self.labels = labels + + # Store the data in a structured way + self.aLTi = [] + self.ky = [] + self.g_mean = [] + self.f_mean = [] + + self.neTe_mean = [] + + self.Qe_mean = [] + self.Qi_mean = [] + + for label in labels: + self.ky.append(results[label]['output'][0].ky[0]) + self.aLTi.append(results[label]['output'][0].aLTi) + self.g_mean.append(results[label]['output'][0].g_mean[0]) + self.f_mean.append(results[label]['output'][0].f_mean[0]) + + self.Qe_mean.append(results[label]['output'][0].Qe_mean) + self.Qi_mean.append(results[label]['output'][0].Qi_mean) + + try: + self.neTe_mean.append(results[label]['output'][0].neTe_kx0_mean[0]) + except: + self.neTe_mean.append(np.nan) + + self.ky = np.array(self.ky) + self.aLTi = np.array(self.aLTi) + self.g_mean = np.array(self.g_mean) + self.f_mean = np.array(self.f_mean) + self.neTe_mean = np.array(self.neTe_mean) + self.Qe_mean = np.array(self.Qe_mean) + self.Qi_mean = np.array(self.Qi_mean) + + + # Organize them by ky + order = np.argsort(self.ky) + self.ky = self.ky[order] + self.aLTi = self.aLTi[order] + self.g_mean = self.g_mean[order] + self.f_mean = self.f_mean[order] + self.neTe_mean = self.neTe_mean[order] + self.Qe_mean = self.Qe_mean[order] + self.Qi_mean = self.Qi_mean[order] + self.labels = [self.labels[i] for i in order] + self.results = {label: results[label] for label in self.labels} + + +class CGYROoutput(SIMtools.GACODEoutput): + def __init__(self, folder, suffix = None, tmin=0.0, minimal=False, last_tmin_for_linear=True, **kwargs): + + super().__init__() + + self.folder = folder + self.tmin = tmin + + if isinstance(self.folder, str): + self.folder = Path(self.folder) + + self.cgyrodata = self.read_using_cgyroplot(self.folder, suffix) + 
+ # -------------------------------------------------------------- + # Read inputs + # -------------------------------------------------------------- + + self.params1D = {} + for var in self.cgyrodata.__dict__: + par = self.cgyrodata.__dict__[var] + if isinstance(par, bool) or IOtools.isnum(par): + self.params1D[var] = par + elif isinstance(par, (list, np.ndarray)) and par.ndim==1 and len(par) <= 5: + for i, p in enumerate(par): + self.params1D[f"{var}_{i}"] = p + + # -------------------------------------------------------------- + # Postprocess with MITIM-curated structures and variables + # -------------------------------------------------------------- + + # Check for linear run + if 'phib' in self.cgyrodata.__dict__: + print('\t- This is a linear run', typeMsg='i') + self.linear = True + if last_tmin_for_linear: + print('\t- Forcing tmin to the last time point', typeMsg='i') + self.tmin = self.cgyrodata.t[-1] + + else: + self.linear = False + + self.cgyrodata.getflux(cflux='auto') + self.cgyrodata.getnorm("elec") + self.cgyrodata.getgeo() + self.cgyrodata.getxflux() + + # Understand positions + if -1 in self.cgyrodata.z: + self.electron_flag = np.where(self.cgyrodata.z == -1)[0][0] + else: + self.electron_flag = None + self.all_flags = np.arange(0, len(self.cgyrodata.z), 1) + self.ions_flags = self.all_flags[self.all_flags != self.electron_flag] + + self.all_names = [f"{gacodefuncs.specmap(self.cgyrodata.mass[i],self.cgyrodata.z[i])}({self.cgyrodata.z[i]},{self.cgyrodata.mass[i]:.1f})" for i in self.all_flags] + + self.fields = np.arange(self.cgyrodata.n_field) + self.aLTi = self.cgyrodata.dlntdr[0] + self.aLTe = self.cgyrodata.dlntdr[self.electron_flag] + self.aLne = self.cgyrodata.dlnndr[self.electron_flag] + + + # ************************ + # Normalization + # ************************ + + self.t = self.cgyrodata.tnorm + self.ky = self.cgyrodata.kynorm + self.kx = self.cgyrodata.kxnorm + self.theta = self.cgyrodata.theta + + if self.cgyrodata.theta_plot == 1: 
+ self.theta_stored = np.array([0.0]) + else: + self.theta_stored = np.array([-1+2.0*i/self.cgyrodata.theta_plot for i in range(self.cgyrodata.theta_plot)]) + + self.Qgb = self.cgyrodata.q_gb_norm + self.Ggb = self.cgyrodata.gamma_gb_norm + + self.artificial_rhos_factor = self.cgyrodata.rho_star_norm / self.cgyrodata.rhonorm + + self._process_linear() + + if (not minimal): # and (self.linear == False): + self.cgyrodata.getbigfield() + + if 'kxky_phi' in self.cgyrodata.__dict__: + try: + self._process_fluctuations() + except ValueError as e: + print(f'\t- Error processing fluctuations: {e}', typeMsg='w') + else: + print(f'\t- No fluctuations found in CGYRO data ({IOtools.clipstr(self.folder)}), skipping fluctuation processing and will not be able to plot default Notebook', typeMsg='w') + else: + print('\t- Minimal mode, skipping fluctuations processing', typeMsg='i') + + try: + self._process_fluxes() + except ValueError as e: + print(f'\t- Error processing fluxes: {e}', typeMsg='w') + #self._process_fluxes() + self._saturate_signals() + + self.remove_symlinks() + + def read_using_cgyroplot(self, folder, suffix): + + original_dir = os.getcwd() + + # Handle files with suffix by creating temporary symbolic links + self.temp_links = [] + if suffix: + import glob + + # Find all files with the suffix pattern + pattern = f"{folder.resolve()}{os.sep}*{suffix}" + suffixed_files = glob.glob(pattern) + + for suffixed_file in suffixed_files: + # Create expected filename without suffix + original_name = suffixed_file.replace(suffix, '') + + # Only create symlink if the original doesn't exist and the suffixed file does + if not os.path.exists(original_name) and os.path.exists(suffixed_file): + try: + os.symlink(suffixed_file, original_name) + self.temp_links.append(original_name) + print(f"\t- Created temporary link: {os.path.basename(original_name)} -> {os.path.basename(suffixed_file)}") + except (OSError, FileExistsError) as e: + print(f"\t- Warning: Could not create symlink 
for {os.path.basename(suffixed_file)}: {e}", typeMsg='w') + + try: + print(f"\t- Reading CGYRO data from {folder.resolve()}") + cgyrodata = cgyrodata_plot(f"{folder.resolve()}{os.sep}") + except FileNotFoundError: + raise Exception(f"[MITIM] Could not find CGYRO data in {folder.resolve()}. Please check the folder path or run CGYRO first.") + except Exception as e: + print(f"\t- Error reading CGYRO data: {e}") + if print('- Could not read data, do you want me to try do "cgyro -t" in the folder?',typeMsg='q'): + os.chdir(folder) + os.system("cgyro -t") + cgyrodata = cgyrodata_plot(f"{folder.resolve()}{os.sep}") + finally: + + os.chdir(original_dir) + + return cgyrodata + + def remove_symlinks(self): + # Remove temporary symbolic links + for temp_link in self.temp_links: + try: + if os.path.islink(temp_link): + os.unlink(temp_link) + print(f"\t- Removed temporary link: {os.path.basename(temp_link)}") + except OSError as e: + print(f"\t- Warning: Could not remove temporary link {os.path.basename(temp_link)}: {e}", typeMsg='w') + + def _process_linear(self): + + # check for convergence + self.linear_converged = False + info_file = f"{self.folder.resolve()}/out.cgyro.info" + if not os.path.exists(info_file): + raise FileNotFoundError(f"[MITIM] Could not find CGYRO info file at {info_file}. Please check the folder path or run CGYRO first.") + else: + with open(info_file, 'r') as f: + lines = f.readlines() + for line in lines: + if "EXIT: (CGYRO) Linear converged" in line: + self.linear_converged = True + break + + self.f = self.cgyrodata.fnorm[0,:,:] # (ky, time) + self.g = self.cgyrodata.fnorm[1,:,:] # (ky, time) + if self.g is np.nan or self.f is np.nan: + raise ValueError(f"[MITIM] Could not find f or g in CGYRO data at {info_file}. 
Please check the folder path or run CGYRO first.") + + # Ballooning Modes (complex eigenfunctions) + if 'phib' in self.cgyrodata.__dict__: + self.phi_ballooning = self.cgyrodata.phib # (ball, time) + self.apar_ballooning = self.cgyrodata.aparb # (ball, time) + self.bpar_ballooning = self.cgyrodata.bparb # (ball, time) + self.theta_ballooning = self.cgyrodata.thetab # (ball, time) + + def _process_fluctuations(self): + # Fluctuations (complex numbers) + + gbnorm = False + + theta = -1 + + moment, species, field = 'phi', None, 0 + self.phi, _ = self.cgyrodata.kxky_select(theta,field,moment,species,gbnorm=gbnorm) # [COMPLEX] (nradial,ntoroidal,time) + if 'kxky_apar' in self.cgyrodata.__dict__: + field = 1 + self.apar, _ = self.cgyrodata.kxky_select(theta,field,moment,species,gbnorm=gbnorm) # [COMPLEX] (nradial,ntoroidal,time) + field = 2 + self.bpar, _ = self.cgyrodata.kxky_select(theta,field,moment,species,gbnorm=gbnorm) # [COMPLEX] (nradial,ntoroidal,time) + + self.tmax_fluct = _detect_exploiding_signal(self.t, self.phi**2) + if 'kxky_n' in self.cgyrodata.__dict__: + moment, species, field = 'n', self.electron_flag, 0 + self.ne, _ = self.cgyrodata.kxky_select(theta,field,moment,species,gbnorm=gbnorm) # [COMPLEX] (nradial,ntoroidal,time) + + species = self.ions_flags + self.ni_all, _ = self.cgyrodata.kxky_select(theta,field,moment,species,gbnorm=gbnorm) # [COMPLEX] (nradial,nions,ntoroidal,time) + self.ni = self.ni_all.sum(axis=1) # [COMPLEX] (nradial,ntoroidal,time) + + if 'kxky_e' in self.cgyrodata.__dict__: + moment, species, field = 'e', self.electron_flag, 0 + Ee, _ = self.cgyrodata.kxky_select(theta,field,moment,species,gbnorm=gbnorm) # [COMPLEX] (nradial,ntoroidal,time) + + species = self.ions_flags + Ei_all, _ = self.cgyrodata.kxky_select(theta,field,moment,species,gbnorm=gbnorm) # [COMPLEX] (nradial,nions,ntoroidal,time) + Ei = Ei_all.sum(axis=1) # [COMPLEX] (nradial,ntoroidal,time) + # Transform to temperature + self.Te = 2/3 * Ee - self.ne + self.Ti_all = 
2/3 * Ei_all - self.ni_all + self.Ti = 2/3 * Ei - self.ni + + # Sum over radial modes and divide between n=0 and n>0 modes, RMS + variables = ['phi', 'apar', 'bpar', 'ne', 'ni_all', 'ni', 'Te', 'Ti', 'Ti_all'] + for var in variables: + if var in self.__dict__: + # Make sure I go to the real units for all of them ******************* + self.__dict__[var] = self.__dict__[var] * self.artificial_rhos_factor + # ******************************************************************** + + # Case with dimensions: (nradial,ntoroidal,time) + if len(self.__dict__[var].shape) == 3: + axis_radial = 0 + axis_toroidal = 1 + var_ntor0 = self.__dict__[var][:,0,:] + var_ntorn = self.__dict__[var][:,1:,:] + # Case with dimensions: (nradial,ntoroidal,nions,time) + elif len(self.__dict__[var].shape) == 4: + axis_radial = 0 + axis_toroidal = 2 + var_ntor0 = self.__dict__[var][:,:,0,:] + var_ntorn = self.__dict__[var][:,:,1:,:] + + # Sum over radial modes + self.__dict__[var+'_rms_sumnr'] = (abs(self.__dict__[var][:,:,:])**2).sum(axis=(axis_radial))**0.5 # (ntoroidal, time) or (nions, ntoroidal, time) + + # Sum over radial modes AND separate n=0 and n>0 (sum) modes + self.__dict__[var+'_rms_sumnr_n0'] = (abs(self.__dict__[var][:,0,:])**2).sum(axis=axis_radial)**0.5 # (time) or (nions, time) + self.__dict__[var+'_rms_sumnr_sumn1'] = (abs(self.__dict__[var][:,1:,:])**2).sum(axis=(axis_radial,axis_toroidal))**0.5 # (time) or (nions, time) + + # Sum over radial modes and toroidal modes + self.__dict__[var+'_rms_sumnr_sumn'] = (abs(self.__dict__[var][:,:,:])**2).sum(axis=(axis_radial,axis_toroidal))**0.5 # (time) or (nions, time) + + # Separate n=0, n>0 (sum) modes, and all n (sum) modes + self.__dict__[var+'_rms_n0'] = (abs(var_ntor0)**2)**0.5 # (nradial,time) + self.__dict__[var+'_rms_sumn1'] = (abs(var_ntorn)**2).sum(axis=(axis_toroidal))**0.5 # (nradial,time) + self.__dict__[var+'_rms_sumn'] = (abs(self.__dict__[var])**2).sum(axis=(axis_toroidal))**0.5 # (nradial,time) + + # Cross-phases + if 
'ne' in self.__dict__ and 'Te' in self.__dict__: + self.neTe = _cross_phase(self.t, self.ne, self.Te) * 180/ np.pi # (nradial, ntoroidal, time) + self.neTe_kx0 = self.neTe[np.argmin(np.abs(self.kx)),:,:] # (ntoroidal, time) + + if 'ni' in self.__dict__ and 'Ti' in self.__dict__: + self.niTi = _cross_phase(self.t, self.ni, self.Ti) * 180/ np.pi # (nradial, ntoroidal, time) + self.niTi_kx0 = self.niTi[np.argmin(np.abs(self.kx)),:,:] + + if 'phi' in self.__dict__ and 'Te' in self.__dict__: + self.phiTe = _cross_phase(self.t, self.phi, self.Te) * 180/ np.pi # (nradial, ntoroidal, time) + self.phiTe_kx0 = self.phiTe[np.argmin(np.abs(self.kx)),:,:] + + if 'phi' in self.__dict__ and 'Ti' in self.__dict__: + self.phiTi = _cross_phase(self.t, self.phi, self.Ti) * 180/ np.pi # (nradial, ntoroidal, time) + self.phiTi_kx0 = self.phiTi[np.argmin(np.abs(self.kx)),:,:] + + self.phiTi_all = [] + if 'phi' in self.__dict__ and 'Ti_all' in self.__dict__: + for ion in self.ions_flags: + self.phiTi_all.append(_cross_phase(self.t, self.phi, self.Ti_all[:,ion,:]) * 180/ np.pi) + self.phiTi_all = np.array(self.phiTi_all) + self.phiTi_all_kx0 = self.phiTi_all[:,np.argmin(np.abs(self.kx)),:,:] + + if 'ne' in self.__dict__ and 'phi' in self.__dict__: + self.phine = _cross_phase(self.t, self.phi, self.ne) * 180/ np.pi # (nradial, ntoroidal, time) + self.phine_kx0 = self.phine[np.argmin(np.abs(self.kx)),:,:] + + if 'ni' in self.__dict__ and 'phi' in self.__dict__: + self.phini = _cross_phase(self.t, self.phi, self.ni) * 180/ np.pi # (nradial, ntoroidal, time) + self.phini_kx0 = self.phini[np.argmin(np.abs(self.kx)),:,:] + + self.phini_all = [] + if 'phi' in self.__dict__ and 'ni_all' in self.__dict__: + for ion in self.ions_flags: + self.phini_all.append(_cross_phase(self.t, self.phi, self.ni_all[:,ion,:]) * 180/ np.pi) + self.phini_all = np.array(self.phini_all) + self.phini_all_kx0 = self.phini_all[:,np.argmin(np.abs(self.kx)),:,:] + + # Correlation length + phi = 
(abs(self.phi[:,self.ky>0,:])).sum(axis=1) # Sum over toroidal modes n>0 + phim, _ = apply_ac(self.t,phi,tmin=self.tmin) + phim = np.append(0, phim) # Add n=0 mode + if np.isinf(phim).any() or np.isnan(phim).any(): + print(f"\t- Warning: Correlation length calculation failed due to infinite/nan values. Setting l_corr to NaN.", typeMsg='w') + self.l_corr = np.nan + else: + self.lr_corr = calculate_lcorr(phim, self.kx, self.cgyrodata.n_radial) + + def _process_fluxes(self): + + # ************************ + # Fluxes + # ************************ + + ky_flux = self.cgyrodata.ky_flux # (species, moments, fields, ntoroidal, time) + + fields = ['phi','apar','bpar'][:self.cgyrodata.n_field] + + # Electron energy flux + + i_species, i_moment = -1, 1 + for i_field, field in enumerate(fields): + if field == 'phi': + self.Qe_ES_ky = ky_flux[i_species, i_moment, i_field, :, :] + elif field == 'apar': + self.Qe_EM_apar_ky = ky_flux[i_species, i_moment, i_field, :, :] + self.Qe_EM_ky = self.Qe_EM_apar_ky.copy() + elif field == 'bpar': + self.Qe_EM_aper_ky = ky_flux[i_species, i_moment, i_field, :, :] + self.Qe_EM_ky += self.Qe_EM_aper_ky + + if 'Qe_EM_ky' in self.__dict__: + self.Qe_ky = self.Qe_ES_ky + self.Qe_EM_ky + else: + self.Qe_ky = self.Qe_ES_ky + + # Electron particle flux + + i_species, i_moment = -1, 0 + for i_field, field in enumerate(fields): + if field == 'phi': + self.Ge_ES_ky = ky_flux[i_species, i_moment, i_field, :, :] + elif field == 'apar': + self.Ge_EM_apar_ky = ky_flux[i_species, i_moment, i_field, :, :] + self.Ge_EM_ky = self.Ge_EM_apar_ky.copy() + elif field == 'bpar': + self.Ge_EM_aper_ky = ky_flux[i_species, i_moment, i_field, :, :] + self.Ge_EM_ky += self.Ge_EM_aper_ky + + if 'Ge_EM_ky' in self.__dict__: + self.Ge_ky = self.Ge_ES_ky + self.Ge_EM_ky + else: + self.Ge_ky = self.Ge_ES_ky + + # Ions energy flux + + i_species, i_moment = self.ions_flags, 1 + for i_field, field in enumerate(fields): + if field == 'phi': + self.Qi_all_ES_ky = ky_flux[i_species, 
i_moment, i_field, :, :] + # sum over species + self.Qi_ES_ky = self.Qi_all_ES_ky.sum(axis=0) + elif field == 'apar': + self.Qi_all_EM_apar_ky = ky_flux[i_species, i_moment, i_field, :, :] + self.Qi_all_EM_ky = self.Qi_all_EM_apar_ky.copy() + # sum over species + self.Qi_EM_apar_ky = self.Qi_all_EM_apar_ky.sum(axis=0) + elif field == 'bpar': + self.Qi_all_EM_aper_ky = ky_flux[i_species, i_moment, i_field, :, :] + self.Qi_all_EM_ky += self.Qi_all_EM_aper_ky + # sum over species + self.Qi_EM_aper_ky = self.Qi_all_EM_aper_ky.sum(axis=0) + + + if 'Qi_all_EM_ky' in self.__dict__: + self.Qi_all_ky = self.Qi_all_ES_ky + self.Qi_all_EM_ky + self.Qi_ky = self.Qi_all_ky.sum(axis=0) + self.Qi_EM_ky = self.Qi_all_EM_ky.sum(axis=0) + else: + self.Qi_all_ky = self.Qi_all_ES_ky + self.Qi_ky = self.Qi_all_ky.sum(axis=0) + self.Qi_ES_ky = self.Qi_all_ES_ky.sum(axis=0) + + # ************************ + # Sum total + # ************************ + variables = ['Qe','Ge','Qi','Qi_all'] + for var in variables: + for i in ['', '_ES', '_EM_apar', '_EM_aper', '_EM']: + if var+i+'_ky' in self.__dict__: + self.__dict__[var+i] = self.__dict__[var+i+'_ky'].sum(axis=-2) # (time) + + # Convert to MW/m^2 + self.QeMWm2 = self.Qe * self.Qgb + self.QiMWm2 = self.Qi * self.Qgb + self.Qi_allMWm2 = self.Qi_all * self.Qgb + + def _saturate_signals(self): + + # ************************ + # Saturated + # ************************ + + flags = [ + 'Qe', + 'QeMWm2', + 'Qe_ky', + 'Qi', + 'QiMWm2', + 'Qi_all', + 'Qi_allMWm2', + 'Qi_ky', + 'Qi_all_ky', + 'Ge', + 'Ge_ky', + 'Qe_ES', + 'Qi_ES', + 'Ge_ES', + 'Qe_EM', + 'Qi_EM', + 'Ge_EM', + 'g', + 'f', + ] + + flags_fluctuations = [ + 'phi_rms_sumnr', + 'apar_rms_sumnr', + 'bpar_rms_sumnr', + 'ne_rms_sumnr', + 'ni_rms_sumnr', + 'ni_all_rms_sumnr', + 'Te_rms_sumnr', + 'Ti_rms_sumnr', + 'Ti_all_rms_sumnr', + 'phi_rms_n0', + 'phi_rms_sumn1', + 'phi_rms_sumn', + 'apar_rms_n0', + 'apar_rms_sumn1', + 'apar_rms_sumn', + 'bpar_rms_n0', + 'bpar_rms_sumn1', + 'bpar_rms_sumn', 
+ 'ne_rms_n0', + 'ne_rms_sumn1', + 'ne_rms_sumn', + 'ni_rms_n0', + 'ni_rms_sumn1', + 'ni_rms_sumn', + 'ni_all_rms_n0', + 'ni_all_rms_sumn1', + 'ni_all_rms_sumn', + 'Te_rms_n0', + 'Te_rms_sumn1', + 'Te_rms_sumn', + 'Ti_rms_n0', + 'Ti_rms_sumn1', + 'Ti_rms_sumn', + 'Ti_all_rms_n0', + 'Ti_all_rms_sumn1', + 'Ti_all_rms_sumn', + 'neTe_kx0', + 'niTi_kx0', + 'phiTe_kx0', + 'phine_kx0', + 'phini_kx0', + 'phiTi_kx0', + 'phini_all_kx0', + 'phiTi_all_kx0', + ] + + for iflag in flags: + if iflag in self.__dict__: + self.__dict__[iflag+'_mean'], self.__dict__[iflag+'_std'] = apply_ac( + self.t, + self.__dict__[iflag], + tmin=self.tmin, + label_print=iflag, + print_msg=iflag in ['Qi', 'Qe', 'Ge'], + ) + + for iflag in flags_fluctuations: + if iflag in self.__dict__: + self.__dict__[iflag+'_mean'], self.__dict__[iflag+'_std'] = apply_ac( + self.t, + self.__dict__[iflag], + tmin=self.tmin, + tmax=self.tmax_fluct, + label_print=iflag, + ) + +def _grab_ncorrelation(S, debug=False): + # Calculate the autocorrelation function + i_acf = sm.tsa.acf(S, nlags=len(S)) + + if i_acf.min() > 1/np.e: + print("Autocorrelation function does not reach 1/e, will use full length of time series for n_corr.", typeMsg='w') + + # Calculate how many time slices make the autocorrelation function is 1/e (conventional decorrelation level) + icor = np.abs(i_acf-1/np.e).argmin() + + # Define number of samples + n_corr = len(S) / ( 3.0 * icor ) #Define "sample" as 3 x autocor time + + if debug: + fig, ax = plt.subplots() + ax.plot(i_acf, '-o', label='ACF') + ax.axhline(1/np.e, color='r', linestyle='--', label='1/e') + ax.set_xlabel('Lags'); ax.set_xlim([0, icor+20]) + ax.set_ylabel('ACF') + ax.legend() + plt.show() + embed() + + return n_corr, icor + +def apply_ac(t, S, tmin = 0, tmax = None, label_print = '', print_msg = False, debug=False): + + it0 = np.argmin(np.abs(t - tmin)) + it1 = np.argmin(np.abs(t - tmax)) if tmax is not None else len(t) # If tmax is None, use the full length of t + + if it1 <= it0: 
+ it0 = it1 + + # Calculate the mean and std of the signal after tmin (last dimension is time) + S_mean = np.mean(S[..., it0:it1+1], axis=-1) + S_std = np.std(S[..., it0:it1+1], axis=-1) + + if S.ndim == 1: + # 1D case: single time series + n_corr, icor = _grab_ncorrelation(S[it0:it1+1], debug=debug) + S_std = S_std / np.sqrt(n_corr) + + if print_msg: + print(f"\t- {(label_print + ': a') if len(label_print)>0 else 'A'}utocorr time: {icor:.1f} -> {n_corr:.1f} samples -> {S_mean:.2e} +-{S_std:.2e}") + + else: + # Multi-dimensional case: flatten all dimensions except the last one + shape_orig = S.shape[:-1] # Original shape without time dimension + S_reshaped = S.reshape(-1, S.shape[-1]) # Flatten to (n_series, n_time) + + n_series = S_reshaped.shape[0] + n_corr = np.zeros(n_series) + icor = np.zeros(n_series) + + # Calculate correlation for each flattened time series + for i in range(n_series): + n_corr[i], icor[i] = _grab_ncorrelation(S_reshaped[i, it0:it1+1], debug=debug) + + # Reshape correlation arrays back to original shape (without time dimension) + n_corr = n_corr.reshape(shape_orig) + icor = icor.reshape(shape_orig) + + # Apply correlation correction to standard deviation + S_std = S_std / np.sqrt(n_corr) + + # Print results - handle different dimensionalities + if print_msg: + if S.ndim == 2: + # 2D case: print each series + for i in range(S.shape[0]): + print(f"\t- {(label_print + f'_{i}: a') if len(label_print)>0 else 'A'}utocorr: {icor[i]:.1f} -> {n_corr[i]:.1f} samples -> {S_mean[i]:.2e} +-{S_std[i]:.2e}") + else: + # Higher dimensional case: print summary statistics + print(f"\t- {(label_print + ': a') if len(label_print)>0 else 'A'}utocorr time: {icor.mean():.1f}±{icor.std():.1f} -> {n_corr.mean():.1f}±{n_corr.std():.1f} samples -> shape {S_mean.shape}") + + return S_mean, S_std + + +def _cross_phase(t, f1, f2): + """ + Calculate the cross-phase between two complex signals. + + Parameters: + f1, f2 : np.ndarray + Complex signals (e.g., fluctuations). 
+ + Returns: + np.ndarray + Cross-phase in radians. + """ + + return np.angle(f1 * np.conj(f2)) + +def _detect_exploiding_signal(t,f1): + + try: + idx = np.where(np.isnan(f1.sum(axis=(0,1))) | np.isinf(f1.sum(axis=(0,1))))[0][0] + max_t = t[idx] + if print(f"\t- Warning: Exploding signal detected at t>={max_t:.2f}", typeMsg='w'): + return max_t + else: + return t[-1] + except IndexError: + return t[-1] # No exploding signal detected, return last time point + +def calculate_lcorr(phim, kx, nx, debug=False): + """Calculate the correlation length in the radial direction. + + Completely based on pygacode + """ + + ave = np.roll(phim,-nx//2) + ave[0] = 0.0 + corr = np.fft.fft(ave,nx) + corr = np.fft.fftshift(corr) + corr /= np.max(np.abs(corr)) + corr = corr.real + delta_r = np.fft.fftfreq(nx) + delta_r = np.fft.fftshift(delta_r) + Lx = 2*np.pi/(kx[1]-kx[0]) + delta_r *= Lx + + corr_hilbert = scipy.signal.hilbert(corr) + corr_env = np.abs(corr_hilbert) + def absexp(x,tau): + return np.exp(-np.abs(x)/tau) + l_corr, _ = scipy.optimize.curve_fit(absexp, delta_r, corr_env, p0=10.0) + + if debug: + fig, ax = plt.subplots() + ax.plot(delta_r,0*delta_r,color='k',ls='--') + ax.plot(delta_r,corr,color='m') + ax.plot(delta_r,corr_env,color='b') + ax.plot(delta_r,absexp(delta_r,l_corr),color='k',ls='-.') + ax.set_xlim([np.min(delta_r),np.max(delta_r)]) + ax.set_ylim(-1,1) + plt.show() + embed() + + return l_corr[0] # Return the correlation length in the radial direction + + +def quends_analysis(t, S, debug = False): + + import quends as qnds + + time_dependent_data = {'time': t, 'signal': S} + df = pd.DataFrame(time_dependent_data, index = pd.RangeIndex(len(t))) + + dst = qnds.DataStream(df) + + window_size = 10 + + trimmed_df = dst.trim(column_name="signal", method="std") #, window_size=10) + + mean = trimmed_df.mean(window_size=window_size)['signal'] + std = trimmed_df.mean_uncertainty(window_size=window_size)['signal'] + + stats = 
trimmed_df.compute_statistics(window_size=window_size) + + if debug: + plotter = qnds.Plotter() + plotter.steady_state_automatic_plot(dst, ["signal"]) + plotter.plot_acf(trimmed_df) + print(stats) + plt.show() + embed() + + return mean, std, stats + +def fetch_CGYROoutput(folder_local, folders_remote, machine, minimal=True, delete_local=False): + '''This is a helper function to bring back only the python object from a remote CGYRO run + this is useful when nonlinear runs are too large to be transfered back. It is important to + make sure MITIM is the same version in the remote and local machine. for now I'm writing the commit hash to a file + and checking if they are the same and raising a warning if not.''' + from mitim_tools.misc_tools import FARMINGtools + import pickle + # execute pickle_cgyro remotely + folders_string = " ".join(folders_remote) + + if minimal: + minimal_flag = "--minimal" + else: + minimal_flag = "" + + print("MINIMAL FLAG", minimal_flag) + command = f"mitim_plot_cgyro --noplot --pickle {minimal_flag} {folders_string}" + print(f"Executing remotely: {command}") + FARMINGtools.perform_quick_remote_execution( + folder_local, + machine, + command + ) + + # retrieve remote file + remote_files = [f"{folder_remote}/{folder_remote.split('/')[-1]}_data.pkl" for folder_remote in folders_remote] + FARMINGtools.retrieve_files_from_remote( + folder_local, + machine, + remote_files + ) + + # read pickle file as cgyroOutput object + c={} + for i, folder_remote in enumerate(folders_remote): + with open(f"{folder_local}/{folder_remote.rstrip('/').split('/')[-1]}_data.pkl", "rb") as f: + c[folder_remote] = pickle.load(f) + print(f"Retrieved CGYRO output from {folder_remote}") + + if delete_local: + import os + for i, folder_remote in enumerate(folders_remote): + os.remove(f"{folder_local}/{folder_remote.rstrip('/').split('/')[-1]}_data.pkl") + print(f"Deleted local pickle file {folder_local}/{folder_remote.rstrip('/').split('/')[-1]}_data.pkl") + + return c + 
+if __name__ == "__main__": + c = fetch_CGYROoutput(folder_local=".", folders_remote=["/cosmos/vast/scratch/hallefkt/arc_low_current_v3a_n8_fast_+20%_alne_sugama", "/cosmos/vast/scratch/hallefkt/arc_low_current_v3a_n8_fast_-20%_alne_sugama"], machine="cosmos", minimal=True) + c + embed() \ No newline at end of file diff --git a/src/mitim_tools/gacode_tools/utils/GACODEdefaults.py b/src/mitim_tools/gacode_tools/utils/GACODEdefaults.py index adae8bb2..73fe2859 100644 --- a/src/mitim_tools/gacode_tools/utils/GACODEdefaults.py +++ b/src/mitim_tools/gacode_tools/utils/GACODEdefaults.py @@ -7,7 +7,7 @@ from mitim_tools.misc_tools.LOGtools import printMsg as print -def addTGLFcontrol(TGLFsettings, NS=2, minimal=False): +def addTGLFcontrol(code_settings, NS=2, minimal=False): """ ******************************************************************************** Define dictionary to start with @@ -15,8 +15,8 @@ def addTGLFcontrol(TGLFsettings, NS=2, minimal=False): """ # Minimum working set - if minimal or TGLFsettings == 0: - TGLFoptions = { + if minimal or code_settings == 0: + options = { "USE_MHD_RULE": True, "USE_BPER": False, "USE_BPAR": False, @@ -31,10 +31,8 @@ def addTGLFcontrol(TGLFsettings, NS=2, minimal=False): # Define every flag else: - TGLFoptions = IOtools.generateMITIMNamelist( - __mitimroot__ / "templates" / "input.tglf.controls", caseInsensitive=False - ) - TGLFoptions["NMODES"] = NS + 2 + options = IOtools.generateMITIMNamelist(__mitimroot__ / "templates" / "input.tglf.controls", caseInsensitive=False) + options["NMODES"] = NS + 2 """ ******************************************************************************** @@ -43,34 +41,65 @@ def addTGLFcontrol(TGLFsettings, NS=2, minimal=False): ******************************************************************************** """ - with open(__mitimroot__ / "templates" / "input.tglf.models.json", "r") as f: - settings = json.load(f) + options = add_code_settings(options, code_settings, 
models_file="input.tglf.models.yaml") - if str(TGLFsettings) in settings: - sett = settings[str(TGLFsettings)] - label = sett["label"] - for ikey in sett["controls"]: - TGLFoptions[ikey] = sett["controls"][ikey] - else: - print("\t- TGLFsettings not found in input.tglf.models.json, using defaults",typeMsg="w",) - label = "unspecified" + return options + +def addNEOcontrol(code_settings,*args, **kwargs): + + options = IOtools.generateMITIMNamelist(__mitimroot__ / "templates" / "input.neo.controls", caseInsensitive=False) + options = add_code_settings(options, code_settings, models_file="input.neo.models.yaml") + + return options + +def addGXcontrol(code_settings,*args, **kwargs): + + options = IOtools.generateMITIMNamelist(__mitimroot__ / "templates" / "input.gx.controls", caseInsensitive=False) + options = add_code_settings(options, code_settings, models_file="input.gx.models.yaml") - # -------------------------------- - # From dictionary to text - # -------------------------------- + return options - TGLFinput = [""] - for ikey in TGLFoptions: - TGLFinput.append(f"{ikey} = {TGLFoptions[ikey]}") - TGLFinput.append("") - TGLFinput.append("# -- Begin overlay") - TGLFinput.append("") +def addCGYROcontrol(code_settings, rmin=None, **kwargs): - return TGLFinput, TGLFoptions, label + options = IOtools.generateMITIMNamelist(__mitimroot__ / "templates" / "input.cgyro.controls", caseInsensitive=False) + options = add_code_settings(options, code_settings, models_file="input.cgyro.models.yaml") + + return options +def add_code_settings(options,code_settings, models_file = "input.tglf.models.yaml"): -def TGLFinTRANSP(TGLFsettings, NS=3): - _, TGLFoptions, label = addTGLFcontrol(TGLFsettings, NS=NS) + settings = IOtools.read_mitim_yaml(__mitimroot__ / "templates" / models_file) + + code_settings = str(code_settings) + + found = False + + # Search by label first + if str(code_settings) in settings: + sett = settings[str(code_settings)] + for ikey in sett["controls"]: + 
options[ikey] = sett["controls"][ikey] + found = True + else: + # Search by deprecated descriptor (if available) + for ikey in settings: + if "deprecated_descriptor" in settings[ikey]: + if settings[ikey]["deprecated_descriptor"] == code_settings: + sett = settings[ikey] + for jkey in sett["controls"]: + options[jkey] = sett["controls"][jkey] + found = True + break + + if not found: + print(f"\t- {code_settings = } not found in {models_file}, using defaults",typeMsg="w") + + return options + + +def TGLFinTRANSP(code_settings, NS=3): + + TGLFoptions = addTGLFcontrol(code_settings, NS=NS) """ ------------------------------------------------------------------------------------------------------ @@ -103,74 +132,21 @@ def TGLFinTRANSP(TGLFsettings, NS=3): # **** New ones that are TRANSP-specific TGLFoptions["TGLFMOD"] = 1 - TGLFoptions["NSPEC"] = ( - NS # Number of species used in tglf model (maximum 10 species allowed) - ) + TGLFoptions["NSPEC"] = NS # Number of species used in tglf model (maximum 10 species allowed) TGLFoptions["NLGRAD"] = False # Output flux gradients TGLFoptions["ALPHA_N"] = 0.0 # Scaling factor for vn shear TGLFoptions["ALPHA_T"] = 0.0 # Scaling factor for vt shear # TGLFoptions['ALPHA_DIA'] = 0.0 # Scaling factor for diamagnetic terms to exb shear TGLFoptions["CBETAE"] = 1.0 # Betae multiplier (needed for e-m calcs) TGLFoptions["CXNU"] = 1.0 # Collisionality multiplier - TGLFoptions["EM_STAB"] = ( - 0.0 # EM factor for the ion temperature gradient --- Is this the right default? - ) - TGLFoptions["PEVOLVING"] = ( - 0 # Evolving temperature and its gradients --- Is this the right default? - ) - TGLFoptions["kinetic_fast_ion"] = ( - 0 # Fast ion species model in TGLF --- Is this the right default? - ) + TGLFoptions["EM_STAB"] = 0.0 # EM factor for the ion temperature gradient --- Is this the right default? + TGLFoptions["PEVOLVING"] = 0 # Evolving temperature and its gradients --- Is this the right default? 
+ TGLFoptions["kinetic_fast_ion"] = 0 # Fast ion species model in TGLF --- Is this the right default? # **** Other modifications TGLFoptions["UNITS"] = f"'{TGLFoptions['UNITS']}'" - return TGLFoptions, label - - -def addCGYROcontrol(Settings, rmin): - - CGYROoptions = IOtools.generateMITIMNamelist( - __mitimroot__ / "templates" / "input.cgyro.controls", caseInsensitive=False - ) - - """ - ******************************************************************************** - Standard sets of TGLF control parameters - (rest of parameters are as defaults) - ******************************************************************************** - """ - - with open( - __mitimroot__ / "templates" / "input.cgyro.models.json", "r" - ) as f: - settings = json.load(f) - - if str(Settings) in settings: - sett = settings[str(Settings)] - label = sett["label"] - for ikey in sett["controls"]: - CGYROoptions[ikey] = sett["controls"][ikey] - else: - print( - "\t- Settings not found in input.cgyro.models.json, using defaults", - typeMsg="w", - ) - label = "unspecified" - - CGYROoptions["RMIN"] = rmin - - # -------------------------------- - # From dictionary to text - # -------------------------------- - - CGYROinput = [""] - for ikey in CGYROoptions: - CGYROinput.append(f"{ikey} = {CGYROoptions[ikey]}") - CGYROinput.append("") - - return CGYROinput, CGYROoptions, label - + return TGLFoptions def addTGYROcontrol( num_it=0, @@ -237,74 +213,42 @@ def addTGYROcontrol( f"{int(cold_start)}" # 0: Start from beginning, 1: Continue from last iteration ) TGYROoptions["TGYRO_RELAX_ITERATIONS"] = f"{num_it}" # Number of iterations - TGYROoptions["TGYRO_WRITE_PROFILES_FLAG"] = ( - "1" # 1: Create new input.profiles at end, 0: Nothing, -1: At all iterations - ) + TGYROoptions["TGYRO_WRITE_PROFILES_FLAG"] = "1" # 1: Create new input.profiles at end, 0: Nothing, -1: At all iterations # ----------- Optimization - TGYROoptions["LOC_RESIDUAL_METHOD"] = ( - f"{solver_options['res_method']}" # 2: |F|, 3: |F|^2 - 
) - TGYROoptions["TGYRO_ITERATION_METHOD"] = ( - f"{solver_options['tgyro_method']}" # 1: Standard local residual, 2 3 4 5 6 - ) - TGYROoptions["LOC_DX"] = ( - f"{solver_options['step_jac']}" # Step length for Jacobian calculation (df: 0.1), units of a/Lx - ) - TGYROoptions["LOC_DX_MAX"] = ( - f"{solver_options['step_max']}" # Max length for any Newton step (df: 1.0) - ) - TGYROoptions["LOC_RELAX"] = ( - f"{solver_options['relax_param']}" # Parameter 𝐶𝜂 controlling shrinkage of relaxation parameter - ) + TGYROoptions["LOC_RESIDUAL_METHOD"] = f"{solver_options['res_method']}" # 2: |F|, 3: |F|^2 + TGYROoptions["TGYRO_ITERATION_METHOD"] = f"{solver_options['tgyro_method']}" # 1: Standard local residual, 2 3 4 5 6 + TGYROoptions["LOC_DX"] = f"{solver_options['step_jac']}" # Step length for Jacobian calculation (df: 0.1), units of a/Lx + TGYROoptions["LOC_DX_MAX"] = f"{solver_options['step_max']}" # Max length for any Newton step (df: 1.0) + TGYROoptions["LOC_RELAX"] = f"{solver_options['relax_param']}" # Parameter 𝐶𝜂 controlling shrinkage of relaxation parameter # ----------- Prediction Options - TGYROoptions["LOC_SCENARIO"] = ( - f"{physics_options['TypeTarget']}" # 1: Static targets, 2: dynamic exchange, 3: alpha, rad, exchange change - ) + TGYROoptions["LOC_SCENARIO"] = f"{physics_options['TypeTarget']}" # 1: Static targets, 2: dynamic exchange, 3: alpha, rad, exchange change TGYROoptions["LOC_TI_FEEDBACK_FLAG"] = f"{Tipred}" # Evolve Ti? TGYROoptions["LOC_TE_FEEDBACK_FLAG"] = f"{Tepred}" # Evolve Te? TGYROoptions["LOC_ER_FEEDBACK_FLAG"] = f"{Erpred}" # Evolve Er? TGYROoptions["TGYRO_DEN_METHOD0"] = f"{nepred}" # Evolve ne? - TGYROoptions["LOC_PFLUX_METHOD"] = ( - f"{physics_options['ParticleFlux']}" # Particle flux method. 1 = zero target flux, 2 = beam, 3 = beam+wall - ) + TGYROoptions["LOC_PFLUX_METHOD"] = f"{physics_options['ParticleFlux']}" # Particle flux method. 
1 = zero target flux, 2 = beam, 3 = beam+wall TGYROoptions["TGYRO_RMIN"] = f"{fromRho}" TGYROoptions["TGYRO_RMAX"] = f"{ToRho}" - TGYROoptions["TGYRO_USE_RHO"] = ( - f"{solver_options['UseRho']}" # 1: Grid provided in input.tgyro is for rho values - ) + TGYROoptions["TGYRO_USE_RHO"] = f"{solver_options['UseRho']}" # 1: Grid provided in input.tgyro is for rho values # ----------- Physics TGYROoptions["TGYRO_ROTATION_FLAG"] = "1" # Trigger rotation physics? - TGYROoptions["TGYRO_NEO_METHOD"] = ( - f"{physics_options['neoclassical']}" # 0: None, 1: H&H, 2: NEO - ) - TGYROoptions["TGYRO_TGLF_REVISION"] = ( - "0" # 0: Use input.tglf in folders, instead of GA defaults. - ) - TGYROoptions["TGYRO_EXPWD_FLAG"] = ( - f"{physics_options['TurbulentExchange']}" # Add turbulent exchange to exchange powers in targets? - ) + TGYROoptions["TGYRO_NEO_METHOD"] = f"{physics_options['neoclassical']}" # 0: None, 1: H&H, 2: NEO + TGYROoptions["TGYRO_TGLF_REVISION"] = "0" # 0: Use input.tglf in folders, instead of GA defaults. + TGYROoptions["TGYRO_EXPWD_FLAG"] = f"{physics_options['TurbulentExchange']}" # Add turbulent exchange to exchange powers in targets? 
# TGYROoptions['TGYRO_ZEFF_FLAG'] = '1' # 1: Use Zeff from input.gacode # ----------- Assumptions for i in physics_options["quasineutrality"]: - TGYROoptions[f"TGYRO_DEN_METHOD{i}"] = ( - "-1" # Species used to ensure quasineutrality - ) + TGYROoptions[f"TGYRO_DEN_METHOD{i}"] = "-1" # Species used to ensure quasineutrality # TGYROoptions['LOC_NUM_EQUIL_FLAG'] = f"{physics_options['usingINPUTgeo']}" # 0: Use Miller, 1: Use numerical equilibrium (not valid for TGLF_scans) #DEPRECATED IN LATEST VERSIONS - TGYROoptions["LOC_LOCK_PROFILE_FLAG"] = ( - f"{physics_options['InputType']}" # 0: Re-compute profiles from coarse gradients grid, 1: Use exact profiles (only valid at first iteration) - ) - TGYROoptions["TGYRO_CONSISTENT_FLAG"] = ( - f"{physics_options['GradientsType']}" # 0: Finite-difference gradients used from input.gacode, 1: Gradients from coarse profiles? - ) + TGYROoptions["LOC_LOCK_PROFILE_FLAG"] = f"{physics_options['InputType']}" # 0: Re-compute profiles from coarse gradients grid, 1: Use exact profiles (only valid at first iteration) + TGYROoptions["TGYRO_CONSISTENT_FLAG"] = f"{physics_options['GradientsType']}" # 0: Finite-difference gradients used from input.gacode, 1: Gradients from coarse profiles? 
TGYROoptions["LOC_EVOLVE_GRAD_ONLY_FLAG"] = "0" # 1: Do not change absolute values - TGYROoptions["TGYRO_PTOT_FLAG"] = ( - f"{physics_options['PtotType']}" # 0: Compute pressure from profiles, 1: correct from input.gacode PTOT profile - ) + TGYROoptions["TGYRO_PTOT_FLAG"] = f"{physics_options['PtotType']}" # 0: Compute pressure from profiles, 1: correct from input.gacode PTOT profile # ----------- Radii @@ -414,14 +358,14 @@ def convolution_CECE(d_perp_dict, dRdx=1.0): return fun, factorTot_to_Perp -def review_controls(TGLFoptions): +def review_controls(TGLFoptions, control = "input.tglf.controls"): - TGLFoptions_check = IOtools.generateMITIMNamelist(__mitimroot__ / "templates" / "input.tglf.controls", caseInsensitive=False) + options_check = IOtools.generateMITIMNamelist(__mitimroot__ / "templates" / control, caseInsensitive=False) # Add plasma too potential_flags = ['NS', 'SIGN_BT', 'SIGN_IT', 'VEXB', 'VEXB_SHEAR', 'BETAE', 'XNUE', 'ZEFF', 'DEBYE'] for flag in potential_flags: - TGLFoptions_check[flag] = None + options_check[flag] = None for option in TGLFoptions: @@ -430,5 +374,5 @@ def review_controls(TGLFoptions): # Do not fail with e.g. 
P_PRIME_LOC isGeometry = option.split('_')[-1] in ['LOC'] - if (not isSpecie) and (not isGeometry) and (option not in TGLFoptions_check): - print(f"\t- TGLF option {option} not in input.tglf.controls, prone to errors", typeMsg="q") + if (not isSpecie) and (not isGeometry) and (option not in options_check): + print(f"\t- Option {option} not in {control}, prone to errors", typeMsg="q") diff --git a/src/mitim_tools/gacode_tools/utils/GACODEplotting.py b/src/mitim_tools/gacode_tools/utils/GACODEplotting.py index 0777a2be..78520f31 100644 --- a/src/mitim_tools/gacode_tools/utils/GACODEplotting.py +++ b/src/mitim_tools/gacode_tools/utils/GACODEplotting.py @@ -89,13 +89,15 @@ def plotTGLFspectrum( elif limity: ax.set_ylim(bottom=0) + freq_coeff = 0 # The real frequencies should not be normalized by ky + if freq is not None and type(axs) == list: plot_spec( axF, kys, freq, markers=markers, - coeff=coeff, + coeff=freq_coeff, c=c, lw=lw, label=label, @@ -105,9 +107,9 @@ def plotTGLFspectrum( ) if ylabel: - axF.set_ylabel(decorateLabel("$\\omega$", coeff)) + axF.set_ylabel(decorateLabel("$\\omega$", freq_coeff)) - if coeff == 0: + if freq_coeff == 0: axF.set_yscale("symlog", linthresh=thr_symlog) elif limity: axF.set_ylim(bottom=0) diff --git a/src/mitim_tools/gacode_tools/utils/GACODErun.py b/src/mitim_tools/gacode_tools/utils/GACODErun.py index 74e7f0d0..ccce9612 100644 --- a/src/mitim_tools/gacode_tools/utils/GACODErun.py +++ b/src/mitim_tools/gacode_tools/utils/GACODErun.py @@ -1,14 +1,14 @@ import shutil -import os import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import interp1d -from mitim_tools.gacode_tools.utils import GACODEdefaults -from mitim_tools.transp_tools.utils import PLASMASTATEtools +from mitim_tools.transp_tools.utils import NTCCtools from mitim_tools.misc_tools import FARMINGtools, IOtools, MATHtools, GRAPHICStools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed +from 
mitim_tools.misc_tools.PLASMAtools import md_u + def runTGYRO( folderWork, outputFiles=None, @@ -93,123 +93,6 @@ def runTGYRO( tgyro_job.run() - -def modifyInputs( - input_class, - Settings=None, - extraOptions={}, - multipliers={}, - position_change=0, - addControlFunction=None, - **kwargs_to_function, -): - - # Check that those are valid flags - GACODEdefaults.review_controls(extraOptions) - GACODEdefaults.review_controls(multipliers) - # ------------------------------------------- - - if Settings is not None: - _, CodeOptions, label = addControlFunction(Settings, **kwargs_to_function) - - # ~~~~~~~~~~ Change with presets - print(f" \t- Using presets Settings = {Settings} ({label})", typeMsg="i") - input_class.controls = CodeOptions - - else: - print("\t- Input file was not modified by Settings, using what was there before",typeMsg="i") - - # Make all upper case - extraOptions = {ikey.upper(): value for ikey, value in extraOptions.items()} - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Change with external options -> Input directly, not as multiplier - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - if len(extraOptions) > 0: - print("\t- External options:") - for ikey in extraOptions: - if isinstance(extraOptions[ikey], (list, np.ndarray)): - value_to_change_to = extraOptions[ikey][position_change] - else: - value_to_change_to = extraOptions[ikey] - - # is a specie one? 
- try: - isspecie = ikey.split("_")[0] in input_class.species[1] - except: - isspecie = False - - if isspecie: - specie = int(ikey.split("_")[-1]) - varK = "_".join(ikey.split("_")[:-1]) - var_orig = input_class.species[specie][varK] - var_new = value_to_change_to - input_class.species[specie][varK] = var_new - else: - if ikey in input_class.controls: - var_orig = input_class.controls[ikey] - var_new = value_to_change_to - input_class.controls[ikey] = var_new - elif ikey in input_class.geom: - var_orig = input_class.geom[ikey] - var_new = value_to_change_to - input_class.geom[ikey] = var_new - elif ikey in input_class.plasma: - var_orig = input_class.plasma[ikey] - var_new = value_to_change_to - input_class.plasma[ikey] = var_new - else: - # If the variable in extraOptions wasn't in there, consider it a control param - print( - "\t\t- Variable to change did not exist previously, creating now", - typeMsg="i", - ) - var_orig = None - var_new = value_to_change_to - input_class.controls[ikey] = var_new - - print( - f"\t\t- Changing {ikey} from {var_orig} to {var_new}", - typeMsg="i", - ) - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Change with multipliers -> Input directly, not as multiplier - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - if len(multipliers) > 0: - print("\t\t- Variables change:") - for ikey in multipliers: - # is a specie one? 
- if ikey.split("_")[0] in input_class.species[1]: - specie = int(ikey.split("_")[-1]) - varK = "_".join(ikey.split("_")[:-1]) - var_orig = input_class.species[specie][varK] - var_new = var_orig * multipliers[ikey] - input_class.species[specie][varK] = var_new - else: - if ikey in input_class.controls: - var_orig = input_class.controls[ikey] - var_new = var_orig * multipliers[ikey] - input_class.controls[ikey] = var_new - elif ikey in input_class.geom: - var_orig = input_class.geom[ikey] - var_new = var_orig * multipliers[ikey] - input_class.geom[ikey] = var_new - elif ikey in input_class.plasma: - var_orig = input_class.plasma[ikey] - var_new = var_orig * multipliers[ikey] - input_class.plasma[ikey] = var_new - else: - print( - "\t- Variable to scan did not exist in original file, add it as extraOptions first", - typeMsg="w", - ) - - print(f"\t\t\t- Changing {ikey} from {var_orig} to {var_new} (x{multipliers[ikey]})") - - return input_class - - def findNamelist(LocationCDF, folderWork=None, nameRunid="10000", ForceFirst=True): # ----------------------------------------------------------- # Find namelist @@ -236,7 +119,6 @@ def findNamelist(LocationCDF, folderWork=None, nameRunid="10000", ForceFirst=Tru return LocationNML, dummy - def prepareTGYRO( LocationCDF, LocationNML, @@ -276,7 +158,6 @@ def prepareTGYRO( includeGEQ=includeGEQ, ) - def CDFtoTRXPLoutput( LocationCDF, LocationNML, @@ -330,60 +211,6 @@ def CDFtoTRXPLoutput( grids=grids, ) - -def executeCGYRO( - FolderCGYRO, - linesCGYRO, - fileProfiles, - outputFiles=["out.cgyro.run"], - name="", - numcores=32, -): - FolderCGYRO.mkdir(parents=True, exist_ok=True) - - cgyro_job = FARMINGtools.mitim_job(FolderCGYRO) - - cgyro_job.define_machine( - "cgyro", - f"mitim_cgyro_{name}", - slurm_settings={ - "minutes": 60, - "ntasks": numcores, - "name": name, - }, - ) - - # --------------- - # Prepare files - # --------------- - - fileCGYRO = FolderCGYRO / f"input.cgyro" - with open(fileCGYRO, "w") as f: - 
f.write("\n".join(linesCGYRO)) - - # --------------- - # Execution command - # --------------- - - folderExecution = cgyro_job.machineSettings["folderWork"] - CGYROcommand = f"cgyro -e . -n {numcores} -p {folderExecution}" - - shellPreCommands = [] - - # --------------- - # Execute - # --------------- - - cgyro_job.prep( - CGYROcommand, - input_files=[fileProfiles, fileCGYRO], - output_files=outputFiles, - shellPreCommands=shellPreCommands, - ) - - cgyro_job.run() - - def runTRXPL( FolderTRXPL, timeRun, @@ -445,7 +272,6 @@ def runTRXPL( ) trxpl_job.run() - def runPROFILES_GEN( FolderTGLF, nameFiles="10001", @@ -457,7 +283,7 @@ def runPROFILES_GEN( if UseMITIMmodification: print("\t\t- Running modifyPlasmaState") shutil.copy2(FolderTGLF / f"{nameFiles}.cdf", FolderTGLF / f"{nameFiles}.cdf_old") - pls = PLASMASTATEtools.Plasmastate(FolderTGLF / f"{nameFiles}.cdf_old") + pls = NTCCtools.Plasmastate(FolderTGLF / f"{nameFiles}.cdf_old") pls.modify_default(FolderTGLF / f"{nameFiles}.cdf") inputFiles = [ @@ -474,7 +300,7 @@ def runPROFILES_GEN( txt += f" -g {nameFiles}.geq\n" else: txt += "\n" - with open(FolderTGLF + "profiles_gen.sh", "w") as f: + with open(FolderTGLF / "profiles_gen.sh", "w") as f: f.write(txt) # ****************** @@ -512,7 +338,6 @@ def runPROFILES_GEN( print(f"\t\t- Proceeding to run PROFILES_GEN with: {txt}") pgen_job.run() - def runVGEN( workingFolder, numcores=32, @@ -589,45 +414,10 @@ def runVGEN( return file_new - -def buildDictFromInput(inputFile): - parsed = {} - - lines = inputFile.split("\n") - for line in lines: - if "=" in line: - splits = [i.split()[0] for i in line.split("=")] - if ("." 
in splits[1]) and (splits[1][0].split()[0] != "."): - parsed[splits[0].split()[0]] = float(splits[1].split()[0]) - else: - try: - parsed[splits[0].split()[0]] = int(splits[1].split()[0]) - except: - parsed[splits[0].split()[0]] = splits[1].split()[0] - - for i in parsed: - if isinstance(parsed[i], str): - if ( - parsed[i].lower() == "t" - or parsed[i].lower() == "true" - or parsed[i].lower() == ".true." - ): - parsed[i] = True - elif ( - parsed[i].lower() == "f" - or parsed[i].lower() == "false" - or parsed[i].lower() == ".false." - ): - parsed[i] = False - - return parsed - - # ---------------------------------------------------------------------- # Reading/Writing routines # ---------------------------------------------------------------------- - def obtainFluctuationLevel( ky, Amplitude, @@ -669,7 +459,6 @@ def obtainFluctuationLevel( return fluctSim * 100.0 * factorTot_to_Perp - def obtainNTphase( ky, nTphase, @@ -690,7 +479,6 @@ def obtainNTphase( return neTe - def integrateSpectrum( xOriginal, yOriginal, @@ -832,7 +620,6 @@ def integrateSpectrum( return integ - def defineNewGrid( xOriginal1, yOriginal1, @@ -898,237 +685,3 @@ def defineNewGrid( return x[imin:imax], y[imin:imax] - -def runTGLF( - FolderGACODE, - tglf_executor, - minutes=5, - cores_tglf=4, - extraFlag="", - filesToRetrieve=["out.tglf.gbflux"], - name="", - launchSlurm=True, - attempts_execution=1, -): - """ - launchSlurm = True -> Launch as a batch job in the machine chosen - launchSlurm = False -> Launch locally as a bash script - """ - - tmpFolder = FolderGACODE / "tmp_tglf" - IOtools.askNewFolder(tmpFolder, force=True) - - tglf_job = FARMINGtools.mitim_job(tmpFolder) - - tglf_job.define_machine_quick("tglf",f"mitim_{name}") - - folders, folders_red = [], [] - for subFolderTGLF in tglf_executor: - - rhos = list(tglf_executor[subFolderTGLF].keys()) - - # --------------------------------------------- - # Prepare files and folders - # --------------------------------------------- - - for i, rho 
in enumerate(rhos): - print(f"\t- Preparing TGLF ({subFolderTGLF}) at rho={rho:.4f}") - - folderTGLF_this = tmpFolder / subFolderTGLF / f"rho_{rho:.4f}" - folders.append(folderTGLF_this) - - folderTGLF_this_rel = folderTGLF_this.relative_to(tmpFolder) - folders_red.append(folderTGLF_this_rel.as_posix() if tglf_job.machineSettings['machine'] != 'local' else str(folderTGLF_this_rel)) - - folderTGLF_this.mkdir(parents=True, exist_ok=True) - - fileTGLF = folderTGLF_this / "input.tglf" - with open(fileTGLF, "w") as f: - f.write(tglf_executor[subFolderTGLF][rho]["inputs"]) - - # --------------------------------------------- - # Prepare command - # --------------------------------------------- - - # Grab machine local limits ------------------------------------------------- - max_cores_per_node = FARMINGtools.mitim_job.grab_machine_settings("tglf")["cores_per_node"] - - # If the run is local and not slurm, let's check the number of cores - if (FARMINGtools.mitim_job.grab_machine_settings("tglf")["machine"] == "local") and not (launchSlurm and ("partition" in tglf_job.machineSettings["slurm"])): - - cores_in_machine = int(os.cpu_count()) - cores_allocated = int(os.environ.get('SLURM_CPUS_PER_TASK')) if os.environ.get('SLURM_CPUS_PER_TASK') is not None else None - - if cores_allocated is not None: - if max_cores_per_node is None or (cores_allocated < max_cores_per_node): - print(f"\t - Detected {cores_allocated} cores allocated by SLURM, using this value as maximum for local execution (vs {max_cores_per_node} specified)",typeMsg="i") - max_cores_per_node = cores_allocated - elif cores_in_machine is not None: - if max_cores_per_node is None or (cores_in_machine < max_cores_per_node): - print(f"\t - Detected {cores_in_machine} cores in machine, using this value as maximum for local execution (vs {max_cores_per_node} specified)",typeMsg="i") - max_cores_per_node = cores_in_machine - else: - # Default to just 16 just in case - if max_cores_per_node is None: - max_cores_per_node 
= 16 - else: - # For remote execution, default to just 16 just in case - if max_cores_per_node is None: - max_cores_per_node = 16 - # --------------------------------------------------------------------------- - - # Grab the total number of cores of this job -------------------------------- - total_tglf_executions = len(rhos) * len(tglf_executor) - total_cores_required = int(cores_tglf) * total_tglf_executions - # --------------------------------------------------------------------------- - - # Simply bash, no slurm - if not (launchSlurm and ("partition" in tglf_job.machineSettings["slurm"])): - - max_parallel_execution = max_cores_per_node // cores_tglf # Make sure we don't overload the machine when running locally (assuming no farming trans-node) - - print(f"\t- TGLF will be executed as bash script (total cores: {total_cores_required}, cores per TGLF: {cores_tglf}). MITIM will launch {total_tglf_executions // max_parallel_execution+1} sequential executions",typeMsg="i") - - # Build the bash script with job control enabled and a loop to limit parallel jobs - TGLFcommand = "#!/usr/bin/env bash\n" - TGLFcommand += "set -m\n" # Enable job control even in non-interactive mode - TGLFcommand += f"max_parallel_execution={max_parallel_execution}\n\n" # Set the maximum number of parallel processes - - # Create a bash array of folders - TGLFcommand += "folders=(\n" - for folder in folders_red: - TGLFcommand += f' "{folder}"\n' - TGLFcommand += ")\n\n" - - # Loop over each folder and launch tglf, waiting if we've reached max_parallel_execution - TGLFcommand += "for folder in \"${folders[@]}\"; do\n" - TGLFcommand += f" tglf -e \"$folder\" -n {cores_tglf} -p {tglf_job.folderExecution} &\n" - TGLFcommand += " while (( $(jobs -r | wc -l) >= max_parallel_execution )); do sleep 1; done\n" - TGLFcommand += "done\n\n" - TGLFcommand += "wait\n" - - # Slurm setup - array_list = None - shellPreCommands = None - shellPostCommands = None - ntasks = total_cores_required - cpuspertask = 
cores_tglf - - else: - - # Job array - if total_cores_required < max_cores_per_node: - - print(f"\t- TGLF will be executed in SLURM as standard job (cpus: {total_cores_required})",typeMsg="i") - - # TGLF launches - TGLFcommand = "" - for folder in folders_red: - TGLFcommand += f"tglf -e {folder} -n {cores_tglf} -p {tglf_job.folderExecution} &\n" - TGLFcommand += "\nwait" # This is needed so that the script doesn't end before each job - - # Slurm setup - array_list = None - shellPreCommands = None - shellPostCommands = None - ntasks = total_tglf_executions - cpuspertask = cores_tglf - - # Standard job - else: - #raise Exception("TGLF array not implemented yet") - print(f"\t- TGLF will be executed in SLURM as job array due to its size (cpus: {total_cores_required})",typeMsg="i") - - # As a pre-command, organize all folders in a simpler way - shellPreCommands = [] - shellPostCommands = [] - array_list = [] - for i, folder in enumerate(folders_red): - array_list.append(f"{i}") - folder_temp_array = f"run{i}" - folder_actual = folder - shellPreCommands.append(f"mkdir {tglf_job.folderExecution}/{folder_temp_array}; cp {tglf_job.folderExecution}/{folder_actual}/* {tglf_job.folderExecution}/{folder_temp_array}/.") - shellPostCommands.append(f"cp {tglf_job.folderExecution}/{folder_temp_array}/* {tglf_job.folderExecution}/{folder_actual}/.; rm -r {tglf_job.folderExecution}/{folder_temp_array}") - - # TGLF launches - indexed_folder = 'run"$SLURM_ARRAY_TASK_ID"' - TGLFcommand = f'tglf -e {indexed_folder} -n {cores_tglf} -p {tglf_job.folderExecution} 1> {tglf_job.folderExecution}/{indexed_folder}/slurm_output.dat 2> {tglf_job.folderExecution}/{indexed_folder}/slurm_error.dat\n' - - # Slurm setup - array_list = ",".join(array_list) - ntasks = 1 - cpuspertask = cores_tglf - - # --------------------------------------------- - # Execute - # --------------------------------------------- - - tglf_job.define_machine( - "tglf", - f"mitim_{name}", - launchSlurm=launchSlurm, - 
slurm_settings={ - "minutes": minutes, - "ntasks": ntasks, - "name": name, - "cpuspertask": cpuspertask, - "job_array": array_list, - #"nodes": 1, - }, - ) - - # I would like the mitim_job to check if the retrieved folders were complete - check_files_in_folder = {} - for folder in folders_red: - check_files_in_folder[folder] = filesToRetrieve - # --------------------------------------------- - - tglf_job.prep( - TGLFcommand, - input_folders=folders, - output_folders=folders_red, - check_files_in_folder=check_files_in_folder, - shellPreCommands=shellPreCommands, - shellPostCommands=shellPostCommands, - ) - - tglf_job.run( - removeScratchFolders=True, - attempts_execution=attempts_execution - ) - - # --------------------------------------------- - # Organize - # --------------------------------------------- - - print("\t- Retrieving files and changing names for storing") - fineall = True - for subFolderTGLF in tglf_executor: - - for i, rho in enumerate(tglf_executor[subFolderTGLF].keys()): - for file in filesToRetrieve: - original_file = f"{file}_{rho:.4f}{extraFlag}" - final_destination = ( - tglf_executor[subFolderTGLF][rho]['folder'] / f"{original_file}" - ) - final_destination.unlink(missing_ok=True) - - temp_file = tmpFolder / subFolderTGLF / f"rho_{rho:.4f}" / f"{file}" - temp_file.replace(final_destination) - - fineall = fineall and final_destination.exists() - - if not final_destination.exists(): - print( - f"\t!! 
file {file} ({original_file}) could not be retrived", - typeMsg="w", - ) - - if fineall: - print("\t\t- All files were successfully retrieved") - - # Remove temporary folder - shutil.rmtree(tmpFolder) - - else: - print("\t\t- Some files were not retrieved", typeMsg="w") diff --git a/src/mitim_tools/gacode_tools/utils/NORMtools.py b/src/mitim_tools/gacode_tools/utils/NORMtools.py index 86ea444e..b17c44c2 100644 --- a/src/mitim_tools/gacode_tools/utils/NORMtools.py +++ b/src/mitim_tools/gacode_tools/utils/NORMtools.py @@ -87,6 +87,8 @@ def normalizations_tgyro(tgyro, rho, roa): "rho": rho, "q_gb": np.interp(rho, x_tgyro, tgyro.Q_GB[iteration]), "g_gb": np.interp(rho, x_tgyro, tgyro.Gamma_GB[iteration]), + "pi_gb": np.interp(rho, x_tgyro, tgyro.Pi_GB[iteration]), + "s_gb": np.interp(rho, x_tgyro, tgyro.S_GB[iteration]), "c_s": np.interp(rho, x_tgyro, tgyro.c_s[iteration]), } @@ -94,6 +96,7 @@ def normalizations_tgyro(tgyro, rho, roa): def normalizations_profiles(profiles): + if profiles is not None: Set_norm = { "rho": profiles.profiles["rho(-)"], @@ -101,26 +104,18 @@ def normalizations_profiles(profiles): "rmin": np.abs(profiles.profiles["rmin(m)"]), "q_gb": np.abs(profiles.derived["q_gb"]), "g_gb": np.abs(profiles.derived["g_gb"]), - "exp_Qe": np.abs(profiles.derived["qe"]), - "exp_Qi": np.abs(profiles.derived["qi"]), - "exp_Ge": np.abs(profiles.derived["ge"]), + "pi_gb": np.abs(profiles.derived["pi_gb"]), + "s_gb": np.abs(profiles.derived["s_gb"]), "B_unit": np.abs(profiles.derived["B_unit"]), "rho_s": np.abs(profiles.derived["rho_s"]), "c_s": np.abs(profiles.derived["c_s"]), - "Te_keV": np.abs( - profiles.profiles[ - "te(keV)" if "te(keV)" in profiles.profiles else "Te(keV)" - ] - ), + "Te_keV": np.abs(profiles.profiles["te(keV)"]), "ne_20": np.abs(profiles.profiles["ne(10^19/m^3)"]) * 1e-1, "Ti_keV": np.abs(profiles.profiles["ti(keV)"][:, 0]), "ni_20": np.abs(profiles.derived["ni_thrAll"]) * 1e-1, - "exp_Qe": profiles.derived["qe_MWmiller"] - / 
profiles.derived["surfGACODE_miller"], # This is the same as qe_MWm2 - "exp_Qi": profiles.derived["qi_MWmiller"] - / profiles.derived["surfGACODE_miller"], - "exp_Ge": profiles.derived["ge_10E20miller"] - / profiles.derived["surfGACODE_miller"], + "exp_Qe": profiles.derived["qe_MW"] / profiles.derived["surfGACODE_geo"], # This is the same as qe_MWm2 + "exp_Qi": profiles.derived["qi_MW"] / profiles.derived["surfGACODE_geo"], + "exp_Ge": profiles.derived["ge_10E20"] / profiles.derived["surfGACODE_geo"], "mi_ref": profiles.derived["mi_ref"], } @@ -252,6 +247,7 @@ def plotNormalizations( colors=["b", "r", "g"], legYN=True, extralab="", + fn = None, ): if NormalizationSets is not None: if axs is None: diff --git a/src/mitim_tools/gacode_tools/utils/PORTALSinteraction.py b/src/mitim_tools/gacode_tools/utils/PORTALSinteraction.py deleted file mode 100644 index b4e7d657..00000000 --- a/src/mitim_tools/gacode_tools/utils/PORTALSinteraction.py +++ /dev/null @@ -1,559 +0,0 @@ -import torch -import numpy as np -from mitim_tools.misc_tools import PLASMAtools -from mitim_modules.portals import PORTALStools -from mitim_tools.misc_tools.LOGtools import printMsg as print -from IPython import embed - -def parabolizePlasma(self): - _, T = PLASMAtools.parabolicProfile( - Tbar=self.derived["Te_vol"], - nu=self.derived["Te_peaking"], - rho=self.profiles["rho(-)"], - Tedge=self.profiles["te(keV)"][-1], - ) - _, Ti = PLASMAtools.parabolicProfile( - Tbar=self.derived["Ti_vol"], - nu=self.derived["Ti_peaking"], - rho=self.profiles["rho(-)"], - Tedge=self.profiles["ti(keV)"][-1, 0], - ) - _, n = PLASMAtools.parabolicProfile( - Tbar=self.derived["ne_vol20"] * 1e1, - nu=self.derived["ne_peaking"], - rho=self.profiles["rho(-)"], - Tedge=self.profiles["ne(10^19/m^3)"][-1], - ) - - self.profiles["te(keV)"] = T - - self.profiles["ti(keV)"][:, 0] = Ti - self.makeAllThermalIonsHaveSameTemp(refIon=0) - - factor_n = n / self.profiles["ne(10^19/m^3)"] - self.profiles["ne(10^19/m^3)"] = n - 
self.scaleAllThermalDensities(scaleFactor=factor_n) - - self.deriveQuantities() - - -def changeRFpower(self, PrfMW=25.0): - """ - keeps same partition - """ - print(f"- Changing the RF power from {self.derived['qRF_MWmiller'][-1]:.1f} MW to {PrfMW:.1f} MW",typeMsg="i",) - - if self.derived["qRF_MWmiller"][-1] == 0.0: - raise Exception("No RF power in the input.gacode, cannot modify the RF power") - - for i in ["qrfe(MW/m^3)", "qrfi(MW/m^3)"]: - self.profiles[i] = self.profiles[i] * PrfMW / self.derived["qRF_MWmiller"][-1] - -def imposeBCtemps(self, TkeV=0.5, rho=0.9, typeEdge="linear", Tesep=0.1, Tisep=0.2): - ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) - - self.profiles["te(keV)"] = ( - self.profiles["te(keV)"] * TkeV / self.profiles["te(keV)"][ix] - ) - - print( - f"- Producing {typeEdge} boundary condition @ rho = {rho}, T = {TkeV} keV", - typeMsg="i", - ) - - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm": - self.profiles["ti(keV)"][:, sp] = ( - self.profiles["ti(keV)"][:, sp] - * TkeV - / self.profiles["ti(keV)"][ix, sp] - ) - - if typeEdge == "linear": - self.profiles["te(keV)"][ix:] = np.linspace( - TkeV, Tesep, len(self.profiles["rho(-)"][ix:]) - ) - - for sp in range(len(self.Species)): - if self.Species[sp]["S"] == "therm": - self.profiles["ti(keV)"][ix:, sp] = np.linspace( - TkeV, Tisep, len(self.profiles["rho(-)"][ix:]) - ) - - elif typeEdge == "same": - pass - else: - raise Exception("no edge") - - -def imposeBCdens(self, n20=2.0, rho=0.9, typeEdge="linear", nedge20=0.5): - ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) - - print( - f"- Changing the initial average density from {self.derived['ne_vol20']:.1f} 1E20/m3 to {n20:.1f} 1E20/m3", - typeMsg="i", - ) - - factor = n20 / self.derived["ne_vol20"] - - for i in ["ne(10^19/m^3)", "ni(10^19/m^3)"]: - self.profiles[i] = self.profiles[i] * factor - - if typeEdge == "linear": - factor_x = ( - np.linspace( - self.profiles["ne(10^19/m^3)"][ix], - nedge20 * 1e1, - 
len(self.profiles["rho(-)"][ix:]), - ) - / self.profiles["ne(10^19/m^3)"][ix:] - ) - - self.profiles["ne(10^19/m^3)"][ix:] = ( - self.profiles["ne(10^19/m^3)"][ix:] * factor_x - ) - for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): - self.profiles["ni(10^19/m^3)"][ix:, i] = ( - self.profiles["ni(10^19/m^3)"][ix:, i] * factor_x - ) - elif typeEdge == "same": - pass - else: - raise Exception("no edge") - - -# ------------------------------------------------------------------------------------------------------------------------------------------------------ -# This is where the definitions for the summation variables happen for mitim and PORTALSplot -# ------------------------------------------------------------------------------------------------------------------------------------------------------ - -def TGYROmodeledVariables(TGYROresults, - powerstate, - useConvectiveFluxes=False, - forceZeroParticleFlux=False, - includeFast=False, - impurityPosition=1, - UseFineGridTargets=False, - OriginalFimp=1.0, - provideTurbulentExchange=False, - provideTargets=False - ): - """ - This function is used to extract the TGYRO results and store them in the powerstate object, from numpy arrays to torch tensors. 
- """ - - if "tgyro_stds" not in TGYROresults.__dict__: - TGYROresults.tgyro_stds = False - - if UseFineGridTargets: - TGYROresults.useFineGridTargets(impurityPosition=impurityPosition) - - - nr = powerstate.plasma['rho'].shape[-1] - if powerstate.plasma['rho'].shape[-1] != TGYROresults.rho.shape[-1]: - print('\t- TGYRO was run with an extra point in the grid, treating it carefully now') - - - # ********************************** - # *********** Electron Energy Fluxes - # ********************************** - - powerstate.plasma["Pe_tr_turb"] = torch.Tensor(TGYROresults.Qe_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pe_tr_neo"] = torch.Tensor(TGYROresults.Qe_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Pe_tr_turb_stds"] = torch.Tensor(TGYROresults.Qe_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Pe_tr_neo_stds"] = torch.Tensor(TGYROresults.Qe_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Pe"] = torch.Tensor(TGYROresults.Qe_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pe_stds"] = torch.Tensor(TGYROresults.Qe_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Ion Energy Fluxes - # ********************************** - - if includeFast: - - powerstate.plasma["Pi_tr_turb"] = torch.Tensor(TGYROresults.QiIons_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pi_tr_neo"] = torch.Tensor(TGYROresults.QiIons_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Pi_tr_turb_stds"] = torch.Tensor(TGYROresults.QiIons_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Pi_tr_neo_stds"] = torch.Tensor(TGYROresults.QiIons_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - else: - - powerstate.plasma["Pi_tr_turb"] = 
torch.Tensor(TGYROresults.QiIons_sim_turb_thr[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pi_tr_neo"] = torch.Tensor(TGYROresults.QiIons_sim_neo_thr[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Pi_tr_turb_stds"] = torch.Tensor(TGYROresults.QiIons_sim_turb_thr_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Pi_tr_neo_stds"] = torch.Tensor(TGYROresults.QiIons_sim_neo_thr_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Pi"] = torch.Tensor(TGYROresults.Qi_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Pi_stds"] = torch.Tensor(TGYROresults.Qi_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Momentum Fluxes - # ********************************** - - powerstate.plasma["Mt_tr_turb"] = torch.Tensor(TGYROresults.Mt_sim_turb[:, :nr]).to(powerstate.dfT) # So far, let's include fast in momentum - powerstate.plasma["Mt_tr_neo"] = torch.Tensor(TGYROresults.Mt_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Mt_tr_turb_stds"] = torch.Tensor(TGYROresults.Mt_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Mt_tr_neo_stds"] = torch.Tensor(TGYROresults.Mt_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Mt"] = torch.Tensor(TGYROresults.Mt_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Mt_stds"] = torch.Tensor(TGYROresults.Mt_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Particle Fluxes - # ********************************** - - # Store raw fluxes for better plotting later - powerstate.plasma["Ce_raw_tr_turb"] = torch.Tensor(TGYROresults.Ge_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_raw_tr_neo"] = torch.Tensor(TGYROresults.Ge_sim_neo[:, 
:nr]).to(powerstate.dfT) - - powerstate.plasma["Ce_raw_tr_turb_stds"] = torch.Tensor(TGYROresults.Ge_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Ce_raw_tr_neo_stds"] = torch.Tensor(TGYROresults.Ge_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Ce_raw"] = torch.Tensor(TGYROresults.Ge_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_raw_stds"] = torch.Tensor(TGYROresults.Ge_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if not useConvectiveFluxes: - - powerstate.plasma["Ce_tr_turb"] = powerstate.plasma["Ce_raw_tr_turb"] - powerstate.plasma["Ce_tr_neo"] = powerstate.plasma["Ce_raw_tr_neo"] - - powerstate.plasma["Ce_tr_turb_stds"] = powerstate.plasma["Ce_raw_tr_turb_stds"] - powerstate.plasma["Ce_tr_neo_stds"] = powerstate.plasma["Ce_raw_tr_neo_stds"] - - if provideTargets: - powerstate.plasma["Ce"] = powerstate.plasma["Ce_raw"] - powerstate.plasma["Ce_stds"] = powerstate.plasma["Ce_raw_stds"] - - else: - - powerstate.plasma["Ce_tr_turb"] = torch.Tensor(TGYROresults.Ce_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_tr_neo"] = torch.Tensor(TGYROresults.Ce_sim_neo[:, :nr]).to(powerstate.dfT) - - powerstate.plasma["Ce_tr_turb_stds"] = torch.Tensor(TGYROresults.Ce_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["Ce_tr_neo_stds"] = torch.Tensor(TGYROresults.Ce_sim_neo_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["Ce"] = torch.Tensor(TGYROresults.Ce_tar[:, :nr]).to(powerstate.dfT) - powerstate.plasma["Ce_stds"] = torch.Tensor(TGYROresults.Ce_tar_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Impurity Fluxes - # ********************************** - - # Store raw fluxes for better plotting later - 
powerstate.plasma["CZ_raw_tr_turb"] = torch.Tensor(TGYROresults.Gi_sim_turb[impurityPosition, :, :nr]).to(powerstate.dfT) - powerstate.plasma["CZ_raw_tr_neo"] = torch.Tensor(TGYROresults.Gi_sim_neo[impurityPosition, :, :nr]).to(powerstate.dfT) - - powerstate.plasma["CZ_raw_tr_turb_stds"] = torch.Tensor(TGYROresults.Gi_sim_turb_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - powerstate.plasma["CZ_raw_tr_neo_stds"] = torch.Tensor(TGYROresults.Gi_sim_neo_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["CZ_raw"] = torch.Tensor(TGYROresults.Gi_tar[impurityPosition, :, :nr]).to(powerstate.dfT) - powerstate.plasma["CZ_raw_stds"] = torch.Tensor(TGYROresults.Gi_tar_stds[impurityPosition, :, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - - if not useConvectiveFluxes: - - powerstate.plasma["CZ_tr_turb"] = powerstate.plasma["CZ_raw_tr_turb"] / OriginalFimp - powerstate.plasma["CZ_tr_neo"] = powerstate.plasma["CZ_raw_tr_neo"] / OriginalFimp - - powerstate.plasma["CZ_tr_turb_stds"] = powerstate.plasma["CZ_raw_tr_turb_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None - powerstate.plasma["CZ_tr_neo_stds"] = powerstate.plasma["CZ_raw_tr_neo_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["CZ"] = powerstate.plasma["CZ_raw"] / OriginalFimp - powerstate.plasma["CZ_stds"] = powerstate.plasma["CZ_raw_stds"] / OriginalFimp if TGYROresults.tgyro_stds else None - - else: - - powerstate.plasma["CZ_tr_turb"] = torch.Tensor(TGYROresults.Ci_sim_turb[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp - powerstate.plasma["CZ_tr_neo"] = torch.Tensor(TGYROresults.Ci_sim_neo[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp - - powerstate.plasma["CZ_tr_turb_stds"] = torch.Tensor(TGYROresults.Ci_sim_turb_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if 
TGYROresults.tgyro_stds else None - powerstate.plasma["CZ_tr_neo_stds"] = torch.Tensor(TGYROresults.Ci_sim_neo_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None - - if provideTargets: - powerstate.plasma["CZ"] = torch.Tensor(TGYROresults.Ci_tar[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp - powerstate.plasma["CZ_stds"] = torch.Tensor(TGYROresults.Ci_tar_stds[impurityPosition, :, :nr]).to(powerstate.dfT) / OriginalFimp if TGYROresults.tgyro_stds else None - - # ********************************** - # *********** Energy Exchange - # ********************************** - - if provideTurbulentExchange: - powerstate.plasma["PexchTurb"] = torch.Tensor(TGYROresults.EXe_sim_turb[:, :nr]).to(powerstate.dfT) - powerstate.plasma["PexchTurb_stds"] = torch.Tensor(TGYROresults.EXe_sim_turb_stds[:, :nr]).to(powerstate.dfT) if TGYROresults.tgyro_stds else None - else: - powerstate.plasma["PexchTurb"] = powerstate.plasma["Pe_tr_turb"] * 0.0 - powerstate.plasma["PexchTurb_stds"] = powerstate.plasma["Pe_tr_turb"] * 0.0 - - # ********************************** - # *********** Traget extra - # ********************************** - - if forceZeroParticleFlux and provideTargets: - powerstate.plasma["Ce"] = powerstate.plasma["Ce"] * 0.0 - powerstate.plasma["Ce_stds"] = powerstate.plasma["Ce_stds"] * 0.0 - - # ------------------------------------------------------------------------------------------------------------------------ - # Sum here turbulence and neoclassical, after modifications - # ------------------------------------------------------------------------------------------------------------------------ - - quantities = ['Pe', 'Pi', 'Ce', 'CZ', 'Mt', 'Ce_raw', 'CZ_raw'] - for ikey in quantities: - powerstate.plasma[ikey+"_tr"] = powerstate.plasma[ikey+"_tr_turb"] + powerstate.plasma[ikey+"_tr_neo"] - - return powerstate - - -def calculate_residuals(powerstate, PORTALSparameters, specific_vars=None): - """ - Notes - 
----- - - Works with tensors - - It should be independent on how many dimensions it has, except that the last dimension is the multi-ofs - """ - - # Case where I have already constructed the dictionary (i.e. in scalarized objective) - if specific_vars is not None: - var_dict = specific_vars - # Prepare dictionary from powerstate (for use in Analysis) - else: - var_dict = {} - - mapper = { - "QeTurb": "Pe_tr_turb", - "QiTurb": "Pi_tr_turb", - "GeTurb": "Ce_tr_turb", - "GZTurb": "CZ_tr_turb", - "MtTurb": "Mt_tr_turb", - "QeNeo": "Pe_tr_neo", - "QiNeo": "Pi_tr_neo", - "GeNeo": "Ce_tr_neo", - "GZNeo": "CZ_tr_neo", - "MtNeo": "Mt_tr_neo", - "QeTar": "Pe", - "QiTar": "Pi", - "GeTar": "Ce", - "GZTar": "CZ", - "MtTar": "Mt", - "PexchTurb": "PexchTurb" - } - - for ikey in mapper: - var_dict[ikey] = powerstate.plasma[mapper[ikey]][..., 1:] - if mapper[ikey] + "_stds" in powerstate.plasma: - var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][..., 1:] - else: - var_dict[ikey + "_stds"] = None - - dfT = list(var_dict.values())[0] # as a reference for sizes - - # ------------------------------------------------------------------------- - # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added - # ------------------------------------------------------------------------- - - if PORTALSparameters["surrogateForTurbExch"]: - PexchTurb_integrated = PORTALStools.computeTurbExchangeIndividual( - var_dict["PexchTurb"], powerstate - ) - else: - PexchTurb_integrated = torch.zeros(dfT.shape).to(dfT) - - # ------------------------------------------------------------------------ - # Go through each profile that needs to be predicted, calculate components - # ------------------------------------------------------------------------ - - of, cal, res = ( - torch.Tensor().to(dfT), - torch.Tensor().to(dfT), - torch.Tensor().to(dfT), - ) - for prof in powerstate.ProfilesPredicted: - if prof == "te": - var = "Qe" - elif prof == "ti": - var = "Qi" - elif prof == 
"ne": - var = "Ge" - elif prof == "nZ": - var = "GZ" - elif prof == "w0": - var = "Mt" - - """ - ----------------------------------------------------------------------------------- - Transport (Turb+Neo) - ----------------------------------------------------------------------------------- - """ - of0 = var_dict[f"{var}Turb"] + var_dict[f"{var}Neo"] - - """ - ----------------------------------------------------------------------------------- - Target (Sum here the turbulent exchange power) - ----------------------------------------------------------------------------------- - """ - if var == "Qe": - cal0 = var_dict[f"{var}Tar"] + PexchTurb_integrated - elif var == "Qi": - cal0 = var_dict[f"{var}Tar"] - PexchTurb_integrated - else: - cal0 = var_dict[f"{var}Tar"] - - """ - ----------------------------------------------------------------------------------- - Ad-hoc modifications for different weighting - ----------------------------------------------------------------------------------- - """ - - if var == "Qe": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][0], - cal0 * PORTALSparameters["Pseudo_multipliers"][0], - ) - elif var == "Qi": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][1], - cal0 * PORTALSparameters["Pseudo_multipliers"][1], - ) - elif var == "Ge": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][2], - cal0 * PORTALSparameters["Pseudo_multipliers"][2], - ) - elif var == "GZ": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][3], - cal0 * PORTALSparameters["Pseudo_multipliers"][3], - ) - elif var == "Mt": - of0, cal0 = ( - of0 * PORTALSparameters["Pseudo_multipliers"][4], - cal0 * PORTALSparameters["Pseudo_multipliers"][4], - ) - - of, cal = torch.cat((of, of0), dim=-1), torch.cat((cal, cal0), dim=-1) - - # ----------- - # Composition - # ----------- - - # Source term is (TARGET - TRANSPORT) - source = cal - of - - # Residual is defined as the negative (bc it's maximization) normalized (1/N) 
norm of radial & channel residuals -> L2 - res = -1 / source.shape[-1] * torch.norm(source, p=2, dim=-1) - - return of, cal, source, res - - -def calculate_residuals_distributions(powerstate, PORTALSparameters): - """ - - Works with tensors - - It should be independent on how many dimensions it has, except that the last dimension is the multi-ofs - """ - - # Prepare dictionary from powerstate (for use in Analysis) - - mapper = { - "QeTurb": "Pe_tr_turb", - "QiTurb": "Pi_tr_turb", - "GeTurb": "Ce_tr_turb", - "GZTurb": "CZ_tr_turb", - "MtTurb": "Mt_tr_turb", - "QeNeo": "Pe_tr_neo", - "QiNeo": "Pi_tr_neo", - "GeNeo": "Ce_tr_neo", - "GZNeo": "CZ_tr_neo", - "MtNeo": "Mt_tr_neo", - "QeTar": "Pe", - "QiTar": "Pi", - "GeTar": "Ce", - "GZTar": "CZ", - "MtTar": "Mt", - "PexchTurb": "PexchTurb" - } - - var_dict = {} - for ikey in mapper: - var_dict[ikey] = powerstate.plasma[mapper[ikey]][:, 1:] - if mapper[ikey] + "_stds" in powerstate.plasma: - var_dict[ikey + "_stds"] = powerstate.plasma[mapper[ikey] + "_stds"][:, 1:] - else: - var_dict[ikey + "_stds"] = None - - dfT = var_dict["QeTurb"] # as a reference for sizes - - # ------------------------------------------------------------------------- - # Volume integrate energy exchange from MW/m^3 to a flux MW/m^2 to be added - # ------------------------------------------------------------------------- - - if PORTALSparameters["surrogateForTurbExch"]: - PexchTurb_integrated = PORTALStools.computeTurbExchangeIndividual( - var_dict["PexchTurb"], powerstate - ) - PexchTurb_integrated_stds = PORTALStools.computeTurbExchangeIndividual( - var_dict["PexchTurb_stds"], powerstate - ) - else: - PexchTurb_integrated = torch.zeros(dfT.shape).to(dfT) - PexchTurb_integrated_stds = torch.zeros(dfT.shape).to(dfT) - - # ------------------------------------------------------------------------ - # Go through each profile that needs to be predicted, calculate components - # ------------------------------------------------------------------------ - - 
of, cal = torch.Tensor().to(dfT), torch.Tensor().to(dfT) - ofE, calE = torch.Tensor().to(dfT), torch.Tensor().to(dfT) - for prof in powerstate.ProfilesPredicted: - if prof == "te": - var = "Qe" - elif prof == "ti": - var = "Qi" - elif prof == "ne": - var = "Ge" - elif prof == "nZ": - var = "GZ" - elif prof == "w0": - var = "Mt" - - """ - ----------------------------------------------------------------------------------- - Transport (Turb+Neo) - ----------------------------------------------------------------------------------- - """ - of0 = var_dict[f"{var}Turb"] + var_dict[f"{var}Neo"] - of0E = ( - var_dict[f"{var}Turb_stds"] ** 2 + var_dict[f"{var}Neo_stds"] ** 2 - ) ** 0.5 - - """ - ----------------------------------------------------------------------------------- - Target (Sum here the turbulent exchange power) - ----------------------------------------------------------------------------------- - """ - if var == "Qe": - cal0 = var_dict[f"{var}Tar"] + PexchTurb_integrated - cal0E = ( - var_dict[f"{var}Tar_stds"] ** 2 + PexchTurb_integrated_stds**2 - ) ** 0.5 - elif var == "Qi": - cal0 = var_dict[f"{var}Tar"] - PexchTurb_integrated - cal0E = ( - var_dict[f"{var}Tar_stds"] ** 2 + PexchTurb_integrated_stds**2 - ) ** 0.5 - else: - cal0 = var_dict[f"{var}Tar"] - cal0E = var_dict[f"{var}Tar_stds"] - - of, cal = torch.cat((of, of0), dim=-1), torch.cat((cal, cal0), dim=-1) - ofE, calE = torch.cat((ofE, of0E), dim=-1), torch.cat((calE, cal0E), dim=-1) - - return of, cal, ofE, calE diff --git a/src/mitim_tools/gs_tools/GEQtools.py b/src/mitim_tools/gs_tools/GEQtools.py index 2e173604..a4f7479e 100644 --- a/src/mitim_tools/gs_tools/GEQtools.py +++ b/src/mitim_tools/gs_tools/GEQtools.py @@ -8,68 +8,28 @@ from mitim_tools.gacode_tools import PROFILEStools from mitim_tools.gs_tools.utils import GEQplotting from shapely.geometry import LineString -from scipy.integrate import quad +from scipy.integrate import quad, cumulative_trapezoid +import megpy import freegs from freegs 
import geqdsk from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed """ -Note that this module relies on OMFIT classes (https://omfit.io/classes.html) procedures to intrepret the content of g-eqdsk files. -Modifications are made in MITINM for visualizations and a few extra derivations. +Note that this module relies on megpy to interpret the content of g-eqdsk files. +Modifications are made in MITIM for visualizations and a few extra derivations. """ -def fix_file(filename): - - with open(filename, "r") as f: - lines = f.readlines() - - # ----------------------------------------------------------------------- - # Remove coils (chatGPT 4o as of 08/24/24) - # ----------------------------------------------------------------------- - # Use StringIO to simulate the file writing - noCoils_file = io.StringIO() - for cont, line in enumerate(lines): - if cont > 0 and line[:2] == " ": - break - noCoils_file.write(line) - - # Reset cursor to the start of StringIO - noCoils_file.seek(0) - - # Write the StringIO content to a temporary file - with tempfile.NamedTemporaryFile(delete=False) as tmp_file: - tmp_file.write(noCoils_file.getvalue().encode('utf-8')) - noCoils_file = tmp_file.name - # ----------------------------------------------------------------------- - - with open(filename, 'r') as file1, open(noCoils_file, 'r') as file2: - file1_content = file1.read() - file2_content = file2.read() - - if file1_content != file2_content: - print(f"\t- geqdsk file {IOtools.clipstr(filename)} had coils, I have removed them") - - filename = noCoils_file - - return filename - class MITIMgeqdsk: def __init__(self, filename): - # Fix file by removing coils if it has them - filename = fix_file(filename) - - # Read GEQDSK file using OMFIT - import omfit_classes.omfit_eqdsk - self.g = omfit_classes.omfit_eqdsk.OMFITgeqdsk(filename, forceFindSeparatrix=True) + self.g = megpy.Equilibrium() + self.g.read_geqdsk(f_path=filename) + 
self.g.add_derived(incl_fluxsurfaces=True, analytic_shape=True, incl_B=True) # Extra derivations in MITIM self.derive() - # Remove temporary file - os.remove(filename) - @classmethod def timeslices(cls, filename, **kwargs): print("\n...Opening GEQ file with several time slices") @@ -111,61 +71,65 @@ def timeslices(cls, filename, **kwargs): def derive(self, debug=False): - self.Jt = self.g.surfAvg("Jt") * 1e-6 - self.Jt_fb = self.g.surfAvg("Jt_fb") * 1e-6 + zero_vector = np.zeros(self.g.derived["rho_pol"].shape) + + self.rho_pol = self.g.derived["rho_pol"].copy() + self.rho_tor = self.g.derived["rho_tor"].copy() + self.psi_pol_norm = self.rho_pol ** 2 + self.psi_tor_norm = self.rho_tor ** 2 + + self.Jt = self.g.derived["j_tor"] * 1e-6 + self.Jt_fb = zero_vector.copy() self.Jerror = np.abs(self.Jt - self.Jt_fb) - self.Ip = self.g["CURRENT"] + self.Ip = self.g.raw["current"] # Parameterizations of LCFS - self.kappa = self.g["fluxSurfaces"]["geo"]["kap"][-1] - self.kappaU = self.g["fluxSurfaces"]["geo"]["kapu"][-1] - self.kappaL = self.g["fluxSurfaces"]["geo"]["kapl"][-1] - - self.delta = self.g["fluxSurfaces"]["geo"]["delta"][-1] - self.deltaU = self.g["fluxSurfaces"]["geo"]["dell"][-1] - self.deltaL = self.g["fluxSurfaces"]["geo"]["dell"][-1] - - self.zeta = self.g["fluxSurfaces"]["geo"]["zeta"][-1] - - self.a = self.g["fluxSurfaces"]["geo"]["a"][-1] - self.Rmag = self.g["fluxSurfaces"]["geo"]["R"][0] - self.Zmag = self.g["fluxSurfaces"]["geo"]["Z"][0] - self.Rmajor = np.mean( - [ - self.g["fluxSurfaces"]["geo"]["Rmin_centroid"][-1], - self.g["fluxSurfaces"]["geo"]["Rmax_centroid"][-1], - ] - ) + self.kappa = self.g.derived["miller_geo"]["kappa"][-1] + self.kappaU = self.g.derived["miller_geo"]["kappa_u"][-1] + self.kappaL = self.g.derived["miller_geo"]["kappa_l"][-1] - self.Zmajor = self.Zmag + self.delta = self.g.derived["miller_geo"]["delta"][-1] + self.deltaU = self.g.derived["miller_geo"]["delta_u"][-1] + self.deltaL = self.g.derived["miller_geo"]["delta_l"][-1] 
+ + self.zeta = self.g.derived["miller_geo"]["zeta"][-1] + + self.a = self.g.derived["r"][-1] + self.Rmag = self.g.derived["Ro"][0] + self.Zmag = self.g.derived["Zo"][0] + + self.Rmajor = self.g.derived["Ro"][-1] + self.Zmajor = self.Zmag #self.g.derived["Zo"][-1] TODO: check which Z0 to use, perhaps switch to MXH definition self.eps = self.a / self.Rmajor # Core values - - self.kappa_a = self.g["fluxSurfaces"]["geo"]["cxArea"][-1] / (np.pi * self.a**2) + vp = np.array(self.g.fluxsurfaces["Vprime"]).flatten() + ir = np.array(self.g.fluxsurfaces["1/R"]).flatten() + self.cx_area = abs(cumulative_trapezoid(vp * ir, self.g.derived["psi"], initial=0.0)) + self.kappa_a = self.cx_area[-1] / (np.pi * self.a**2) self.kappa995 = np.interp( 0.995, - self.g["AuxQuantities"]["PSI_NORM"], - self.g["fluxSurfaces"]["geo"]["kap"], + self.psi_pol_norm, + self.g.derived["miller_geo"]["kappa"], ) self.kappa95 = np.interp( 0.95, - self.g["AuxQuantities"]["PSI_NORM"], - self.g["fluxSurfaces"]["geo"]["kap"], + self.psi_pol_norm, + self.g.derived["miller_geo"]["kappa"], ) self.delta995 = np.interp( 0.995, - self.g["AuxQuantities"]["PSI_NORM"], - self.g["fluxSurfaces"]["geo"]["delta"], + self.psi_pol_norm, + self.g.derived["miller_geo"]["delta"], ) self.delta95 = np.interp( 0.95, - self.g["AuxQuantities"]["PSI_NORM"], - self.g["fluxSurfaces"]["geo"]["delta"], + self.psi_pol_norm, + self.g.derived["miller_geo"]["delta"], ) """ @@ -173,26 +137,23 @@ def derive(self, debug=False): Boundary -------------------------------------------------------------------------------------------------------------------------------------- Note that the RBBS and ZBBS values in the gfile are often too scattered and do not reproduce the boundary near x-points. - The shaping parameters calculated using fluxSurfaces are correct though. + The shaping parameters calculated using fluxsurfaces are correct though. 
""" - self.Rb_gfile, self.Yb_gfile = self.g["RBBBS"], self.g["ZBBBS"] - self.Rb, self.Yb = self.g["fluxSurfaces"].sep.transpose() + self.Rb_gfile, self.Yb_gfile = self.g.raw["rbbbs"].copy(), self.g.raw["zbbbs"].copy() + self.Rb, self.Yb = self.g.fluxsurfaces["R"][-1], self.g.fluxsurfaces["Z"][-1] if len(self.Rb) == 0: - print("\t- MITIM > No separatrix found in the OMFIT fluxSurfaces, increasing resolution and going all in!",typeMsg='i') + print("\t- MITIM > No separatrix found in the megpy fluxsurfaces, using explicit boundary in g-eqdsk file!",typeMsg='i') - flx = copy.deepcopy(self.g['fluxSurfaces']) - flx._changeResolution(6) - flx.findSurfaces([0.0,0.5,1.0]) - fs = flx['flux'][list(flx['flux'].keys())[-1]] - self.Rb, self.Yb = fs['R'], fs['Z'] + self.Rb = self.Rb_gfile.copy() + self.Yb = self.Yb_gfile.copy() if debug: fig, ax = plt.subplots() - # OMFIT - ax.plot(self.Rb, self.Yb, "-s", c="r", label="OMFIT") + # megpy + ax.plot(self.Rb, self.Yb, "-s", c="r", label="megpy") # GFILE ax.plot(self.Rb_gfile, self.Yb_gfile, "-s", c="y", label="GFILE") @@ -245,10 +206,7 @@ def write(self, filename=None): If filename is None, use the original one """ - if filename is not None: - self.g.filename = filename - - self.g.save() + self.g.write_geqdsk(f_path=filename) # ----------------------------------------------------------------------------- # Parameterizations @@ -256,8 +214,8 @@ def write(self, filename=None): def get_MXH_coeff_new(self, n_coeff=7, plotYN=False): - psis = self.g["AuxQuantities"]["PSI_NORM"] - flux_surfaces = self.g['fluxSurfaces']['flux'] + psis = self.psi_pol_norm + flux_surfaces = self.g.fluxsurfaces["psi"] # Cannot parallelize because different number of points? 
kappa, rmin, rmaj, zmag, sn, cn = [],[],[],[],[],[] @@ -266,7 +224,21 @@ def get_MXH_coeff_new(self, n_coeff=7, plotYN=False): if flux == len(flux_surfaces)-1: Rf, Zf = self.Rb, self.Yb else: - Rf, Zf = flux_surfaces[flux]['R'],flux_surfaces[flux]['Z'] + Rf, Zf = self.g.fluxsurfaces["R"][flux], self.g.fluxsurfaces["Z"][flux] + + # To avoid the following code to fail if only one point is found + if Rf.shape[0] == 1: + + min_value = 1E-7 + + kappa.append(min_value) + rmin.append(min_value) + rmaj.append(Rf[0]) + zmag.append(Zf[0]) + + sn.append(np.ones(n_coeff)*min_value) + cn.append(np.ones(n_coeff)*min_value) + continue # Perform the MXH decompositionusing the MITIM surface class surfaces = mitim_flux_surfaces() @@ -311,24 +283,22 @@ def get_MXH_coeff_new(self, n_coeff=7, plotYN=False): # ----------------------------------------------------------------------------- # For MAESTRO and TRANSP converstions # ----------------------------------------------------------------------------- - def to_profiles(self, ne0_20 = 1.0, Zeff = 1.5, PichT = 1.0, Z = 9, coeffs_MXH = 7, plotYN = False): # ------------------------------------------------------------------------------------------------------- # Quantities from the equilibrium # ------------------------------------------------------------------------------------------------------- - rhotor = self.g['RHOVN'] - psi = self.g['AuxQuantities']['PSI'] # Wb/rad - torfluxa = self.g['AuxQuantities']['PHI'][-1] / (2*np.pi) # Wb/rad - q = self.g['QPSI'] - pressure = self.g['PRES'] # Pa - Ip = self.g['CURRENT']*1E-6 # MA + rhotor = self.g.derived['rho_tor'] + psi = self.g.derived['psi'] # Wb/rad + torfluxa = self.g.derived['phi'][-1] / (2*np.pi) # Wb/rad + q = self.g.raw['qpsi'] + pressure = self.g.raw['pres'] # Pa + Ip = self.g.raw['current']*1E-6 # MA RZ = np.array([self.Rb,self.Yb]).T R0 = (RZ.max(axis=0)[0] + RZ.min(axis=0)[0])/2 - - B0 = self.g['RCENTR']*self.g['BCENTR'] / R0 + B0 = self.g.raw['rcentr']*self.g.raw['bcentr'] / R0 
# Ensure positive quantities #TODO: Check if this is necessary, pass directions rhotor = np.array([np.abs(i) for i in rhotor]) @@ -364,7 +334,6 @@ def to_profiles(self, ne0_20 = 1.0, Zeff = 1.5, PichT = 1.0, Z = 9, coeffs_MXH profiles['ze'] = np.array([-1.0]) profiles['z'] = np.array([1.0, Z]) - profiles['torfluxa(Wb/radian)'] = np.array([torfluxa]) profiles['rcentr(m)'] = np.array([R0]) profiles['bcentr(T)'] = np.array([B0]) @@ -419,16 +388,16 @@ def to_profiles(self, ne0_20 = 1.0, Zeff = 1.5, PichT = 1.0, Z = 9, coeffs_MXH _, profiles["qrfe(MW/m^3)"] = PLASMAtools.parabolicProfile(Tbar=1.0,nu=5.0,rho=rhotor,Tedge=0.0) - p = PROFILEStools.PROFILES_GACODE.scratch(profiles) + p = PROFILEStools.gacode_state.scratch(profiles) - p.profiles["qrfe(MW/m^3)"] = p.profiles["qrfe(MW/m^3)"] * PichT/p.derived['qRF_MWmiller'][-1] /2 + p.profiles["qrfe(MW/m^3)"] = p.profiles["qrfe(MW/m^3)"] * PichT/p.derived['qRF_MW'][-1] /2 p.profiles["qrfi(MW/m^3)"] = p.profiles["qrfe(MW/m^3)"] # ------------------------------------------------------------------------------------------------------- # Ready to go # ------------------------------------------------------------------------------------------------------- - p.deriveQuantities() + p.derive_quantities() # ------------------------------------------------------------------------------------------------------- # Plotting @@ -439,7 +408,7 @@ def to_profiles(self, ne0_20 = 1.0, Zeff = 1.5, PichT = 1.0, Z = 9, coeffs_MXH fig, ax = plt.subplots() ff = np.linspace(0, 1, 11) self.plotFluxSurfaces(ax=ax, fluxes=ff, rhoPol=False, sqrt=True, color="r", plot1=False) - p.plotGeometry(ax=ax, surfaces_rho=ff, color="b") + p.plot_state_flux_surfaces(ax=ax, surfaces_rho=ff, color="b") plt.show() return p @@ -451,7 +420,7 @@ def to_transp(self, folder = '~/scratch/', shot = '12345', runid = 'P01', ne0_20 folder.mkdir(parents=True, exist_ok=True) p = self.to_profiles(ne0_20 = ne0_20, Zeff = Zeff, PichT = PichT_MW) - p.writeCurrentStatus(folder / 
'input.gacode') + p.write_state(folder / 'input.gacode') transp = p.to_transp(folder = folder, shot = shot, runid = runid, times = times, Vsurf = Vsurf) @@ -544,6 +513,7 @@ def _to_mxh(self, n_coeff=6): self.cn = np.zeros((self.R.shape[0],n_coeff)) self.sn = np.zeros((self.R.shape[0],n_coeff)) self.gn = np.zeros((self.R.shape[0],4)) + self.gn[-1] = 1.0 for i in range(self.R.shape[0]): self.cn[i,:], self.sn[i,:], self.gn[i,:] = from_RZ_to_mxh(self.R[i,:], self.Z[i,:], n_coeff=n_coeff) @@ -563,8 +533,8 @@ def _to_miller(self): # Elongations - self.kappa_u = (Zmax - self.Z0) / self.a - self.kappa_l = (self.Z0 - Zmin) / self.a + self.kappa_u = (Zmax - self.Z0) / self.a if self.Z.shape[1] > 1 else np.ones(self.Z0.shape) + self.kappa_l = (self.Z0 - Zmin) / self.a if self.Z.shape[1] > 1 else np.ones(self.Z0.shape) self.kappa = (self.kappa_u + self.kappa_l) / 2 # Triangularities @@ -582,7 +552,7 @@ def _to_miller(self): for i in range(self.R0.shape[0]): try: Ri, Zi, zeta_uo = find_squareness_points(self.R[i,:], self.Z[i,:]) - except AttributeError: + except: zeta_uo = np.nan self.zeta[i] = zeta_uo @@ -953,9 +923,9 @@ def check(self, warning_error = 0.01, plotYN = False): max_error = np.max([max_error, error]) if max_error > warning_error: - print(f"\t\t- Maximum error is {100*max_error:.2f}%", typeMsg='w') + print(f"\t\t- Maximum error in equilibrium quantities is {100*max_error:.2f}%", typeMsg='w') else: - print(f"\t\t- Maximum error is {100*max_error:.2f}%", typeMsg='i') + print(f"\t\t- Maximum error in equilibrium quantities is {100*max_error:.2f}%") # -------------------------------------------------------------- # Plotting diff --git a/src/mitim_tools/gs_tools/scripts/mxh.py b/src/mitim_tools/gs_tools/scripts/mxh.py index dd7a0289..0bb969aa 100644 --- a/src/mitim_tools/gs_tools/scripts/mxh.py +++ b/src/mitim_tools/gs_tools/scripts/mxh.py @@ -22,7 +22,7 @@ ff = np.linspace(0, 1, 11) for i, (coeffs_MXH, p) in enumerate(pc.items()): - p.plotGeometry(ax=ax[i], 
surfaces_rho=ff, color="b") + p.plot_state_flux_surfaces(ax=ax[i], surfaces_rho=ff, color="b") g.plotFluxSurfaces(ax=ax[i], fluxes=ff, rhoPol=False, sqrt=True, color="r", plot1=False) ax[i].set_title(f'coeffs_MXH = {coeffs_MXH}') diff --git a/src/mitim_tools/gs_tools/utils/GEQplotting.py b/src/mitim_tools/gs_tools/utils/GEQplotting.py index fe6531b4..db545acd 100644 --- a/src/mitim_tools/gs_tools/utils/GEQplotting.py +++ b/src/mitim_tools/gs_tools/utils/GEQplotting.py @@ -4,6 +4,7 @@ from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed +#TODO: add current profiles and flux-surface average fields to megpy and restore plots def compareGeqdsk(geqdsks, fn=None, extraLabel="", plotAll=True, labelsGs=None): @@ -229,8 +230,8 @@ def plotFS(self, axs=None, color="b", label=""): ax.set_ylabel("Z (m)") ax = axs[2] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g["AuxQuantities"]["RHO"] + x = self.psi_pol_norm + y = self.rho_tor ax.plot(x, y, lw=2, ls="-", c=color, label=label) ax.plot([0, 1], [0, 1], ls="--", c="k", lw=0.5) @@ -240,8 +241,8 @@ def plotFS(self, axs=None, color="b", label=""): ax.set_ylim([0, 1]) ax = axs[3] - x = self.g["AuxQuantities"]["RHO"] - y = self.g["AuxQuantities"]["RHOp"] + x = self.rho_tor + y = self.rho_pol ax.plot(x, y, lw=2, ls="-", c=color) ax.plot([0, 1], [0, 1], ls="--", c="k", lw=0.5) @@ -256,8 +257,8 @@ def plotCurrents(self, axs=None, zlims_thr=[-1, 1]): fig, axs = plt.subplots(ncols=10) ax = axs[0] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Jr") * 1e-6 + x = self.psi_pol_norm + y = np.zeros(x.shape) ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_ylabel("FSA $\\langle J\\rangle$ ($MA/m^2$)") @@ -267,14 +268,14 @@ def plotCurrents(self, axs=None, zlims_thr=[-1, 1]): zlims = GRAPHICStools.aroundZeroLims(zlims) ax.set_ylim(zlims) - ax = axs[1] - plot2Dquantity(self, - ax=ax, var="Jr", title="Radial Current Jr", zlims=zlims, factor=1e-6 - ) + #ax = axs[1] + 
#plot2Dquantity(self, + # ax=ax, var="Jr", title="Radial Current Jr", zlims=zlims, factor=1e-6 + #) ax = axs[2] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Jz") * 1e-6 + x = self.psi_pol_norm + y = np.zeros(x.shape) ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -282,14 +283,14 @@ def plotCurrents(self, axs=None, zlims_thr=[-1, 1]): zlims = GRAPHICStools.aroundZeroLims(zlims) ax.set_ylim(zlims) - ax = axs[3] - plot2Dquantity(self, - ax=ax, var="Jz", title="Vertical Current Jz", zlims=zlims, factor=1e-6 - ) + #ax = axs[3] + #plot2Dquantity(self, + # ax=ax, var="Jz", title="Vertical Current Jz", zlims=zlims, factor=1e-6 + #) ax = axs[4] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Jt") * 1e-6 + x = self.psi_pol_norm + y = self.Jt ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -297,14 +298,14 @@ def plotCurrents(self, axs=None, zlims_thr=[-1, 1]): zlims = GRAPHICStools.aroundZeroLims(zlims) ax.set_ylim(zlims) - ax = axs[5] - plot2Dquantity(self, - ax=ax, var="Jt", title="Toroidal Current Jt", zlims=zlims, factor=1e-6 - ) + #ax = axs[5] + #plot2Dquantity(self, + # ax=ax, var="Jt", title="Toroidal Current Jt", zlims=zlims, factor=1e-6 + #) ax = axs[6] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Jp") * 1e-6 + x = self.psi_pol_norm + y = np.zeros(x.shape) ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -312,14 +313,14 @@ def plotCurrents(self, axs=None, zlims_thr=[-1, 1]): zlims = GRAPHICStools.aroundZeroLims(zlims) ax.set_ylim(zlims) - ax = axs[7] - plot2Dquantity(self, - ax=ax, var="Jp", title="Poloidal Current Jp", zlims=zlims, factor=1e-6 - ) + #ax = axs[7] + #plot2Dquantity(self, + # ax=ax, var="Jp", title="Poloidal Current Jp", zlims=zlims, factor=1e-6 + #) ax = axs[8] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Jpar") * 1e-6 + x = self.psi_pol_norm + y = np.zeros(x.shape) 
ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -327,10 +328,10 @@ def plotCurrents(self, axs=None, zlims_thr=[-1, 1]): zlims = GRAPHICStools.aroundZeroLims(zlims) ax.set_ylim(zlims) - ax = axs[9] - plot2Dquantity(self, - ax=ax, var="Jpar", title="Parallel Current Jpar", zlims=zlims, factor=1e-6 - ) + #ax = axs[9] + #plot2Dquantity(self, + # ax=ax, var="Jpar", title="Parallel Current Jpar", zlims=zlims, factor=1e-6 + #) def plotFields(self, axs=None, zlims_thr=[-1, 1]): if axs is None: @@ -338,8 +339,8 @@ def plotFields(self, axs=None, zlims_thr=[-1, 1]): fig, axs = plt.subplots(ncols=10) ax = axs[0] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Br") + x = self.psi_pol_norm + y = np.zeros(x.shape) # self.g.surfAvg("Br") ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_ylabel("FSA $\\langle B\\rangle$ ($T$)") @@ -351,12 +352,12 @@ def plotFields(self, axs=None, zlims_thr=[-1, 1]): ax = axs[1] plot2Dquantity(self, - ax=ax, var="Br", title="Radial Field Br", zlims=zlims, titlebar="B ($T$)" + ax=ax, var="B_r", title="Radial Field Br", zlims=zlims, titlebar="B ($T$)" ) ax = axs[2] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Bz") + x = self.psi_pol_norm + y = np.zeros(x.shape) # self.g.surfAvg("Bz") ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -366,12 +367,12 @@ def plotFields(self, axs=None, zlims_thr=[-1, 1]): ax = axs[3] plot2Dquantity(self, - ax=ax, var="Bz", title="Vertical Field Bz", zlims=zlims, titlebar="B ($T$)" + ax=ax, var="B_z", title="Vertical Field Bz", zlims=zlims, titlebar="B ($T$)" ) ax = axs[4] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Bt") + x = self.psi_pol_norm + y = np.zeros(x.shape) # self.g.surfAvg("Bt") ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -380,14 +381,14 @@ def plotFields(self, axs=None, zlims_thr=[-1, 1]): # zlims = 
GRAPHICStools.aroundZeroLims(zlims) ax.set_ylim(zlims) - ax = axs[5] - plot2Dquantity(self, - ax=ax, var="Jt", title="Toroidal Field Bt", zlims=zlims, titlebar="B ($T$)" - ) + #ax = axs[5] + #plot2Dquantity(self, + # ax=ax, var="Jt", title="Toroidal Field Bt", zlims=zlims, titlebar="B ($T$)" + #) ax = axs[6] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g.surfAvg("Bp") + x = self.psi_pol_norm + y = np.zeros(x.shape) # self.g.surfAvg("Bp") ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -397,32 +398,32 @@ def plotFields(self, axs=None, zlims_thr=[-1, 1]): ax = axs[7] plot2Dquantity(self, - ax=ax, var="Bp", title="Poloidal Field Bp", zlims=zlims, titlebar="B ($T$)" + ax=ax, var="B_pol_rz", title="Poloidal Field Bp", zlims=zlims, titlebar="B ($T$)" ) ax = axs[8] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g["fluxSurfaces"]["avg"]["Bp**2"] + x = self.psi_pol_norm + y = np.zeros(x.shape) # self.g["fluxSurfaces"]["avg"]["Bp**2"] ax.plot(x, y, lw=2, ls="-", c="r") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) ax.set_ylabel("$\\langle B_{\\theta}^2\\rangle$") - ax = axs[9] - x = self.g["fluxSurfaces"]["midplane"]["R"] - y = self.g["fluxSurfaces"]["midplane"]["Bt"] - ax.plot(x, y, lw=2, ls="-", c="r", label="$B_{t}$") - y = self.g["fluxSurfaces"]["midplane"]["Bp"] - ax.plot(x, y, lw=2, ls="-", c="b", label="$B_{p}$") - y = self.g["fluxSurfaces"]["midplane"]["Bz"] - ax.plot(x, y, lw=2, ls="-", c="g", label="$B_{z}$") - y = self.g["fluxSurfaces"]["midplane"]["Br"] - ax.plot(x, y, lw=2, ls="-", c="m", label="$B_{r}$") - y = self.g["fluxSurfaces"]["geo"]["bunit"] - ax.plot(x, y, lw=2, ls="-", c="c", label="$B_{unit}$") - ax.set_xlabel("$R$ LF midplane") - ax.set_ylabel("$B$ (T)") - ax.legend() + #ax = axs[9] + #x = self.g["fluxSurfaces"]["midplane"]["R"] + #y = self.g["fluxSurfaces"]["midplane"]["Bt"] + #ax.plot(x, y, lw=2, ls="-", c="r", label="$B_{t}$") + #y = self.g["fluxSurfaces"]["midplane"]["Bp"] + #ax.plot(x, y, lw=2, 
ls="-", c="b", label="$B_{p}$") + #y = self.g["fluxSurfaces"]["midplane"]["Bz"] + #ax.plot(x, y, lw=2, ls="-", c="g", label="$B_{z}$") + #y = self.g["fluxSurfaces"]["midplane"]["Br"] + #ax.plot(x, y, lw=2, ls="-", c="m", label="$B_{r}$") + #y = self.g["fluxSurfaces"]["geo"]["bunit"] + #ax.plot(x, y, lw=2, ls="-", c="c", label="$B_{unit}$") + #ax.set_xlabel("$R$ LF midplane") + #ax.set_ylabel("$B$ (T)") + #ax.legend() def plotChecks(self, axs=None): if axs is None: @@ -430,7 +431,7 @@ def plotChecks(self, axs=None): fig, axs = plt.subplots(ncols=8) ax = axs[0] - x = self.g["AuxQuantities"]["PSI_NORM"] + x = self.psi_pol_norm y1 = self.Jt ax.plot(x, np.abs(y1), lw=2, ls="-", c="b", label="$\\langle Jt\\rangle$") zmax = y1.max() @@ -463,10 +464,10 @@ def plotChecks(self, axs=None): ax.legend() ax = axs[2] - x = self.g["AuxQuantities"]["PSI_NORM"] - y1 = self.g["FFPRIM"] + x = self.psi_pol_norm + y1 = self.g.raw["ffprim"] ax.plot(x, y1, lw=2, ls="-", c="r", label="$FF'$") - y2 = self.g["PPRIME"] * (4 * np.pi * 1e-7) + y2 = self.g.raw["pprime"] * (4 * np.pi * 1e-7) ax.plot(x, y2, lw=2, ls="-", c="b", label="$p'*\\mu_0$") ax.set_ylabel("") @@ -474,40 +475,40 @@ def plotChecks(self, axs=None): ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) - ax = axs[3] - plot2Dquantity(self, - ax=ax, - var="Jt", - title="Toroidal Current Jt", - zlims=[zmin, zmax], - cmap="viridis", - factor=1e-6, - ) - - ax = axs[4] - plot2Dquantity(self, - ax=ax, - var="Jt_fb", - title="Toroidal Current Jt (FB)", - zlims=[zmin, zmax], - cmap="viridis", - factor=1e-6, - ) - - ax = axs[5] - z = ( - np.abs(self.g["AuxQuantities"]["Jt"] - self.g["AuxQuantities"]["Jt_fb"]) - * 1e-6 - ) - zmaxx = np.max([np.abs(zmax), np.abs(zmin)]) - plot2Dquantity(self, - ax=ax, - var=z, - title="Absolute Error", - zlims=[0, zmaxx], - cmap="viridis", - direct=True, - ) + #ax = axs[3] + #plot2Dquantity(self, + # ax=ax, + # var="Jt", + # title="Toroidal Current Jt", + # zlims=[zmin, zmax], + # cmap="viridis", + # factor=1e-6, 
+ #) + + #ax = axs[4] + #plot2Dquantity(self, + # ax=ax, + # var="Jt_fb", + # title="Toroidal Current Jt (FB)", + # zlims=[zmin, zmax], + # cmap="viridis", + # factor=1e-6, + #) + + #ax = axs[5] + #z = ( + # np.abs(self.g["AuxQuantities"]["Jt"] - self.g["AuxQuantities"]["Jt_fb"]) + # * 1e-6 + #) + #zmaxx = np.max([np.abs(zmax), np.abs(zmin)]) + #plot2Dquantity(self, + # ax=ax, + # var=z, + # title="Absolute Error", + # zlims=[0, zmaxx], + # cmap="viridis", + # direct=True, + #) def plotParameterization(self, axs=None): if axs is None: @@ -520,22 +521,23 @@ def plotParameterization(self, axs=None): ) # Boundary, axis and limiter ax.plot(self.Rb, self.Yb, lw=1, c="r") - ax.plot(self.g["RMAXIS"], self.g["ZMAXIS"], "+", markersize=10, c="r") + ax.plot(self.g.raw["rmaxis"], self.g.raw["zmaxis"], "+", markersize=10, c="r") ax.plot([self.Rmag], [self.Zmag], "o", markersize=5, c="m") ax.plot([self.Rmajor], [self.Zmag], "+", markersize=10, c="k") - ax.plot(self.g["RLIM"], self.g["ZLIM"], lw=1, c="k") + if 'rlim' in self.g.raw and 'zlim' in self.g.raw: + ax.plot(self.g.raw["rlim"], self.g.raw["zlim"], lw=1, c="k") - import matplotlib + import matplotlib - path = matplotlib.path.Path( - np.transpose(np.array([self.g["RLIM"], self.g["ZLIM"]])) - ) - patch = matplotlib.patches.PathPatch(path, facecolor="none") - ax.add_patch(patch) - # for col in cs.collections: - # col.set_clip_path(patch) - # for col in csA.collections: - # col.set_clip_path(patch) + path = matplotlib.path.Path( + np.transpose(np.array([self.g.raw["rlim"], self.g.raw["zlim"]])) + ) + patch = matplotlib.patches.PathPatch(path, facecolor="none") + ax.add_patch(patch) + # for col in cs.collections: + # col.set_clip_path(patch) + # for col in csA.collections: + # col.set_clip_path(patch) self.plotEnclosingBox(ax=ax) @@ -545,12 +547,12 @@ def plotParameterization(self, axs=None): ax.set_ylabel("Z (m)") ax = axs[1] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g["fluxSurfaces"]["geo"]["kap"] + x = 
self.psi_pol_norm + y = self.g.derived["miller_geo"]["kappa"].copy() ax.plot(x, y, label="$\\kappa$") - y = self.g["fluxSurfaces"]["geo"]["kapl"] + y = self.g.derived["miller_geo"]["kappa_l"].copy() ax.plot(x, y, ls="--", label="$\\kappa_L$") - y = self.g["fluxSurfaces"]["geo"]["kapu"] + y = self.g.derived["miller_geo"]["kappa_u"].copy() ax.plot(x, y, ls="--", label="$\\kappa_U$") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -558,12 +560,12 @@ def plotParameterization(self, axs=None): ax.legend() ax = axs[2] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g["fluxSurfaces"]["geo"]["delta"] + x = self.psi_pol_norm + y = self.g.derived["miller_geo"]["delta"].copy() ax.plot(x, y, label="$\\delta$") - y = self.g["fluxSurfaces"]["geo"]["dell"] + y = self.g.derived["miller_geo"]["delta_l"].copy() ax.plot(x, y, ls="--", label="$\\delta_L$") - y = self.g["fluxSurfaces"]["geo"]["delu"] + y = self.g.derived["miller_geo"]["delta_u"].copy() ax.plot(x, y, ls="--", label="$\\delta_U$") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -571,16 +573,16 @@ def plotParameterization(self, axs=None): ax.legend() ax = axs[3] - x = self.g["AuxQuantities"]["PSI_NORM"] - y = self.g["fluxSurfaces"]["geo"]["zeta"] + x = self.psi_pol_norm + y = self.g.derived["miller_geo"]["zeta"].copy() ax.plot(x, y, label="$\\zeta$") - y = self.g["fluxSurfaces"]["geo"]["zetail"] + y = self.g.derived["miller_geo"]["zeta_li"].copy() ax.plot(x, y, ls="--", label="$\\zeta_{IL}$") - y = self.g["fluxSurfaces"]["geo"]["zetaiu"] + y = self.g.derived["miller_geo"]["zeta_ui"].copy() ax.plot(x, y, ls="--", label="$\\zeta_{IU}$") - y = self.g["fluxSurfaces"]["geo"]["zetaol"] + y = self.g.derived["miller_geo"]["zeta_lo"].copy() ax.plot(x, y, ls="--", label="$\\zeta_{OL}$") - y = self.g["fluxSurfaces"]["geo"]["zetaou"] + y = self.g.derived["miller_geo"]["zeta_uo"].copy() ax.plot(x, y, ls="--", label="$\\zeta_{OU}$") ax.set_xlabel("$\\Psi_n$") ax.set_xlim([0, 1]) @@ -697,8 +699,8 @@ def plotPlasma(self, 
axs=None, legendYN=False, color="r", label=""): ax = ax_plasma[0] ax.plot( - self.g["AuxQuantities"]["RHO"], - self.g["PRES"] * 1e-6, + self.rho_tor, + self.g.raw["pres"] * 1e-6, "-s", c=color, lw=2, @@ -712,8 +714,8 @@ def plotPlasma(self, axs=None, legendYN=False, color="r", label=""): ax = ax_plasma[1] ax.plot( - self.g["AuxQuantities"]["RHO"], - -self.g["PPRIME"] * 1e-6, + self.rho_tor, + -self.g.raw["pprime"] * 1e-6, c=color, lw=2, ls="-", @@ -724,13 +726,13 @@ def plotPlasma(self, axs=None, legendYN=False, color="r", label=""): ax.axhline(y=0.0, ls="--", lw=0.5, c="k") ax = ax_plasma[2] - ax.plot(self.g["AuxQuantities"]["RHO"], self.g["FPOL"], c=color, lw=2, ls="-") + ax.plot(self.rho_tor, self.g.raw["fpol"], c=color, lw=2, ls="-") ax.set_xlim([0, 1]) ax.set_xlabel("$\\sqrt{\\phi_n}$ (RHO)") ax.set_ylabel("$F = RB_{\\phi}$ (T*m)") ax = ax_plasma[3] - ax.plot(self.g["AuxQuantities"]["RHO"], self.g["FFPRIM"], c=color, lw=2, ls="-") + ax.plot(self.rho_tor, self.g.raw["ffprim"], c=color, lw=2, ls="-") ax.set_xlim([0, 1]) ax.set_xlabel("$\\sqrt{\\phi_n}$ (RHO)") ax.set_ylabel("FF' (T*m/[])") @@ -738,8 +740,8 @@ def plotPlasma(self, axs=None, legendYN=False, color="r", label=""): ax = ax_plasma[4] ax.plot( - self.g["AuxQuantities"]["RHO"], - np.abs(self.g["QPSI"]), + self.rho_tor, + np.abs(self.g.raw["qpsi"]), "-s", c=color, lw=2, @@ -754,8 +756,8 @@ def plotPlasma(self, axs=None, legendYN=False, color="r", label=""): ax = ax_plasma[5] ax.plot( - self.g["AuxQuantities"]["RHO"], - np.abs(self.g.surfAvg("Jt") * 1e-6), + self.rho_tor, + np.abs(self.Jt), "-s", c=color, lw=2, @@ -763,8 +765,8 @@ def plotPlasma(self, axs=None, legendYN=False, color="r", label=""): label=label + "geqdsk Jt", ) ax.plot( - self.g["AuxQuantities"]["RHO"], - np.abs(self.g.surfAvg("Jt_fb") * 1e-6), + self.rho_tor, + np.abs(self.Jt_fb), "--o", c=color, lw=2, @@ -779,27 +781,27 @@ def plotPlasma(self, axs=None, legendYN=False, color="r", label=""): if legendYN: ax.legend() - ax = ax_plasma[6] - 
ax.plot( - self.g["fluxSurfaces"]["midplane"]["R"], - np.abs(self.g["fluxSurfaces"]["midplane"]["Bt"]), - "-s", - c=color, - lw=2, - markersize=3, - label=label + "geqdsk Bt", - ) - ax.plot( - self.g["fluxSurfaces"]["midplane"]["R"], - np.abs(self.g["fluxSurfaces"]["midplane"]["Bp"]), - "--o", - c=color, - lw=2, - markersize=3, - label=label + "geqdsk Bp", - ) - ax.set_xlabel("R (m) midplane") - ax.set_ylabel("Midplane fields (abs())") + #ax = ax_plasma[6] + #ax.plot( + # self.g["fluxSurfaces"]["midplane"]["R"], + # np.abs(self.g["fluxSurfaces"]["midplane"]["Bt"]), + # "-s", + # c=color, + # lw=2, + # markersize=3, + # label=label + "geqdsk Bt", + #) + #ax.plot( + # self.g["fluxSurfaces"]["midplane"]["R"], + # np.abs(self.g["fluxSurfaces"]["midplane"]["Bp"]), + # "--o", + # c=color, + # lw=2, + # markersize=3, + # label=label + "geqdsk Bp", + #) + #ax.set_xlabel("R (m) midplane") + #ax.set_ylabel("Midplane fields (abs())") if legendYN: ax.legend() @@ -812,9 +814,11 @@ def plotGeometry(self, axs=None, color="r"): fig, axs = plt.subplots(ncols=4) ax = axs[0] + x = self.rho_tor + y = self.cx_area ax.plot( - self.g["AuxQuantities"]["RHO"], - self.g["fluxSurfaces"]["geo"]["cxArea"], + x, + y, "-", c=color, lw=2, @@ -825,9 +829,11 @@ def plotGeometry(self, axs=None, color="r"): ax.set_ylabel("CX Area ($m^2$)") ax = axs[1] + x = self.rho_tor + y = np.zeros(x.shape) ax.plot( - self.g["AuxQuantities"]["RHO"], - self.g["fluxSurfaces"]["geo"]["surfArea"], + x, # self.rho_tor, + y, # self.g["fluxSurfaces"]["geo"]["surfArea"], "-", c=color, lw=2, @@ -838,9 +844,11 @@ def plotGeometry(self, axs=None, color="r"): ax.set_ylabel("Surface Area ($m^2$)") ax = axs[2] + x = self.rho_tor + y = np.zeros(x.shape) ax.plot( - self.g["AuxQuantities"]["RHO"], - self.g["fluxSurfaces"]["geo"]["vol"], + x, # self.rho_tor, + y, # self.g["fluxSurfaces"]["geo"]["vol"], "-", c=color, lw=2, @@ -863,13 +871,13 @@ def plotFluxSurfaces( plot1=True, label = '', ): - x = self.g["AuxQuantities"]["R"] - y = 
self.g["AuxQuantities"]["Z"] + x = self.g.derived["R"] + y = self.g.derived["Z"] if rhoPol: - z = self.g["AuxQuantities"]["RHOpRZ"] + z = self.g.derived["rhorz_pol"] else: - z = self.g["AuxQuantities"]["RHORZ"] + z = self.g.derived["rhorz_tor"] if not sqrt: z = z**2 @@ -905,10 +913,10 @@ def plot2Dquantity( if ax is None: fig, ax = plt.subplots() - x = self.g["AuxQuantities"]["R"] - y = self.g["AuxQuantities"]["Z"] + x = self.g.derived["R"] + y = self.g.derived["Z"] if not direct: - z = self.g["AuxQuantities"][var] * factor + z = self.g.derived[var] * factor else: z = var diff --git a/src/mitim_tools/misc_tools/CONFIGread.py b/src/mitim_tools/misc_tools/CONFIGread.py index b759c969..e59f64ba 100644 --- a/src/mitim_tools/misc_tools/CONFIGread.py +++ b/src/mitim_tools/misc_tools/CONFIGread.py @@ -125,6 +125,7 @@ def machineSettings( "folderWork": scratch, "slurm": {}, "cores_per_node": s[machine].get("cores_per_node", None), + "gpus_per_node": s[machine].get("gpus_per_node", 0), "isTunnelSameMachine": ( bool(s[machine]["isTunnelSameMachine"]) if "isTunnelSameMachine" in s[machine] diff --git a/src/mitim_tools/misc_tools/FARMINGtools.py b/src/mitim_tools/misc_tools/FARMINGtools.py index a21c9fb9..93265fc6 100644 --- a/src/mitim_tools/misc_tools/FARMINGtools.py +++ b/src/mitim_tools/misc_tools/FARMINGtools.py @@ -2,6 +2,7 @@ Set of tools to farm out simulations to run in either remote clusters or locally, serially or parallel """ +from math import log from tqdm import tqdm import os import shutil @@ -56,18 +57,24 @@ """ class mitim_job: - def __init__(self, folder_local): + def __init__( + self, + folder_local, + log_simulation_file = None # If not None, log information of how the simulation went to this file + ): + if not isinstance(folder_local, (str, Path)): raise TypeError('MITIM job folder must be a valid string or pathlib.Path object to a local directory') self.folder_local = IOtools.expandPath(folder_local) self.jobid = None + self.log_simulation_file = 
log_simulation_file def define_machine( self, code, nameScratch, launchSlurm=True, - slurm_settings={}, + slurm_settings=None, ): # Separated in case I need to quickly grab the machine settings self.define_machine_quick(code, nameScratch, slurm_settings=slurm_settings) @@ -81,7 +88,7 @@ def define_machine( # Print Slurm info if self.launchSlurm: print("\t- Slurm Settings:") - print("\t\t- Job settings:") + print("\t\t- Job settings (different than MITIM default):") for key in self.slurm_settings: if self.slurm_settings[key] is not None: print(f"\t\t\t- {key}: {self.slurm_settings[key]}") @@ -91,20 +98,13 @@ def define_machine( for key in self.machineSettings["slurm"]: print(f'\t\t\t- {key}: {self.machineSettings["slurm"][key]}') - def define_machine_quick(self, code, nameScratch, slurm_settings={}): - self.slurm_settings = slurm_settings + def define_machine_quick(self, code, nameScratch, slurm_settings=None): - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # Defaults for slurm + self.slurm_settings = slurm_settings if slurm_settings is not None else {} + + # In case there's no name, I need it self.slurm_settings.setdefault("name", "mitim_job") - self.slurm_settings.setdefault("minutes", 10) - self.slurm_settings.setdefault("cpuspertask", 1) - self.slurm_settings.setdefault("ntasks", 1) - self.slurm_settings.setdefault("nodes", None) - self.slurm_settings.setdefault("job_array", None) - self.slurm_settings.setdefault("mem", None) - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - + self.machineSettings = CONFIGread.machineSettings( code=code, nameScratch=nameScratch, @@ -141,6 +141,10 @@ def prep( # Pass to class self.command = command + + if not isinstance(self.command, list): + self.command = [self.command] + self.input_files = input_files if isinstance(input_files, list) else [] self.input_folders = input_folders if isinstance(input_folders, list) else [] self.output_files = output_files if isinstance(output_files, list) else [] @@ 
-156,35 +160,43 @@ def run( waitYN=True, timeoutSecs=1e6, removeScratchFolders=True, + removeScratchFolders_goingIn=None, check_if_files_received=True, attempts_execution=1, + helper_lostconnection=False, ): + + ''' + if helper_lostconnection is True, it means that the connection to the remote machine was lost, but the files are there, + so I just want to retrieve them. In that case, I do not remove the scratch folder going in, and I do not execute the commands. + ''' + + removeScratchFolders_goingOut = removeScratchFolders + if removeScratchFolders_goingIn is None: + removeScratchFolders_goingIn = removeScratchFolders if not waitYN: - removeScratchFolders = False + removeScratchFolders_goingOut = False # Always start by going to the folder (inside sbatch file) - command_str_mod = [f"cd {self.folderExecution}", f"{self.command}"] + command_str_mod = [f"cd {self.folderExecution}"] + for command in self.command: + command_str_mod += [command] + # ****** Prepare SLURM job ***************************** comm, fileSBATCH, fileSHELL = create_slurm_execution_files( command_str_mod, self.folderExecution, modules_remote=self.machineSettings["modules"], - job_array=self.slurm_settings["job_array"] if "job_array" in self.slurm_settings else None, folder_local=self.folder_local, shellPreCommands=self.shellPreCommands, shellPostCommands=self.shellPostCommands, - nameJob=self.slurm_settings["name"] if "name" in self.slurm_settings else "test", - minutes=self.slurm_settings["minutes"] if "minutes" in self.slurm_settings else 5, - nodes=self.slurm_settings["nodes"] if "nodes" in self.slurm_settings else None, - ntasks=self.slurm_settings["ntasks"] if "ntasks" in self.slurm_settings else 1, - cpuspertask=self.slurm_settings["cpuspertask"] if "cpuspertask" in self.slurm_settings else 4, - slurm=self.machineSettings["slurm"], - memory_req_by_job=self.slurm_settings["mem"] if "mem" in self.slurm_settings else None, - launchSlurm=self.launchSlurm, label_log_files=self.label_log_files, 
wait_until_sbatch=waitYN, + slurm=self.machineSettings["slurm"], + launchSlurm=self.launchSlurm, + slurm_settings=self.slurm_settings, ) # ****************************************************** @@ -196,21 +208,19 @@ def run( self.output_files = curateOutFiles(self.output_files) # Relative paths - self.input_files = [ - path.relative_to(self.folder_local) for path in self.input_files - ] - self.input_folders = [ - path.relative_to(self.folder_local) for path in self.input_folders - ] + self.input_files = [IOtools.expandPath(path).relative_to(self.folder_local) for path in self.input_files] + self.input_folders = [IOtools.expandPath(path).relative_to(self.folder_local) for path in self.input_folders] # Process self.full_process( comm, - removeScratchFolders=removeScratchFolders, + removeScratchFolders_goingIn=removeScratchFolders_goingIn and (not helper_lostconnection), + removeScratchFolders_goingOut=removeScratchFolders_goingOut, timeoutSecs=timeoutSecs, check_if_files_received=waitYN and check_if_files_received, check_files_in_folder=self.check_files_in_folder, attempts_execution=attempts_execution, + execute_flag=not helper_lostconnection ) # Get jobid @@ -234,15 +244,22 @@ def full_process( self, comm, timeoutSecs=1e6, - removeScratchFolders=True, + removeScratchFolders_goingIn=True, + removeScratchFolders_goingOut=True, check_if_files_received=True, check_files_in_folder={}, attempts_execution = 1, + execute_flag=True, ): """ My philosophy is to always wait for the execution of all commands. If I need to not wait, that's handled by a slurm submission without --wait, but I still want to finish the sbatch launch process. + + Notes: + - If execute_flag is False, the commands will not be executed. This is useful, + together with removeScratchFolders_goingIn=False if the results exist in the remote + but the connection failed with your local machine. You can then just retrieve the results. 
""" wait_for_all_commands = True @@ -253,7 +270,7 @@ def full_process( self.connect(log_file=self.folder_local / "paramiko.log") # ~~~~~~ Prepare scratch folder - if removeScratchFolders: + if removeScratchFolders_goingIn: self.remove_scratch_folder() self.create_scratch_folder() @@ -264,12 +281,18 @@ def full_process( execution_counter = 0 while execution_counter < attempts_execution: - output, error = self.execute( - comm, - wait_for_all_commands=wait_for_all_commands, - printYN=True, - timeoutSecs=timeoutSecs if timeoutSecs < 1e6 else None, - ) + + if execute_flag: + output, error = self.execute( + comm, + wait_for_all_commands=wait_for_all_commands, + printYN=True, + timeoutSecs=timeoutSecs if timeoutSecs < 1e6 else None, + log_file=self.log_simulation_file + ) + else: + output, error = b"", b"" + print("\t* Not executing commands, just retrieving files (execute_flag=False)", typeMsg="i") # ~~~~~~ Retrieve received = self.retrieve( @@ -286,14 +309,14 @@ def full_process( # ~~~~~~ Remove scratch folder if received: - if wait_for_all_commands and removeScratchFolders: + if wait_for_all_commands and removeScratchFolders_goingOut: self.remove_scratch_folder() else: # If not received, write output and error to files self._write_debugging_files(output, error) - cont = print("\t* Not all expected files received, not removing scratch folder (mitim_farming.out and mitim_farming.err written)",typeMsg="q") + cont = print(f"\t* Not all expected files received, not removing scratch folder (mitim_farming.out and mitim_farming.err written in '{self.folder_local / 'mitim_farming.err'}')",typeMsg="q") if not cont: print("[MITIM] Stopped with embed(), you can look at output and error",typeMsg="w",) embed() @@ -362,7 +385,7 @@ def define_server(self, disabled_algorithms=None): self.target_host, username=self.target_user, disabled_algorithms=disabled_algorithms, - key_filename=self.key_filename, + key_filename=str(self.key_filename) if self.key_filename is not None else None, 
port=self.port, sock=self.sock, allow_agent=True, @@ -374,7 +397,7 @@ def define_server(self, disabled_algorithms=None): self.target_host, username=self.target_user, disabled_algorithms=disabled_algorithms, - key_filename=self.key_filename, + key_filename=str(self.key_filename) if self.key_filename is not None else None, port=self.port, sock=self.sock, allow_agent=True, @@ -466,9 +489,7 @@ def create_scratch_folder(self): return output, error def send(self): - print( - f'\t* Sending files{" to remote server" if self.ssh is not None else ""}:' - ) + print(f'\t* Sending files{" to remote server" if self.ssh is not None else ""}:') # Create a tarball of the local directory print("\t\t- Tarballing (local side)") @@ -515,12 +536,87 @@ def send(self): print("\t\t- Removing tarball (remote side)") self.execute(f"rm {self.folderExecution}/mitim_send.tar.gz") - def execute(self, command_str, **kwargs): + def execute(self, command_str, log_file=None, **kwargs): if self.ssh is not None: - return self.execute_remote(command_str, **kwargs) + output, error = self.execute_remote(command_str, **kwargs) else: - return self.execute_local(command_str, **kwargs) + output, error = self.execute_local(command_str, **kwargs) + + # Write information file about where and how the run took place + if log_file is not None: + self.write_information_file(command_str, output, error, file=log_file) + + return output, error + + def write_information_file(self, command, output, error, file = 'mitim_simulation.log'): + """ + Write a log file with information about where the simulation happened (local/remote), + user, host, ssh settings if remote, and head/tail of output and error. 
+ """ + import getpass + import platform + from datetime import datetime + + # Prepare context info + now = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + is_remote = self.ssh is not None + lines = [] + lines.append("==================== MITIM Simulation Execution Log ====================\n") + lines.append(f"Date (finished): {now}") + lines.append(f"Execution Type: {'Remote' if is_remote else 'Local'}\n") + lines.append("--- Execution Details ---") + if is_remote: + lines.append(f"SSH User: {getattr(self, 'target_user', 'N/A')}") + lines.append(f"SSH Host: {getattr(self, 'target_host', 'N/A')}") + lines.append(f"Remote Folder: {self.folderExecution}") + else: + lines.append(f"User: {getpass.getuser()}") + lines.append(f"Host: {platform.node()}") + lines.append(f"Folder: {self.folderExecution}") + lines.append("") + + def get_head_tail(data, n=20): + try: + text = data.decode("utf-8", errors="replace") + except Exception: + text = str(data) + lines_ = text.splitlines() + head = lines_[:n] + tail = lines_[-n:] if len(lines_) > n else [] + return head, tail + + out_head, out_tail = get_head_tail(output) + err_head, err_tail = get_head_tail(error) + + lines.append("--- Output (Head) ---") + lines.extend(out_head if out_head else [""]) + lines.append("") + lines.append("--- Output (Tail) ---") + lines.extend(out_tail if out_tail else [""]) + lines.append("") + lines.append("--- Error (Head) ---") + lines.extend(err_head if err_head else [""]) + lines.append("") + lines.append("--- Error (Tail) ---") + lines.extend(err_tail if err_tail else [""]) + lines.append("") + lines.append(f"--- Command ---") + lines.append(f"{command}") + lines.append(f"\n--- Input Files ---") + lines.extend([str(file) for file in self.input_files]) + lines.append(f"\n--- Input Folders ---") + lines.extend([str(folder) for folder in self.input_folders]) + lines.append(f"\n--- Output Files ---") + lines.extend([str(file) for file in self.output_files]) + lines.append(f"\n--- Output Folders ---") 
+ lines.extend([str(folder) for folder in self.output_folders]) + lines.append("\n=======================================================================\n") + + # Write to file (file can be Path or str) + file_path = file if isinstance(file, (str, Path)) else str(file) + with open(file_path, "w", encoding="utf-8") as f: + f.write("\n".join(lines)) def execute_remote( self, @@ -563,14 +659,10 @@ def execute_local(self, command_str, printYN=False, timeoutSecs=None, **kwargs): return output, error def retrieve(self, check_if_files_received=True, check_files_in_folder={}): - print( - f'\t* Retrieving files{" from remote server" if self.ssh is not None else ""}:' - ) + print(f'\t* Retrieving files{" from remote server" if self.ssh is not None else ""}:') # Create a tarball of the output files & folders on the remote machine - print( - "\t\t- Removing local output files & folders that potentially exist from previous runs" - ) + print("\t\t- Removing local output files & folders that potentially exist from previous runs") for file in self.output_files: (self.folder_local / file).unlink(missing_ok=True) for folder in self.output_folders: @@ -627,7 +719,7 @@ def retrieve(self, check_if_files_received=True, check_files_in_folder={}): if received: print("\t\t- All correct", typeMsg="i") else: - print("\t* Not all received, trying once again", typeMsg="w") + print("\t* Not all received, trying once again", typeMsg="i") time.sleep(10) _ = self.retrieve(check_if_files_received=False) received = self.check_all_received(check_files_in_folder=check_files_in_folder) @@ -724,9 +816,7 @@ def interpret_status(self, file_output = "slurm_output.dat"): else: self.infoSLURM = {} for i in range(len(output_squeue[0].split())): - self.infoSLURM[output_squeue[0].split()[i]] = output_squeue[1].split()[ - i - ] + self.infoSLURM[output_squeue[0].split()[i]] = output_squeue[1].split()[i] self.jobid_found = self.infoSLURM["JOBID"] @@ -736,9 +826,7 @@ def interpret_status(self, file_output = 
"slurm_output.dat"): if self.infoSLURM["STATE"] == "PENDING": self.status = 0 - elif (self.infoSLURM["STATE"] == "RUNNING") or ( - self.infoSLURM["STATE"] == "COMPLETING" - ): + elif (self.infoSLURM["STATE"] == "RUNNING") or (self.infoSLURM["STATE"] == "COMPLETING"): self.status = 1 elif self.infoSLURM["STATE"] == "NOT FOUND": self.status = 2 @@ -981,39 +1069,46 @@ def SerialProcedure(Function, Params, howmany): def create_slurm_execution_files( command, - folder_remote, + folderExecution, modules_remote=None, - slurm={}, folder_local=None, shellPreCommands=None, shellPostCommands=None, - launchSlurm=True, - nameJob="test", - minutes=5, - ntasks=1, - cpuspertask=4, - memory_req_by_job=None, - job_array=None, - nodes=None, label_log_files="", wait_until_sbatch=True, + slurm={}, + launchSlurm=True, + slurm_settings = None ): - if isinstance(command, str): - command = [command] - - if shellPostCommands is None: - shellPostCommands = [] - - if shellPreCommands is None: - shellPreCommands = [] - - folderExecution = folder_remote + fileSBATCH = folder_local / f"mitim_bash{label_log_files}.src" fileSHELL = folder_local / f"mitim_shell_executor{label_log_files}.sh" fileSBATCH_remote = f"{folderExecution}/mitim_bash{label_log_files}.src" + + # --------------------------------------------------- + # slurm_settings indicate the job resource allocation + # --------------------------------------------------- - minutes = int(minutes) + if slurm_settings is None: + slurm_settings = {} + nameJob = slurm_settings.setdefault("name", "mitim_job") + minutes = int(slurm_settings.setdefault("minutes", 10)) + memory_req_by_job = slurm_settings.setdefault("memory_req_by_job", None) + + nodes = slurm_settings.setdefault("nodes", None) + ntasks = slurm_settings.setdefault("ntasks", None) + cpuspertask = slurm_settings.setdefault("cpuspertask", None) + ntaskspernode = slurm_settings.setdefault("ntaskspernode", None) + gpuspertask = slurm_settings.setdefault("gpuspertask", None) + + job_array 
= slurm_settings.setdefault("job_array", None) + job_array_limit = slurm_settings.setdefault("job_array_limit", None) + + # --------------------------------------------------- + # slurm indicate the machine specifications as given by the config instead of individual job + # --------------------------------------------------- + partition = slurm.setdefault("partition", None) email = slurm.setdefault("email", None) exclude = slurm.setdefault("exclude", None) @@ -1021,7 +1116,7 @@ def create_slurm_execution_files( constraint = slurm.setdefault("constraint", None) memory_req_by_config = slurm.setdefault("mem", None) request_exclusive_node = slurm.setdefault("exclusive", False) - + if memory_req_by_job == 0 : print("\t\t- Entire node memory requested by job, overwriting memory requested by config file", typeMsg="i") memory_req = memory_req_by_job @@ -1032,6 +1127,13 @@ def create_slurm_execution_files( if memory_req_by_config is not None: print(f"\t\t- Memory requested by config file ({memory_req_by_config})", typeMsg="i") memory_req = memory_req_by_config + + if minutes >= 60: + hours = minutes // 60 + minutes = minutes - hours * 60 + time_com = f"{str(hours).zfill(2)}:{str(minutes).zfill(2)}:00" + else: + time_com = f"{str(minutes).zfill(2)}:00" """ ******************************************************************************************** @@ -1039,84 +1141,67 @@ def create_slurm_execution_files( ******************************************************************************************** """ - if minutes >= 60: - hours = minutes // 60 - minutes = minutes - hours * 60 - time_com = f"{str(hours).zfill(2)}:{str(minutes).zfill(2)}:00" - else: - time_com = f"{str(minutes).zfill(2)}:00" + command = [command] if isinstance(command, str) else command + shellPreCommands = [] if shellPreCommands is None else shellPreCommands + shellPostCommands = [] if shellPostCommands is None else shellPostCommands + # ~~~~ Construct SLURM header ~~~~~~~~~~~~~~~ commandSBATCH = [] - # 
******* Basics commandSBATCH.append("#!/usr/bin/env bash") commandSBATCH.append(f"#SBATCH --job-name {nameJob}") - commandSBATCH.append( - f"#SBATCH --output {folderExecution}/slurm_output{label_log_files}.dat" - ) - commandSBATCH.append( - f"#SBATCH --error {folderExecution}/slurm_error{label_log_files}.dat" - ) + commandSBATCH.append(f"#SBATCH --output {folderExecution}/slurm_output{label_log_files}.dat") + commandSBATCH.append(f"#SBATCH --error {folderExecution}/slurm_error{label_log_files}.dat") + commandSBATCH.append(f"#SBATCH --time {time_com}") if email is not None: commandSBATCH.append("#SBATCH --mail-user=" + email) - - # ******* Partition / Billing - commandSBATCH.append(f"#SBATCH --partition {partition}") - + if partition is not None: + commandSBATCH.append(f"#SBATCH --partition {partition}") if account is not None: commandSBATCH.append(f"#SBATCH --account {account}") if constraint is not None: commandSBATCH.append(f"#SBATCH --constraint {constraint}") - if memory_req is not None: commandSBATCH.append(f"#SBATCH --mem {memory_req}") - - commandSBATCH.append(f"#SBATCH --time {time_com}") - if job_array is not None: - commandSBATCH.append(f"#SBATCH --array={job_array}") + commandSBATCH.append(f"#SBATCH --array={job_array}{f'%{job_array_limit} ' if job_array_limit is not None else ''}") elif request_exclusive_node: commandSBATCH.append("#SBATCH --exclusive") - - # ******* CPU setup if nodes is not None: commandSBATCH.append(f"#SBATCH --nodes {nodes}") - commandSBATCH.append(f"#SBATCH --ntasks {ntasks}") - commandSBATCH.append(f"#SBATCH --cpus-per-task {cpuspertask}") - + if ntasks is not None: + commandSBATCH.append(f"#SBATCH --ntasks {ntasks}") + if ntaskspernode is not None: + commandSBATCH.append(f"#SBATCH --ntasks-per-node {ntaskspernode}") + if cpuspertask is not None: + commandSBATCH.append(f"#SBATCH --cpus-per-task {cpuspertask}") + if gpuspertask is not None: + commandSBATCH.append(f"#SBATCH --gpus-per-task {gpuspertask}") if exclude is not None: 
commandSBATCH.append(f"#SBATCH --exclude={exclude}") commandSBATCH.append("#SBATCH --profile=all") - - - commandSBATCH.append("export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK") + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - # ******* Commands + # ~~~~ Commands ~~~~~~~~~~~~~~~ commandSBATCH.append("") - commandSBATCH.append( - 'echo "MITIM: Submitting SLURM job $SLURM_JOBID in $HOSTNAME (host: $SLURM_SUBMIT_HOST)"' - ) - commandSBATCH.append( - 'echo "MITIM: Nodes have $SLURM_CPUS_ON_NODE cores and $SLURM_JOB_NUM_NODES node(s) were allocated for this job"' - ) - commandSBATCH.append( - 'echo "MITIM: Each of the $SLURM_NTASKS tasks allocated will run with $SLURM_CPUS_PER_TASK cores, allocating $SRUN_CPUS_PER_TASK CPUs per srun"' - ) - commandSBATCH.append( - 'echo "***********************************************************************************************"' - ) - commandSBATCH.append( - 'echo ""' - ) + commandSBATCH.append("export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK") + commandSBATCH.append('echo "MITIM: Submitting SLURM job $SLURM_JOBID in $HOSTNAME (host: $SLURM_SUBMIT_HOST)"') + commandSBATCH.append('echo "MITIM: Nodes have $SLURM_CPUS_ON_NODE cores and $SLURM_JOB_NUM_NODES node(s) were allocated for this job"') + commandSBATCH.append('echo "MITIM: Each of the $SLURM_NTASKS tasks allocated will run with $SLURM_CPUS_PER_TASK cores, allocating $SRUN_CPUS_PER_TASK CPUs per srun"') + commandSBATCH.append('echo "***********************************************************************************************"') + commandSBATCH.append('echo ""') commandSBATCH.append("") - full_command = [modules_remote] if (modules_remote is not None) else [] + # If modules, add them, but also make sure I expand the potential aliases that they may have! 
+ full_command = ["shopt -s expand_aliases",modules_remote] if (modules_remote is not None) else [] + full_command.extend(command) for c in full_command: commandSBATCH.append(c) commandSBATCH.append("") + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ wait_txt = " --wait" if wait_until_sbatch else "" if launchSlurm: @@ -1135,10 +1220,13 @@ def create_slurm_execution_files( """ commandSHELL = ["#!/usr/bin/env bash"] - commandSHELL.extend(copy.deepcopy(shellPreCommands)) + commandSHELL.append("") if modules_remote is not None: commandSHELL.append(modules_remote) + + commandSHELL.extend(copy.deepcopy(shellPreCommands)) + commandSHELL.append(f"{launch} {fileSBATCH_remote}") commandSHELL.append("") for i in range(len(shellPostCommands)): @@ -1154,7 +1242,7 @@ def create_slurm_execution_files( ******************************************************************************************** """ - comm = f"cd {folder_remote} && chmod +x {fileSBATCH_remote} && chmod +x mitim_shell_executor{label_log_files}.sh && ./mitim_shell_executor{label_log_files}.sh > mitim.out" + comm = f"cd {folderExecution} && chmod +x {fileSBATCH_remote} && chmod +x mitim_shell_executor{label_log_files}.sh && ./mitim_shell_executor{label_log_files}.sh > mitim.out" return comm, fileSBATCH.resolve(), fileSHELL.resolve() @@ -1236,7 +1324,15 @@ def perform_quick_remote_execution( job.run(check_if_files_received=check_if_files_received) -def retrieve_files_from_remote(folder_local, machine, files_remote = [], folders_remote = [], purge_tmp_files = False, ensure_files = True): +def retrieve_files_from_remote( + folder_local, + machine, + files_remote = [], + folders_remote = [], + only_folder_structure_with_files = None, # If not None, only the folder structure is retrieved, with files in the list + purge_tmp_files = False, + ensure_files = True + ): ''' Quick routine for file retrieval from remote machine (assumes remote machine is linux) @@ -1263,7 +1359,19 @@ def retrieve_files_from_remote(folder_local, 
machine, files_remote = [], folders output_files.append(file0) for folder in folders_remote: folder0 = f'{IOtools.expandPath(folder)}'.split('/')[-1] - command += f'cp -r {folder} {machineSettings["folderWork"]}/{folder0}\n' + + folder_source = folder + folder_destination = f'{machineSettings["folderWork"]}/{folder0}' + if only_folder_structure_with_files is None: + # Normal full copy + command += f'cp -r {folder_source} {folder_destination}\n' + else: + retrieve_files = '' + for file in only_folder_structure_with_files: + retrieve_files += f'-f"+ {file}" ' + # Only copy the folder structure with a few files + command += f'rsync -av -f"+ */" {retrieve_files}-f"- *" {folder_source}/ {folder_destination}/\n' + output_folders.append(folder0) # ------------------------------------------------ diff --git a/src/mitim_tools/misc_tools/GRAPHICStools.py b/src/mitim_tools/misc_tools/GRAPHICStools.py index 04be8a6c..ebc52a16 100644 --- a/src/mitim_tools/misc_tools/GRAPHICStools.py +++ b/src/mitim_tools/misc_tools/GRAPHICStools.py @@ -666,6 +666,14 @@ def fillGraph( ms=None, ls="-", ): + + if IOtools.isnum(y): + y = np.array([y] * len(x)) + if IOtools.isnum(y_down): + y_down = np.array([y_down] * len(x)) + if IOtools.isnum(y_up): + y_up = np.array([y_up] * len(x)) + if y_up is not None: l = ax.fill_between(x, y, y_up, facecolor=color, alpha=alpha, label=label) if y_down is not None: @@ -690,6 +698,19 @@ def fillGraph( return l +def adjust_subplots(fig = None, axs=None, vertical=0.4, horizontal=0.3): + + if fig is None and axs is None: + raise ValueError("Either fig or axs must be provided") + + if axs is not None: + fig = next(iter(axs.values())).get_figure() + + fig.subplots_adjust( + hspace=vertical, # vertical spacing between rows + wspace=horizontal # horizontal spacing between columns + ) + def listColors(): col = [ @@ -1072,44 +1093,6 @@ def colorTableFade(num, startcolor="b", endcolor="r", alphalims=[1.0, 1.0]): return cn, cpick -def createAnimation( - fig, 
FunctionToAnimate, framesCalc, FramesPerSecond, BITrate, MovieFile, DPIs -): - plt.rcParams["animation.ffmpeg_path"] = "/usr/local/bin/ffmpeg" - if "mfews" in socket.gethostname(): - plt.rcParams["animation.ffmpeg_path"] = "/usr/bin/ffmpeg" - ani = animation.FuncAnimation( - fig, FunctionToAnimate, frames=framesCalc, repeat=True - ) - - Writer = animation.writers["ffmpeg"] - writer = Writer(fps=FramesPerSecond, metadata=dict(artist="PRF"), bitrate=BITrate) - ani.save(writer=writer, filename=MovieFile, dpi=DPIs) - - -def animageFunction( - plottingFunction, - axs, - fig, - MovieFile, - HowManyFrames, - framePS=50, - BITrate=1200, - DPIs=150, -): - if type(axs) not in [np.ndarray, list]: - axs = [axs] - - def animate(i): - for j in range(len(axs)): - axs[j].clear() - plottingFunction(axs, i) - print(f"\t~~ Frame {i + 1}/{HowManyFrames}") - - print(" --> Creating animation") - createAnimation(fig, animate, HowManyFrames, framePS, BITrate, MovieFile, DPIs) - - def reduceVariable(var, howmanytimes, t=None, trange=[0, 100]): if t is not None: var = var[np.argmin(np.abs(t - trange[0])) : np.argmin(np.abs(t - trange[1]))] @@ -1493,4 +1476,43 @@ def PSFCcolors(): colors["Heated"] = "#F25757" colors["Orange Edge"] = "#FFA630" - return colors \ No newline at end of file + return colors + + +''' +******************************************************************** +Capabilities to create animations using matplotlib's FuncAnimation. 
+******************************************************************** +''' + +def animateFunction( + plottingFunction, # Function that plots the data, must receive axs and frame index as arguments + axs, # List of axes to plot on + fig, # Figure object to create the animation in + MovieFile, # Output filename for the movie + HowManyFrames: int, # Total number of frames in the animation + framePS: int = 50, # Frames per second for the animation + BITrate: int = 1200, # Bitrate for the output video + DPIs: int = 150, # Dots per inch for the output video + ffmpeg_path = None # Path to ffmpeg executable, if None it uses the default. e.g. /usr/local/bin/ffmpeg, /usr/bin/ffmpeg +): + if type(axs) not in [np.ndarray, list]: + axs = [axs] + + def animate(i): + for j in range(len(axs)): + axs[j].clear() + plottingFunction(axs, i) + print(f"\t~~ Frame {i + 1}/{HowManyFrames}") + + print(" --> Creating animation") + + if ffmpeg_path is not None: + plt.rcParams["animation.ffmpeg_path"] = ffmpeg_path + + ani = animation.FuncAnimation(fig, animate, frames=HowManyFrames, repeat=True) + + Writer = animation.writers["ffmpeg"] + writer = Writer(fps=framePS, metadata=dict(artist="PRF"), bitrate=BITrate) + ani.save(writer=writer, filename=MovieFile, dpi=DPIs) + diff --git a/src/mitim_tools/misc_tools/IOtools.py b/src/mitim_tools/misc_tools/IOtools.py index ae1b5a6c..c431b689 100644 --- a/src/mitim_tools/misc_tools/IOtools.py +++ b/src/mitim_tools/misc_tools/IOtools.py @@ -1,7 +1,8 @@ import os +import re import shutil import psutil -import copy +from typing import Callable import dill as pickle_dill import pandas as pd from mitim_tools.misc_tools import GRAPHICStools @@ -19,6 +20,10 @@ import json import functools import hashlib +import io +from contextlib import redirect_stdout +import yaml, importlib +from typing import Any, Mapping from collections import OrderedDict from pathlib import Path import platform @@ -59,46 +64,272 @@ def _get_time(self): self.timeDiff = 
getTimeDifference(self.timeBeginning, niceText=False) self.profiler.dump_stats(self.file) - print( - f'Script took {createTimeTXT(self.timeDiff)}, profiler stats dumped to {self.file} (open with "python3 -m snakeviz {self.file}")' - ) - -class timer(object): - - def __init__(self, name="\t* Script", name_timer = '\t* Start time: '): - self.name = name - self.name_timer = name_timer + print(f'Script took {createTimeTXT(self.timeDiff)}, profiler stats dumped to {self.file} (open with "python3 -m snakeviz {self.file}")') +class timer: + ''' + Context manager to time a script or function execution. + ''' + # ──────────────────────────────────────────────────────────────────── + def __init__(self, + name: str = "Script", # Name of the script for printing, visualization + print_at_entering: str | None = None, # Prefix printed right before the timer starts + log_file: Path | None = None): # File to log the timing information in JSON format + self.name = name + self.print_at_entering = print_at_entering + self.log_file = log_file + + # ──────────────────────────────────────────────────────────────────── def __enter__(self): - self.timeBeginning = datetime.datetime.now() - if self.name_timer is not None: print(f'{self.name_timer}{self.timeBeginning.strftime("%Y-%m-%d %H:%M:%S")}') - return self - - def __exit__(self, *args): - self._get_time() + # high-resolution timer + wall-clock stamp + + self.t0_wall = time.perf_counter() + self.t0 = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - def _get_time(self): + if self.print_at_entering: + print(f'{self.print_at_entering}{self.t0}') + return self - self.timeDiff = getTimeDifference(self.timeBeginning, niceText=False) + # ──────────────────────────────────────────────────────────────────── + def __exit__(self, exc_type, exc, tb): + self._finish() + return False # propagate any exception - print(f'{self.name} took {createTimeTXT(self.timeDiff)}') + # ──────────────────────────────────────────────────────────────────── + 
def _finish(self): + + dt = time.perf_counter() - self.t0_wall + t1 = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + print(f'\t\t* {self.name} took {createTimeTXT(dt)}') + + if self.log_file: + record = { + "script" : self.name, + "t_start" : self.t0, + "ts_end" : t1, + "duration_s" : dt, + } + with Path(self.log_file).open("a", buffering=1) as f: + f.write(json.dumps(record) + "\n") # Decorator to time functions -def mitim_timer(name="\t* Script",name_timer = '\t* Start time: '): +def mitim_timer( + name: str | None = None, + print_at_entering: str | None = None, + log_file: str | Path | Callable[[object], str | Path] | None = None + ): + """ + Decorator that times a function / method and optionally appends one JSON + line to *log_file* after the call finishes. + + Parameters + ---------- + name : str | None + Human-readable beat name. If None, defaults to the wrapped function's __name__. + print_at_entering : str + Prefix printed right before the timer starts + log_file : str | Path | callable(self) -> str | Path | None + • str / Path → literal path written every time the beat finishes + • callable → called **at call time** with the bound instance + (`self`) and must return the path to use + • None → no file is written, only console timing is printed + + Notes + ----- + *When* the wrapper runs it has access to the bound instance (`self`), so + callable argument values let you access self variables. 
+ """ + def decorator_timer(func): + script_name = name or func.__name__ + @functools.wraps(func) def wrapper_timer(*args, **kwargs): - with timer(name,name_timer=name_timer): + # -------------------- resolve name -------------------------- + if callable(script_name): + # assume first positional arg is `self` for bound methods + instance = args[0] if args else None + chosen_script_name = script_name(instance) + else: + chosen_script_name = script_name + # --------------------------------------------------------------- + # -------------------- resolve log_file -------------------------- + if callable(log_file): + # assume first positional arg is `self` for bound methods + instance = args[0] if args else None + chosen_log_file = log_file(instance) + else: + chosen_log_file = log_file + # --------------------------------------------------------------- + + # Your original context-manager timer class: + with timer(chosen_script_name, + print_at_entering=print_at_entering, + log_file=chosen_log_file): return func(*args, **kwargs) + return wrapper_timer + return decorator_timer +# --------------------------------------------------------------------------- +def plot_timings(jsonl_path, axs = None, unit: str = "min", color = "b", label= '', log=False): + """ + Plot cumulative durations from a .jsonl timing ledger written by @mitim_timer, + with vertical lines when the beat number changes. + + Parameters + ---------- + jsonl_path : str | Path + File with one JSON record per line. + unit : {"s", "min", "h"} + Unit for the y-axis. 
+ """ + multiplier = {"s": 1, "min": 1 / 60, "h": 1 / 3600}[unit] + + scripts, script_time, cumulative, beat_nums, script_restarts = [], [], [], [], [] + running = 0.0 + beat_pat = re.compile(r"Beat\s*#\s*(\d+)") + + # ── read the file ─────────────────────────────────────────────────────── + with Path(jsonl_path).expanduser().open() as f: + for line in f: + if not line.strip(): + continue + rec = json.loads(line) + + if rec["script"] not in scripts: + + scripts.append(rec["script"]) + script_time.append(rec["duration_s"] * multiplier) + running += rec["duration_s"]* multiplier + cumulative.append(running) + + m = beat_pat.search(rec["script"]) + beat_nums.append(int(m.group(1)) if m else None) + + script_restarts.append(0.0) + + else: + # If the script is already in the list, it means it was restarted + idx = scripts.index(rec["script"]) + script_restarts[idx] += rec["duration_s"] * multiplier + + cumulative[-1] += script_restarts[idx] + running += script_restarts[idx] + + + if not scripts: + raise ValueError(f"No records found in {jsonl_path}") + + beat_nums = [0] + beat_nums # Start with zero beat + scripts = ['ini'] + scripts # Add initial beat + script_time = [0.0] + script_time # Start with zero time + cumulative = [0.0] + cumulative # Start with zero time + script_restarts = [0.0] + script_restarts # Start with zero restarts + + # ── plot ──────────────────────────────────────────────────────────────── + x = list(range(len(scripts))) + + if axs is None: + plt.ion() + fig = plt.figure() + axs = fig.subplot_mosaic(""" + A + B + """) + + try: + axs = [ax for ax in axs.values()] + except: + pass + + ax = axs[0] + ax.plot(x, cumulative, "-s", markersize=8, color=color, label=label) + + # Add restarts as vertical lines + for i in range(len(script_restarts)): + if script_restarts[i] > 0: + ax.plot( + [x[i],x[i]], + [cumulative[i],cumulative[i]-script_restarts[i]], + "-.o", markersize=5, color=color) + + + for i in range(1, len(beat_nums)): + if beat_nums[i] != 
beat_nums[i - 1]: + ax.axvline(i - 0.5, color='k',linestyle="-.") + + #ax.set_xlim(left=0) + ax.set_ylabel(f"Cumulative time ({unit})"); #ax.set_ylim(bottom=0) + ax.set_xticks(x, scripts, rotation=10, ha="right", fontsize=8) + GRAPHICStools.addDenseAxis(ax) + ax.legend(loc='upper left', fontsize=8) + + + ax = axs[1] + for i in range(len(scripts)-1): + ax.plot([x[i], x[i+1]], [0, script_time[i+1]], "-s", markersize=8, color=color) + + # Add restarts as vertical lines + for i in range(len(script_restarts)-1): + if script_restarts[i] > 0: + ax.plot( + [x[i+1],x[i+1]], + [script_time[i+1],script_time[i+1]+script_restarts[i+1]], + "-.o", markersize=5, color=color) + + for i in range(1, len(beat_nums)): + if beat_nums[i] != beat_nums[i - 1]: + ax.axvline(i - 0.5, color='k',linestyle="-.") + + #ax.set_xlim(left=0) + ax.set_ylabel(f"Time ({unit})"); #ax.set_ylim(bottom=0) + ax.set_xticks(x, scripts, rotation=10, ha="right", fontsize=8) + GRAPHICStools.addDenseAxis(ax) + if log: + ax.set_yscale('log') + + return x, scripts + + +# ------------------------------------ + +# Decorator to hook methods before and after execution +def hook_method(before=None, after=None): + def decorator(func): + def wrapper(self, *args, **kwargs): + if before: + before(self) + result = func(self, *args, **kwargs) + if after: + after(self) + return result + return wrapper + return decorator + def clipstr(txt, chars=40): if not isinstance(txt, str): txt = f"{txt}" return f"{'...' 
if len(txt) > chars else ''}{txt[-chars:]}" if txt is not None else None + +def deep_dict_update(d, u): + for k, v in u.items(): + if isinstance(v, dict) and isinstance(d.get(k), dict): + deep_dict_update(d[k], v) # recurse into nested dict + else: + d[k] = v # overwrite at lowest level + return d + +def deep_grab_flags_dict(d): + keys = {} + for key in d.keys(): + keys[key] = deep_grab_flags_dict(d[key]) if isinstance(d[key], dict) else None + return keys + def receiveWebsite(url, data=None): NumTriesAfterTimeOut = 60 secWaitTimeOut = 10 @@ -367,46 +598,16 @@ def calculate_size_pickle(file): obj = pickle.load(f) calculate_sizes_obj_recursive(obj, recursion = 20) -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# MITIM optimization namelist -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -def read_mitim_nml(json_file): - jpath = Path(json_file).expanduser() - with open(jpath, 'r') as file: - optimization_options = json.load(file) - - return optimization_options - -def curate_mitim_nml(optimization_options, stopping_criteria_default = None): - - # Optimization criterion - if optimization_options['convergence_options']['stopping_criteria'] is None: - optimization_options['convergence_options']['stopping_criteria'] = stopping_criteria_default - - # Add optimization print - if optimization_options is not None: - unprint_fun = copy.deepcopy(optimization_options['convergence_options']['stopping_criteria']) - def opt_crit(*args,**kwargs): - print('\n') - print('--------------------------------------------------') - print('Convergence criteria') - print('--------------------------------------------------') - v = unprint_fun(*args,**kwargs) - print('--------------------------------------------------\n') - return v - optimization_options['convergence_options']['stopping_criteria'] = opt_crit - - # 
Check if the optimization options are in the namelist - from mitim_tools import __mitimroot__ - Optim_potential = read_mitim_nml(__mitimroot__ / "templates" / "main.namelist.json") - for ikey in optimization_options: - if ikey not in Optim_potential: - print(f"\t- Option {ikey} is an unexpected variable, prone to errors", typeMsg="q") - - return optimization_options - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +def check_flags_mitim_namelist(d, d_check, avoid = [], askQuestions=True): + for key in d.keys(): + if key in avoid: + continue + elif key not in d_check: + print(f"\t- {key} is an unexpected variable, prone to errors or misinterpretation",typeMsg="q" if askQuestions else "w") + elif not isinstance(d[key], dict): + continue + else: + check_flags_mitim_namelist(d[key], d_check[key], avoid=avoid, askQuestions=askQuestions) def getpythonversion(): return [ int(i.split("\n")[0].split("+")[0]) for i in sys.version.split()[0].split(".") ] @@ -719,7 +920,7 @@ def getLocInfo(locFile, with_extension=False): def findFileByExtension( - folder, extension, prefix=" ", fixSpaces=False, ForceFirst=False, agnostic_to_case=False + folder, extension, prefix=" ", fixSpaces=False, ForceFirst=False, agnostic_to_case=False, do_not_consider_files=None ): """ Retrieves the file without folder and extension @@ -731,6 +932,16 @@ def findFileByExtension( if fpath.exists(): allfiles = findExistingFiles(fpath, extension, agnostic_to_case = agnostic_to_case) + # Filter out files that contain any of the strings in do_not_consider_files + if do_not_consider_files is not None: + filtered_files = [] + for file_path in allfiles: + file_name = file_path.name + should_exclude = any(exclude_str in file_name for exclude_str in do_not_consider_files) + if not should_exclude: + filtered_files.append(file_path) + allfiles = filtered_files + if len(allfiles) > 1: # print(allfiles) if not ForceFirst: @@ 
-750,7 +961,7 @@ def findFileByExtension( f"\t\t\t~ Folder ...{fstr} does not exist, returning None", ) - # TODO: We really should not change return type + #TODO: We really should not change return type #retval = None #if retpath is not None: # if not provide_full_path: @@ -1754,6 +1965,36 @@ def print_machine_info(output_file=None): info_lines.append(f"OpenMP Enabled in PyTorch: {openmp_enabled.is_available() if openmp_enabled else 'N/A'}") info_lines.append(f"MKL Enabled in PyTorch: {mkl_enabled.is_available() if mkl_enabled else 'N/A'}") + for var in ["OMP_NUM_THREADS", "MKL_NUM_THREADS", "OPENBLAS_NUM_THREADS", + "NUMEXPR_NUM_THREADS", "SLURM_CPUS_PER_TASK"]: + info_lines.append(f"{var}: {os.environ.get(var, 'Not set')}") + + f = io.StringIO() + with redirect_stdout(f): + torch.__config__.show() + info_lines.append("\n=== PyTorch Build Config ===") + info_lines.append(f.getvalue()) + + info_lines.append("\n=== Package Versions ===") + for pkg in ["torch", "gpytorch", "botorch"]: + try: + mod = __import__(pkg) + info_lines.append(f"{pkg}: {mod.__version__}") + except Exception: + info_lines.append(f"{pkg}: not available") + + try: + import psutil + proc = psutil.Process() + if hasattr(proc, "cpu_affinity"): + info_lines.append(f"Process affinity (cpus): {proc.cpu_affinity()}") + else: + info_lines.append("CPU affinity not supported on this platform/psutil build") + except ImportError: + info_lines.append("psutil not installed (skipping affinity check)") + + info_lines.append("=============================\n\n") + # Output to screen or file output = '\n'.join(info_lines) if output_file: @@ -1876,6 +2117,19 @@ def shutil_rmtree(item): shutil.move(item, new_item) print(f"> Folder {clipstr(item)} could not be removed. 
Renamed to {clipstr(new_item)}",typeMsg='w') +def recursive_backup(file, extension='bak'): + + index = 0 + file_new = file.with_suffix(f".{extension}.{index}") + + while file_new.exists(): + index += 1 + file_new = file.with_suffix(f".{extension}.{index}") + + shutil.copy2(file, file_new) + print(f"> File {clipstr(file)} backed up to {clipstr(file_new)}", typeMsg='i') + + def unpickle_mitim(file): with open(str(file), "rb") as handle: @@ -1907,3 +2161,124 @@ def find_class(self, module, name): print(f"\t\tModule not found: {module} {name}; returning dummy", typeMsg="i") return super().find_class("torch._utils", name) +def read_mitim_yaml(path: str): + + def resolve(x): + if isinstance(x, dict): + return {k: resolve(v) for k, v in x.items()} + if isinstance(x, list): + return [resolve(v) for v in x] + if isinstance(x, str) and x.startswith("import::"): + modattr = x[len("import::"):] + module_name, attr = modattr.rsplit(".", 1) + return getattr(importlib.import_module(module_name), attr) + return x + + with open(path, "r") as f: + cfg = yaml.safe_load(f) + return resolve(cfg) + + +import yaml +import numpy as np +import inspect +from pathlib import Path +from typing import Any, Mapping + +def _as_import_string(obj: Any) -> str: + """ + Return an 'import::module.qualname' for callables/classes. + Falls back to str(obj) if module/name aren't available. 
+ """ + # Handle bound methods + if inspect.ismethod(obj): + func = obj.__func__ + mod = func.__module__ + qn = getattr(func, "__qualname__", func.__name__) + qn = qn.replace(".", "") + return f"import::{mod}.{qn}" + # Handle functions, classes, other callables with module/name + if inspect.isfunction(obj) or inspect.isbuiltin(obj) or inspect.isclass(obj) or callable(obj): + mod = getattr(obj, "__module__", None) + name = getattr(obj, "__qualname__", getattr(obj, "__name__", None)) + if mod and name: + name = name.replace(".", "") + return f"import::{mod}.{name}" + return f"import::{str(obj)}" + + # Strings: if they already look like import::..., keep; otherwise just return + if isinstance(obj, str): + return obj + + # Fallback + return f"import::{str(obj)}" + +def _normalize_for_yaml(obj: Any) -> Any: + """ + Recursively convert objects into YAML-safe Python builtins. + - NumPy arrays/scalars -> lists/scalars + - Paths -> str + - sets -> lists + - callables/classes/methods -> 'import::module.qualname' + Leaves basic builtins as-is. 
+ """ + # NumPy + if isinstance(obj, np.ndarray): + return obj.tolist() + if isinstance(obj, np.generic): + return obj.item() + + # Simple builtins + if obj is None or isinstance(obj, (bool, int, float, str)): + return obj + + # Path-like + if isinstance(obj, (Path, )): + return str(obj) + + # Sets -> lists + if isinstance(obj, (set, frozenset)): + return [_normalize_for_yaml(v) for v in obj] + + # Mappings + if isinstance(obj, Mapping): + # ensure keys are YAML-safe (coerce to str if needed) + return {str(k): _normalize_for_yaml(v) for k, v in obj.items()} + + # Sequences + if isinstance(obj, (list, tuple)): + return [_normalize_for_yaml(v) for v in obj] + + # Anything callable or class-like -> import string + if inspect.ismethod(obj) or inspect.isfunction(obj) or inspect.isbuiltin(obj) or inspect.isclass(obj) or callable(obj): + return _as_import_string(obj) + + # Fallback to str for unknown objects + return str(obj) + +class _NoAliasDumper(yaml.SafeDumper): + def ignore_aliases(self, data): + return True + def increase_indent(self, flow=False, indentless=False): + return super().increase_indent(flow, indentless=False) + +def write_mitim_yaml(parameters: Mapping[str, Any], path: str) -> None: + """ + General YAML writer: + - No assumptions about keys (works for solution/transport/target and also optimization_options). + - Normalizes everything to YAML-safe types, including function objects. 
+ """ + if not isinstance(parameters, Mapping): + raise TypeError("parameters must be a dict-like mapping") + clean = _normalize_for_yaml(parameters) + + with open(path, "w", encoding="utf-8") as f: + yaml.dump( + clean, + f, + Dumper=_NoAliasDumper, + sort_keys=False, + default_flow_style=False, + allow_unicode=True, + width=1000, + ) \ No newline at end of file diff --git a/src/mitim_tools/misc_tools/LOGtools.py b/src/mitim_tools/misc_tools/LOGtools.py index e3c582ae..6cb7c82f 100644 --- a/src/mitim_tools/misc_tools/LOGtools.py +++ b/src/mitim_tools/misc_tools/LOGtools.py @@ -27,7 +27,7 @@ def printMsg(*args, typeMsg=""): verbose = read_verbose_level() if verbose == 0: - return False + return True else: # ----------------------------------------------------------------------------- @@ -57,8 +57,6 @@ def printMsg(*args, typeMsg=""): # Print if typeMsg in ["w"]: print(*total) - # Question result - return False elif verbose == 2: # Print @@ -83,6 +81,8 @@ def printMsg(*args, typeMsg=""): if typeMsg == "q": return query_yes_no("\t\t>> Do you want to continue?", extra=extra) + return True # Default return value if no specific typeMsg is provided + if not sys.platform.startswith('win'): import termios @@ -120,6 +120,10 @@ def query_yes_no(question, extra=""): From https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input ''' + if not sys.stdin.isatty(): + raise Exception("Interactive terminal response required - something is wrong with this run") + + valid = {"y": True, "n": False, "e": None} prompt = " [y/n/e] (yes, no, exit)" @@ -164,9 +168,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): ''' @contextlib.contextmanager -def conditional_log_to_file(log_file=None, msg=None): +def conditional_log_to_file(log_file=None, msg=None, write_log=True): - if log_file is not None: + if log_file is not None and write_log: with log_to_file(log_file, msg) as logger: yield logger else: diff --git a/src/mitim_tools/misc_tools/MATHtools.py 
b/src/mitim_tools/misc_tools/MATHtools.py index cfe023ea..e192301c 100644 --- a/src/mitim_tools/misc_tools/MATHtools.py +++ b/src/mitim_tools/misc_tools/MATHtools.py @@ -273,6 +273,57 @@ def integrate_definite(x, y, rangex=None): return 0 +def integrateQuadPoly(r, s): + """ + (batch,dim) + + Computes int(s*dr), so if s is s*dV/dr, then int(s*dV), which is the full integral + + From tgyro_volume_int.f90 + r - minor radius + s - s*volp + + (Modified to avoid if statements and for loops) + + """ + + if isinstance(s, torch.Tensor): + p = torch.zeros((r.shape[0], r.shape[1])).to(r) + else: + p = np.zeros((r.shape[0], r.shape[1])) + + # First point + + x1, x2, x3 = r[..., 0], r[..., 1], r[..., 2] + f1, f2, f3 = s[..., 0], s[..., 1], s[..., 2] + + p[..., 1] = (x2 - x1) * ( + (3 * x3 - x2 - 2 * x1) * f1 / 6 / (x3 - x1) + + (3 * x3 - 2 * x2 - x1) * f2 / 6 / (x3 - x2) + - (x2 - x1) ** 2 * f3 / 6 / (x3 - x1) / (x3 - x2) + ) + + # Next points + x1, x2, x3 = r[..., :-2], r[..., 1:-1], r[..., 2:] + f1, f2, f3 = s[..., :-2], s[..., 1:-1], s[..., 2:] + + p[..., 2:] = ( + (x3 - x2) + / (x3 - x1) + / 6 + * ( + (2 * x3 + x2 - 3 * x1) * f3 + + (x3 + 2 * x2 - 3 * x1) * f2 * (x3 - x1) / (x2 - x1) + - (x3 - x2) ** 2 * f1 / (x2 - x1) + ) + ) + + if isinstance(p, torch.Tensor): + return torch.cumsum(p, 1) + else: + return np.cumsum(p, 1) + + def extrapolate(x, xp, yp, order=3): s = InterpolatedUnivariateSpline(xp, yp, k=order) diff --git a/src/mitim_tools/misc_tools/PLASMAtools.py b/src/mitim_tools/misc_tools/PLASMAtools.py index 55c64ed9..b794eecd 100644 --- a/src/mitim_tools/misc_tools/PLASMAtools.py +++ b/src/mitim_tools/misc_tools/PLASMAtools.py @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt from IPython import embed from mitim_tools.misc_tools import MATHtools -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from mitim_tools.popcon_tools import FunctionalForms from mitim_tools.misc_tools.LOGtools import printMsg as print @@ 
-42,6 +42,7 @@ md = 3.34358e-24 me_u = 5.4488741e-04 # as in input.gacode +md_u = md*1E-3 / u factor_convection = 3 / 2 # IMPORTANT @@ -282,15 +283,17 @@ def calculatePressure(Te, Ti, ne, ni): - It only works if the vectors contain the entire plasma (i.e. roa[-1]=1.0), otherwise it will miss that contribution. """ - p, peT, piT = [], [], [] + p, peT, piT, piTall = [], [], [], [] for it in range(Te.shape[0]): pe = (Te[it, :] * 1e3 * e_J) * (ne[it, :] * 1e20) * 1e-6 # MPa + piall = (Ti[it, :, :] * 1e3 * e_J) * (ni[it, :, :] * 1e20) * 1e-6 # MPa pi = np.zeros(Te.shape[1]) for i in range(ni.shape[1]): - pi += (Ti[it, i, :] * 1e3 * e_J) * (ni[it, i, :] * 1e20) * 1e-6 # MPa + pi += piall[i, :] # Sum over all ions peT.append(pe) piT.append(pi) + piTall.append(piall) # Total pressure press = pe + pi @@ -299,16 +302,17 @@ def calculatePressure(Te, Ti, ne, ni): p = np.array(p) pe = np.array(peT) pi = np.array(piT) + pi_all = np.array(piTall) - return p, pe, pi + return p, pe, pi, pi_all def calculateVolumeAverage(rmin, var, dVdr): W, vals = [], [] for it in range(rmin.shape[0]): - W.append(CALCtools.integrateFS(var[it, :], rmin[it, :], dVdr[it, :])[-1]) + W.append(CALCtools.volume_integration(var[it, :], rmin[it, :], dVdr[it, :])[-1]) vals.append( - CALCtools.integrateFS(np.ones(rmin.shape[1]), rmin[it, :], dVdr[it, :])[-1] + CALCtools.volume_integration(np.ones(rmin.shape[1]), rmin[it, :], dVdr[it, :])[-1] ) W = np.array(W) / np.array(vals) @@ -338,13 +342,13 @@ def calculateContent(rmin, Te, Ti, ne, ni, dVdr): """ - p, pe, pi = calculatePressure(Te, Ti, ne, ni) + p, pe, pi, _ = calculatePressure(Te, Ti, ne, ni) We, Wi, Ne, Ni = [], [], [], [] for it in range(rmin.shape[0]): # Number of electrons Ne.append( - CALCtools.integrateFS(ne[it, :], rmin[it, :], dVdr[it, :])[-1] + CALCtools.volume_integration(ne[it, :], rmin[it, :], dVdr[it, :])[-1] ) # Number of particles total # Number of ions @@ -352,14 +356,14 @@ def calculateContent(rmin, Te, Ti, ne, ni, dVdr): for i in 
range(ni.shape[1]): ni0 += ni[it, i, :] Ni.append( - CALCtools.integrateFS(ni0, rmin[it, :], dVdr[it, :])[-1] + CALCtools.volume_integration(ni0, rmin[it, :], dVdr[it, :])[-1] ) # Number of particles total # Electron stored energy Wx = 3 / 2 * pe[it, :] - We.append(CALCtools.integrateFS(Wx, rmin[it, :], dVdr[it, :])[-1]) # MJ + We.append(CALCtools.volume_integration(Wx, rmin[it, :], dVdr[it, :])[-1]) # MJ Wx = 3 / 2 * pi[it, :] - Wi.append(CALCtools.integrateFS(Wx, rmin[it, :], dVdr[it, :])[-1]) # MJ + Wi.append(CALCtools.volume_integration(Wx, rmin[it, :], dVdr[it, :])[-1]) # MJ We = np.array(We) Wi = np.array(Wi) diff --git a/src/mitim_tools/misc_tools/scripts/compare_namelist.py b/src/mitim_tools/misc_tools/scripts/compare_namelist.py index 0897e832..7efb0d79 100644 --- a/src/mitim_tools/misc_tools/scripts/compare_namelist.py +++ b/src/mitim_tools/misc_tools/scripts/compare_namelist.py @@ -1,4 +1,4 @@ -import sys +import argparse import numpy as np from mitim_tools.misc_tools import IOtools from IPython import embed @@ -13,7 +13,7 @@ """ -def compareNML(file1, file2, commentCommand="!", separator="=", precision_of=None): +def compareNML(file1, file2, commentCommand="!", separator="=", precision_of=None, close_enough=1e-7): d1 = IOtools.generateMITIMNamelist( file1, commentCommand=commentCommand, separator=separator ) @@ -24,7 +24,7 @@ def compareNML(file1, file2, commentCommand="!", separator="=", precision_of=Non d1 = separateArrays(d1) d2 = separateArrays(d2) - diff = compareDictionaries(d1, d2, precision_of = precision_of) + diff = compareDictionaries(d1, d2, precision_of=precision_of, close_enough=close_enough) diffo = cleanDifferences(diff) @@ -77,7 +77,7 @@ def cleanDifferences(d, tol_rel=1e-7): return d_new -def compare_number(a,b,precision_of=None): +def compare_number(a,b,precision_of=None, close_enough=1e-7): if precision_of is None: a_rounded = a @@ -91,7 +91,10 @@ def compare_number(a,b,precision_of=None): else: decimal_places = 0 - b_rounded = 
round(b, decimal_places) + if isinstance(b, str): + b_rounded = b + else: + b_rounded = round(b, decimal_places) a_rounded = a elif precision_of == 2: @@ -107,11 +110,15 @@ def compare_number(a,b,precision_of=None): a_rounded = round(a, decimal_places) # Compare the two numbers - are_equal = (a_rounded == b_rounded) + if isinstance(a_rounded, str) or isinstance(b_rounded, str): + # If either is a string, we cannot compare numerically + are_equal = a_rounded == b_rounded + else: + are_equal = np.isclose(a_rounded, b_rounded, rtol=close_enough) return are_equal -def compareDictionaries(d1, d2, precision_of=None): +def compareDictionaries(d1, d2, precision_of=None, close_enough=1e-7): different = {} for key in d1: @@ -120,7 +127,7 @@ def compareDictionaries(d1, d2, precision_of=None): different[key] = [d1[key], None] # Values are different else: - if not compare_number(d1[key],d2[key],precision_of=precision_of): + if not compare_number(d1[key],d2[key],precision_of=precision_of, close_enough=close_enough): different[key] = [d1[key], d2[key]] for key in d2: @@ -133,46 +140,70 @@ def compareDictionaries(d1, d2, precision_of=None): def printTable(diff, warning_percent=1e-1): + # Compute percent differences first so we can sort by them + percs = {} for key in diff: - if diff[key][0] is not None: - if diff[key][1] is not None: - if diff[key][0] != 0.0: - try: - perc = 100 * np.abs( - (diff[key][0] - diff[key][1]) / diff[key][0] - ) - except: - perc = np.nan + v0, v1 = diff[key] + if v0 is None or v1 is None: + # Treat missing values as 100% difference for sorting + percs[key] = 100.0 + else: + if v0 != 0.0: + try: + percs[key] = 100 * np.abs((v0 - v1) / v0) + except Exception: + percs[key] = np.nan + else: + percs[key] = np.nan + + # Sort keys by descending percent; NaNs go last + def sort_key(k): + p = percs[k] + return (1, 0) if (p is None or (isinstance(p, float) and np.isnan(p))) else (0, -p) + + for key in sorted(diff.keys(), key=sort_key): + v0, v1 = diff[key] + if v0 
is not None: + if v1 is not None: + perc = percs[key] + if perc<1e-2: + perc_str = f"{perc:.2e}" + elif perc<1.0: + perc_str = f"{perc:.3f}" else: - perc = np.nan - print( - f"{key:>15}{str(diff[key][0]):>25}{str(diff[key][1]):>25} (~{perc:.0e}%)", - typeMsg="w" if perc > warning_percent else "", - ) + perc_str = f"{perc:.1f}" + print(f"{key:>15}{str(v0):>25}{str(v1):>25} ({perc_str} %)",typeMsg="i" if perc > warning_percent else "",) else: - print(f"{key:>15}{str(diff[key][0]):>25}{'':>25}") + print(f"{key:>15}{str(v0):>25}{'':>25} (100%)", typeMsg="i") else: - print(f"{key:>15}{'':>25}{str(diff[key][1]):>25}") - print( - "--------------------------------------------------------------------------------" - ) + print(f"{key:>15}{'':>25}{str(v1):>25} (100%)", typeMsg="i") + print("--------------------------------------------------------------------------------") def main(): - file1 = sys.argv[1] - file2 = sys.argv[2] - - try: - separator = sys.argv[3] - except: - separator = "=" - - diff = compareNML(file1, file2, separator=separator) + parser = argparse.ArgumentParser() + parser.add_argument("file1", type=str, help="First namelist file to compare") + parser.add_argument("file2", type=str, help="Second namelist file to compare") + parser.add_argument("--separator", type=str, required=False, default="=", + help="Separator used in the namelist files, default is '='") + parser.add_argument("--precision", type=int, required=False, default=None, + help="Precision for comparing numbers: 1 for decimal places, 2 for significant figures, None for exact comparison") + parser.add_argument("--close_enough", type=float, required=False, default=1e-7, + help="Tolerance for comparing numbers, default is 1e-7") + args = parser.parse_args() + + # Get arguments + file1 = args.file1 + file2 = args.file2 + separator = args.separator + precision = args.precision + close_enough = args.close_enough + + diff = compareNML(file1, file2, separator=separator, precision_of=precision, 
close_enough=close_enough) printTable(diff) print(f"Differences: {len(diff)}") - if __name__ == "__main__": main() diff --git a/src/mitim_modules/powertorch/physics/__init__.py b/src/mitim_tools/misc_tools/utils/__init__.py similarity index 100% rename from src/mitim_modules/powertorch/physics/__init__.py rename to src/mitim_tools/misc_tools/utils/__init__.py diff --git a/src/mitim_tools/misc_tools/utils/remote_tools.py b/src/mitim_tools/misc_tools/utils/remote_tools.py new file mode 100644 index 00000000..a900d440 --- /dev/null +++ b/src/mitim_tools/misc_tools/utils/remote_tools.py @@ -0,0 +1,45 @@ +import os, shutil +from mitim_tools.misc_tools import IOtools, FARMINGtools, CONFIGread +from IPython import embed + +def retrieve_remote_folders(folders_local, remote, remote_folder_parent, remote_folders, only_folder_structure_with_files): + + # Make sure folders_local is a list of complete Paths + folders_local = [IOtools.expandPath(folder).resolve() for folder in folders_local] + + if remote_folder_parent is not None: + folders_remote = [remote_folder_parent + '/' + folder.name for folder in folders_local] + elif remote_folders is not None: + folders_remote = remote_folders + else: + folders_remote = folders_local + + # Retrieve remote + s = CONFIGread.load_settings() + scratch_local_folder = s['local']['scratch'] + + if remote is not None: + + _, folders = FARMINGtools.retrieve_files_from_remote( + scratch_local_folder, + remote, + folders_remote = folders_remote, + purge_tmp_files = True, + only_folder_structure_with_files=only_folder_structure_with_files) + + # Renaming + for i in range(len(folders)): + folder = IOtools.expandPath(folders[i]) + folder_orig = IOtools.expandPath(folders_local[i]) + + if folder == folder_orig: + continue + + if folder_orig.exists(): + IOtools.shutil_rmtree(folder_orig) + + shutil.copytree(folder, folder_orig) + IOtools.shutil_rmtree(folder) + + + return folders_local \ No newline at end of file diff --git 
a/src/mitim_tools/opt_tools/BOTORCHtools.py b/src/mitim_tools/opt_tools/BOTORCHtools.py index d832cb95..f5c99fca 100644 --- a/src/mitim_tools/opt_tools/BOTORCHtools.py +++ b/src/mitim_tools/opt_tools/BOTORCHtools.py @@ -170,10 +170,6 @@ def __init__( self.mean_module = MITIM_LinearMeanGradients( batch_shape=self._aug_batch_shape, variables=variables, output=output ) - elif TypeMean == 3: - self.mean_module = MITIM_CriticalGradient( - batch_shape=self._aug_batch_shape, variables=variables - ) """ ----------------------------------------------------------------------- @@ -689,85 +685,56 @@ def __init__( else: mapping = { - 'Qe': 'aLte', - 'Qi': 'aLti', - 'Ge': 'aLne', - 'GZ': 'aLnZ', - 'Mt': 'dw0dr', - 'Pe': None # Referring to energy exchange + 'Qe_': 'aLte', + 'Qi_': 'aLti', + 'Ge_': 'aLne', + 'GZ_': 'aLnZ', + 'Mt_': 'dw0dr', + 'Qie': None # Referring to energy exchange } for i, variable in enumerate(variables): - if (mapping[output[:2]] is not None) and (mapping[output[:2]] == variable): + if (mapping[output[:3]] is not None) and (mapping[output[:3]] == variable): grad_vector.append(i) self.indeces_grad = tuple(grad_vector) # ---------------------------------------------------------------- - self.register_parameter( - name="weights_lin", - parameter=torch.nn.Parameter( - torch.randn(*batch_shape, len(self.indeces_grad), 1) - ), - ) - self.register_parameter( - name="bias", parameter=torch.nn.Parameter(torch.randn(*batch_shape, 1)) - ) + self.register_parameter(name="raw_weights_lin",parameter=torch.nn.Parameter(torch.randn(*batch_shape, len(self.indeces_grad), 1)),) + self.register_parameter(name="bias", parameter=torch.nn.Parameter(torch.randn(*batch_shape, 1))) # set the parameter constraint to be [0,1], when nothing is specified diffusion_constraint = gpytorch.constraints.constraints.Positive() # positive diffusion coefficient if only_diffusive: - self.register_constraint("weights_lin", diffusion_constraint) + self.register_constraint("raw_weights_lin", 
diffusion_constraint) def forward(self, x): - res = x[..., self.indeces_grad].matmul(self.weights_lin).squeeze(-1) + self.bias + weights_lin = self.weights_lin + res = x[..., self.indeces_grad].matmul(weights_lin).squeeze(-1) + self.bias return res + + # This follows the exact same pattern as in gpytorch's constant_mean.py + @property + def weights_lin(self): + return self._weights_lin_param(self) -class MITIM_CriticalGradient(gpytorch.means.mean.Mean): - def __init__(self, batch_shape=torch.Size(), variables=None, **kwargs): - super().__init__() - - # Indeces of variables that are gradient, so subject to CG behavior - grad_vector = [] - if variables is not None: - for i, variable in enumerate(variables): - if ("aL" in variable) or ("dw" in variable): - grad_vector.append(i) - self.indeces_grad = tuple(grad_vector) - # ---------------------------------------------------------------- - - self.register_parameter( - name="weights_lin", - parameter=torch.nn.Parameter( - torch.randn(*batch_shape, len(self.indeces_grad), 1) - ), - ) - self.register_parameter( - name="bias", parameter=torch.nn.Parameter(torch.randn(*batch_shape, 1)) - ) + @weights_lin.setter + def weights_lin(self, value): + self._weights_lin_closure(self, value) - self.NNfunc = ( - lambda x: x * (1 + torch.erf(x / 0.01)) / 2.0 - ) # https://paperswithcode.com/method/gelu + def _weights_lin_param(self, m): + if hasattr(m, "raw_weights_lin_constraint"): + return m.raw_weights_lin_constraint.transform(m.raw_weights_lin) + return m.raw_weights_lin - self.register_parameter( - name="relu_lin", - parameter=torch.nn.Parameter( - torch.randn(*batch_shape, len(self.indeces_grad), 1) - ), - ) - self.register_constraint( - "relu_lin", gpytorch.constraints.constraints.Interval(0, 1) - ) + def _weights_lin_closure(self, m, value): + if not torch.is_tensor(value): + value = torch.as_tensor(value).to(m.raw_weights_lin) - def forward(self, x): - res = ( - self.NNfunc(x[..., self.indeces_grad] - 
self.relu_lin.transpose(0, 1)) - .matmul(self.weights_lin) - .squeeze(-1) - + self.bias - ) - return res + if hasattr(m, "raw_weights_lin_constraint"): + m.initialize(raw_weights_lin=m.raw_weights_lin_constraint.inverse_transform(value)) + else: + m.initialize(raw_weights_lin=value) diff --git a/src/mitim_tools/opt_tools/OPTtools.py b/src/mitim_tools/opt_tools/OPTtools.py index 6dec83f6..91af55ac 100644 --- a/src/mitim_tools/opt_tools/OPTtools.py +++ b/src/mitim_tools/opt_tools/OPTtools.py @@ -221,11 +221,11 @@ def acquire_next_points( # Prepare (run more now to find more solutions, more diversity, even if later best_points is 1) if optimizer == "ga": - from mitim_tools.opt_tools.optimizers.GAtools import optimize_function + from mitim_tools.opt_tools.optimizers.evolutionary import optimize_function elif optimizer == "botorch": - from mitim_tools.opt_tools.optimizers.BOTORCHoptim import optimize_function + from mitim_tools.opt_tools.optimizers.botorch_tools import optimize_function elif optimizer == "root" or optimizer == "sr": - from mitim_tools.opt_tools.optimizers.ROOTtools import optimize_function + from mitim_tools.opt_tools.optimizers.multivariate import optimize_function if optimizer == "root": optimize_function = partial(optimize_function, method="scipy_root") elif optimizer == "sr" : diff --git a/src/mitim_tools/opt_tools/STEPtools.py b/src/mitim_tools/opt_tools/STEPtools.py index a13553e8..41f86102 100644 --- a/src/mitim_tools/opt_tools/STEPtools.py +++ b/src/mitim_tools/opt_tools/STEPtools.py @@ -184,13 +184,8 @@ def fit_step(self, avoidPoints=None, fitWithTrainingDataIfContains=None): surrogate_options = copy.deepcopy(self.surrogate_options) # Then, depending on application (e.g. 
targets in mitim are fitted differently) - if ( - "selectSurrogate" in surrogate_options - and surrogate_options["selectSurrogate"] is not None - ): - surrogate_options = surrogate_options["selectSurrogate"]( - outi, surrogate_options - ) + if "surrogate_selection" in surrogate_options and surrogate_options["surrogate_selection"] is not None: + surrogate_options = surrogate_options["surrogate_selection"](outi, surrogate_options) # --------------------------------------------------------------------------------------------------- # To avoid problems with fixed values (e.g. calibration terms that are fixed) diff --git a/src/mitim_tools/opt_tools/STRATEGYtools.py b/src/mitim_tools/opt_tools/STRATEGYtools.py index ad1807ab..c45de257 100644 --- a/src/mitim_tools/opt_tools/STRATEGYtools.py +++ b/src/mitim_tools/opt_tools/STRATEGYtools.py @@ -1,7 +1,9 @@ +import sys import copy import datetime import array import traceback +from typing import IO import torch from pathlib import Path from collections import OrderedDict @@ -10,6 +12,7 @@ import numpy as np import matplotlib.pyplot as plt from mitim_tools.misc_tools import IOtools, GRAPHICStools, GUItools, LOGtools +from mitim_tools.misc_tools.IOtools import mitim_timer from mitim_tools.opt_tools import OPTtools, STEPtools from mitim_tools.opt_tools.utils import ( BOgraphics, @@ -69,7 +72,7 @@ def __init__( folder, namelist=None, default_namelist_function=None, - tensor_opts = { + tensor_options = { "dtype": torch.double, "device": torch.device("cpu"), } @@ -78,7 +81,7 @@ def __init__( Namelist file can be provided and will be copied to the folder """ - self.tensor_opts = tensor_opts + self.tensor_options = tensor_options print("- Parent opt_evaluator function initialized") @@ -95,23 +98,20 @@ def __init__( IOtools.askNewFolder(self.folder / "Outputs") if namelist is not None: - print(f"\t- Namelist provided: {namelist}", typeMsg="i") + print(f"\t- Optimizaiton namelist provided: {namelist}", typeMsg="i") - 
self.optimization_options = IOtools.read_mitim_nml(namelist) + self.optimization_options = IOtools.read_mitim_yaml(namelist) elif default_namelist_function is not None: - print("\t- Namelist not provided, using MITIM default for this optimization sub-module", typeMsg="i") + print("\t- Optimizaiton namelist not provided, using MITIM default for this optimization sub-module", typeMsg="i") - namelist = __mitimroot__ / "templates" / "main.namelist.json" - self.optimization_options = IOtools.read_mitim_nml(namelist) + namelist = __mitimroot__ / "templates" / "namelist.optimization.yaml" + self.optimization_options = IOtools.read_mitim_yaml(namelist) self.optimization_options = default_namelist_function(self.optimization_options) else: - print( - "\t- No namelist provided (likely b/c for reading/plotting purposes)", - typeMsg="i", - ) + print("\t- No optimizaiton namelist provided (likely b/c for reading/plotting purposes)",typeMsg="i") self.optimization_options = None self.surrogate_parameters = { @@ -123,8 +123,8 @@ def __init__( } # Determine type of tensors to work with - torch.set_default_dtype(self.tensor_opts["dtype"]) # In case I forgot to specify a type explicitly, use as default (https://github.com/pytorch/botorch/discussions/1444) - self.dfT = torch.randn( (2, 2), **tensor_opts) + torch.set_default_dtype(self.tensor_options["dtype"]) # In case I forgot to specify a type explicitly, use as default (https://github.com/pytorch/botorch/discussions/1444) + self.dfT = torch.randn( (2, 2), **tensor_options) # Name of calibrated objectives (e.g. 
QiRes1 to represent the objective from Qi1-QiT1) self.name_objectives = None @@ -193,7 +193,6 @@ def read_optimization_results( self.fn, self.res, self.mitim_model, - self.log, self.data, ) = BOgraphics.retrieveResults( self.folder, @@ -347,7 +346,7 @@ def __init__( """ Inputs: - optimization_object : Function that is executed, - with .optimization_options in it (Dictionary with optimization parameters (must be obtained using namelist and read_mitim_nml)) + with .optimization_options in it (Dictionary with optimization parameters (must be obtained using namelist and read_mitim_yaml)) and .folder (Where the function runs) and surrogate_parameters: Parameters to pass to surrogate (e.g. for transformed function), It can be different from function_parameters because of making evaluations fast. - cold_start : If False, try to find the values from Outputs/optimization_data.csv @@ -361,26 +360,22 @@ def __init__( self.askQuestions = askQuestions self.seed = seed self.avoidPoints = [] - - if (not self.cold_start) and askQuestions: - if not print( - f"\t* Because {cold_start = }, MITIM will try to read existing results from folder", - typeMsg="q", - ): - raise Exception("[MITIM] - User requested to stop") - + if self.optimization_object.name_objectives is None: self.optimization_object.name_objectives = "y" # Folders and Logger - self.folderExecution = ( - IOtools.expandPath(self.optimization_object.folder) - if (self.optimization_object.folder is not None) - else Path("") - ) + self.folderExecution = IOtools.expandPath(self.optimization_object.folder) if (self.optimization_object.folder is not None) else Path("") self.folderOutputs = self.folderExecution / "Outputs" + if (not self.cold_start) and askQuestions: + + # Check if Outputs folder is empty (if it's empty, do not ask the user, just continue) + if self.folderOutputs.exists() and (len(list(self.folderOutputs.iterdir())) > 0): + if not print(f"\t* Because {cold_start = }, MITIM will try to read existing results from 
folder",typeMsg="q"): + raise Exception("[MITIM] - User requested to stop") + if optimization_object.optimization_options is not None: if not self.folderOutputs.exists(): IOtools.askNewFolder(self.folderOutputs, force=True) @@ -420,28 +415,43 @@ def __init__( self.surrogate_parameters = self.optimization_object.surrogate_parameters self.optimization_options = self.optimization_object.optimization_options - # Curate namelist --------------------------------------------------------------------------------- if self.optimization_options is not None: - self.optimization_options = IOtools.curate_mitim_nml( - self.optimization_options, - stopping_criteria_default = stopping_criteria_default + + # Check if the optimization options are in the namelist + optimization_options_default = IOtools.read_mitim_yaml(__mitimroot__ / "templates" / "namelist.optimization.yaml") + potential_flags = IOtools.deep_grab_flags_dict(optimization_options_default) + IOtools.check_flags_mitim_namelist( + self.optimization_options, potential_flags, + avoid = ["stopping_criteria_parameters"], # Because they are specific to the stopping criteria + askQuestions=askQuestions ) + + # Write the optimization parameters stored in the object, into a file + if self.optimization_object.folder is not None: + IOtools.write_mitim_yaml(self.optimization_options, self.optimization_object.folder / "optimization.namelist.yaml") + print(f" --> Optimization namelist written to {self.optimization_object.folder / 'optimization.namelist.yaml'}") + # ------------------------------------------------------------------------------------------------- if not onlyInitialize: - print("\n-----------------------------------------------------------------------------------------") - print("\t\t\t BO class module") - print("-----------------------------------------------------------------------------------------\n") - + """ ------------------------------------------------------------------------------ Grab variables 
------------------------------------------------------------------------------ """ + + self.timings_file = self.folderOutputs / "timing.jsonl" # Logger - self.logFile = BOgraphics.LogFile(self.folderOutputs / "optimization_log.txt") - self.logFile.activate() + sys.stdout = LOGtools.Logger(logFile=self.folderOutputs / "optimization_log.txt", writeAlsoTerminal=True) + + print("\n-----------------------------------------------------------------------------------------") + print("\t\t\t BO class module") + print("-----------------------------------------------------------------------------------------\n") + + # Print machine resources + IOtools.print_machine_info() # Meta self.numIterations = self.optimization_options["convergence_options"]["maximum_iterations"] @@ -690,22 +700,14 @@ def run(self): current_step = self.read() if current_step is None: - print( - "\t* Because reading pkl step had problems, disabling cold_starting-from-previous from this point on", - typeMsg="w", - ) - print( - "\t* Are you aware of the consequences of continuing?", - typeMsg="q", - ) + print("\t* Because reading pkl step had problems, disabling cold_starting-from-previous from this point on",typeMsg="w") + print("\t* Are you aware of the consequences of continuing?",typeMsg="q") self.cold_start = True if not self.cold_start: # Read next from Tabular - self.x_next, _, _ = self.optimization_data.extract_points( - points=np.arange(len(self.train_X), len(self.train_X) + self.best_points) - ) + self.x_next, _, _ = self.optimization_data.extract_points(points=np.arange(len(self.train_X), len(self.train_X) + self.best_points)) self.x_next = torch.from_numpy(self.x_next).to(self.dfT) # Re-write x_next from the pkl... 
reason for this is that if optimization is heuristic, I may prefer what was in Tabular @@ -741,47 +743,8 @@ def run(self): --------------------------------------------------------------------------------------- """ - train_Ystd = self.train_Ystd if (self.optimization_options["evaluation_options"]["train_Ystd"] is None) else self.optimization_options["evaluation_options"]["train_Ystd"] - - current_step = STEPtools.OPTstep( - self.train_X, - self.train_Y, - train_Ystd, - bounds=self.bounds, - stepSettings=self.stepSettings, - currentIteration=self.currentIteration, - strategy_options=self.strategy_options_use, - BOmetrics=self.BOmetrics, - surrogate_parameters=self.surrogate_parameters, - ) - - # Incorporate strategy_options for later retrieving - current_step.strategy_options_use = copy.deepcopy(self.strategy_options_use) - - self.steps.append(current_step) - - # Avoid points - avoidPoints = np.append(self.avoidPoints_failed, self.avoidPoints_outside) - self.avoidPoints = np.unique([int(j) for j in avoidPoints]) + self._step() - # ***** Fit - self.steps[-1].fit_step(avoidPoints=self.avoidPoints) - - # ***** Define evaluators - self.steps[-1].defineFunctions(self.scalarized_objective) - - # Store class with the model fitted and evaluators defined - if self.storeClass: - self.save() - - # ***** Optimize - if not self.hard_finish: - self.steps[-1].optimize( - position_best_so_far=self.BOmetrics["overall"]["indBest"], - seed=self.seed, - ) - else: - self.steps[-1].x_next = None # Pass the information about next step self.x_next = self.steps[-1].x_next @@ -906,6 +869,73 @@ def read(self, name="optimization_object.pkl", iteration=None, file=None, provid return aux if provideFullClass else step + # Convenient helper methods to track timings of components + + @mitim_timer(lambda self: f'Eval @ {self.currentIteration}', log_file=lambda self: self.timings_file) + def _evaluate(self): + + y_next, ystd_next, self.numEval = EVALUATORtools.fun( + self.optimization_object, + 
self.x_next, + self.folderExecution, + self.bounds, + self.outputs, + self.optimization_data, + parallel=self.parallel_evaluations, + cold_start=self.cold_start, + numEval=self.numEval, + ) + + return y_next, ystd_next + + @mitim_timer(lambda self: f'Surr @ {self.currentIteration}', log_file=lambda self: self.timings_file) + def _step(self): + + train_Ystd = self.train_Ystd if (self.optimization_options["evaluation_options"]["train_Ystd"] is None) else self.optimization_options["evaluation_options"]["train_Ystd"] + + current_step = STEPtools.OPTstep( + self.train_X, + self.train_Y, + train_Ystd, + bounds=self.bounds, + stepSettings=self.stepSettings, + currentIteration=self.currentIteration, + strategy_options=self.strategy_options_use, + BOmetrics=self.BOmetrics, + surrogate_parameters=self.surrogate_parameters, + ) + + # Incorporate strategy_options for later retrieving + current_step.strategy_options_use = copy.deepcopy(self.strategy_options_use) + + self.steps.append(current_step) + + # Avoid points + avoidPoints = np.append(self.avoidPoints_failed, self.avoidPoints_outside) + self.avoidPoints = np.unique([int(j) for j in avoidPoints]) + + # ***** Fit + self.steps[-1].fit_step(avoidPoints=self.avoidPoints) + + # ***** Define evaluators + self.steps[-1].defineFunctions(self.scalarized_objective) + + # Store class with the model fitted and evaluators defined + if self.storeClass: + self.save() + + # ***** Optimize + if not self.hard_finish: + self.steps[-1].optimize( + position_best_so_far=self.BOmetrics["overall"]["indBest"], + seed=self.seed, + ) + else: + self.steps[-1].x_next = None + + # --------------------------------------------------------------------------------- + + def updateSet( self, strategy_options_use, isThisCorrected=False, ForceNotApplyCorrections=False ): @@ -949,17 +979,7 @@ def updateSet( # --- Evaluation time1 = datetime.datetime.now() - y_next, ystd_next, self.numEval = EVALUATORtools.fun( - self.optimization_object, - self.x_next, - 
self.folderExecution, - self.bounds, - self.outputs, - self.optimization_data, - parallel=self.parallel_evaluations, - cold_start=self.cold_start, - numEval=self.numEval, - ) + y_next, ystd_next = self._evaluate() txt_time = IOtools.getTimeDifference(time1) print(f"\t- Complete model update took {txt_time}") # ------------------ @@ -1010,12 +1030,8 @@ def updateSet( """ print("\n~~~~~~~~~~~~~~~ Entering bounds upgrade module ~~~~~~~~~~~~~~~~~~~") print("(if extrapolations were allowed during optimization)") - self.bounds = SBOcorrections.upgradeBounds( - self.bounds, self.train_X, self.avoidPoints_outside - ) - print( - "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" - ) + self.bounds = SBOcorrections.upgradeBounds(self.bounds, self.train_X, self.avoidPoints_outside) + print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n") # ~~~~~~~~~~~~~~~~~~ # Possible corrections to modeled & optimization region @@ -1066,6 +1082,7 @@ def updateSet( return y_next, ystd_next + @mitim_timer(lambda self: f'Init', log_file=lambda self: self.timings_file) def initializeOptimization(self): print("\n") print("------------------------------------------------------------") @@ -1109,29 +1126,21 @@ def initializeOptimization(self): if (not self.cold_start) and (self.optimization_data is not None): self.type_initialization = 3 - print( - "--> Since cold_start from a previous MITIM has been requested, forcing initialization type to 3 (read from optimization_data)", - typeMsg="i", - ) + print("--> Since restart from a previous MITIM has been requested, forcing initialization type to 3 (read from optimization_data)",typeMsg="i",) if self.type_initialization == 3: print("--> Initialization by reading tabular data...") try: tabExists = len(self.optimization_data.data) >= self.initial_training - print( - f"\t- optimization_data file has {len(self.optimization_data.data)} elements, and initial_training were {self.initial_training}" - ) + 
print(f"\t- optimization_data file has {len(self.optimization_data.data)} elements, and initial_training were {self.initial_training}") except: tabExists = False print("\n\nCould not read Tabular, because:", typeMsg="w") print(traceback.format_exc()) if not tabExists: - print( - "--> type_initialization 3 requires optimization_data but something failed. Assigning type_initialization=1 and cold_starting from scratch", - typeMsg="i", - ) + print("--> type_initialization 3 requires optimization_data but something failed. Assigning type_initialization=1 and cold_starting from scratch",typeMsg="i",) if self.askQuestions: flagger = print("Are you sure?", typeMsg="q") if not flagger: @@ -1149,9 +1158,7 @@ def initializeOptimization(self): # cold_started run from previous. Grab DVs of initial set if readCasesFromTabular: try: - self.train_X, self.train_Y, self.train_Ystd = self.optimization_data.extract_points( - points=np.arange(self.initial_training) - ) + self.train_X, self.train_Y, self.train_Ystd = self.optimization_data.extract_points(points=np.arange(self.initial_training)) # It could be the case that those points in Tabular are outside the bounds that I want to apply to this optimization, remove outside points? @@ -1165,20 +1172,14 @@ def initializeOptimization(self): self.avoidPoints_outside.append(i) except: - flagger = print( - "Error reading Tabular. Do you want to continue without cold_start and do standard initialization instead?", - typeMsg="q", - ) + flagger = print("Error reading Tabular. 
Do you want to continue without cold_start and do standard initialization instead?",typeMsg="q",) self.type_initialization = 1 self.cold_start = True readCasesFromTabular = False if readCasesFromTabular and IOtools.isAnyNan(self.train_X): - flagger = print( - " --> cold_start requires non-nan DVs, doing normal initialization", - typeMsg="q", - ) + flagger = print(" --> cold_start requires non-nan DVs, doing normal initialization",typeMsg="q",) if not flagger: embed() @@ -1191,10 +1192,7 @@ def initializeOptimization(self): if not readCasesFromTabular: if self.type_initialization == 1 and self.optimization_options["problem_options"]["dvs_base"] is not None: self.initial_training = self.initial_training - 1 - print( - f"--> Baseline point has been requested with LHS initialization, reducing requested initial random set to {self.initial_training}", - typeMsg="i", - ) + print(f"--> Baseline point has been requested with LHS initialization, reducing requested initial random set to {self.initial_training}",typeMsg="i",) """ Initialization @@ -1545,12 +1543,8 @@ def plot( if plotoptimization_results: # Most current state of the optimization_results.out self.optimization_results.read() - if "logFile" in self.__dict__.keys(): - logFile = self.logFile - else: - logFile = None self.optimization_results.plot( - fn=fn, doNotShow=True, log=logFile, tab_color=tab_color + fn=fn, doNotShow=True, log=self.timings_file, tab_color=tab_color ) """ @@ -1600,11 +1594,16 @@ def plotAcquisitionOptimizationSummary(self, fn=None, step_from=0, step_to=-1): # Plot acquisition evolution for i in range(len(infoOPT)-1): #no cleanup stage y_acq = infoOPT[i]['info']['acq_evaluated'].cpu().numpy() - ax.plot(y_acq,'-o', c=colors[i], markersize=1, lw = 0.5, label=f'{infoOPT[i]["method"]} (max of batch)') + + if len(y_acq.shape)>1: + for j in range(y_acq.shape[1]): + ax.plot(y_acq[:,j],'-o', c=colors[i], markersize=0.5, lw = 0.3, label=f'{infoOPT[i]["method"]} (candidate #{j})') + else: + 
ax.plot(y_acq,'-o', c=colors[i], markersize=1, lw = 0.5, label=f'{infoOPT[i]["method"]}') # Plot max of guesses if len(y_acq)>0: - ax.axhline(y=y_acq[0], c=colors[i], ls='--', lw=1.0, label=f'{infoOPT[i]["method"]} (max of guesses)') + ax.axhline(y=y_acq.max(axis=1)[0], c=colors[i], ls='--', lw=1.0, label=f'{infoOPT[i]["method"]} (max of guesses)') ax.set_title(f'BO Step #{step}') ax.set_ylabel('$f_{acq}$ (to max)') @@ -1614,6 +1613,7 @@ def plotAcquisitionOptimizationSummary(self, fn=None, step_from=0, step_to=-1): GRAPHICStools.addDenseAxis(ax) + def plotModelStatus( self, fn=None, boStep=-1, plotsPerFigure=20, stds=2, tab_color=None ): @@ -1696,9 +1696,7 @@ def plotSurrogateOptimization(self, fig1=None, fig2=None, boStep=-1): info, boundsRaw = step.InfoOptimization, step.bounds bounds = torch.Tensor([boundsRaw[b] for b in boundsRaw]) - boundsThis = ( - info[0]["bounds"].cpu().numpy().transpose(1, 0) if "bounds" in info[0] else None - ) + boundsThis = info[0]["bounds"].cpu().numpy().transpose(1, 0) if "bounds" in info[0] else None # ---------------------------------------------------------------------- # Prep figures @@ -1845,13 +1843,19 @@ def max_val(maximum_value_orig, maximum_value_is_rel, res_base): def stopping_criteria_default(mitim_bo, parameters = {}): + + print('\n') + print('--------------------------------------------------') + print('Convergence criteria') + print('--------------------------------------------------') + # ------------------------------------------------------------------------------------ # Determine the stopping criteria # ------------------------------------------------------------------------------------ maximum_value_is_rel = parameters["maximum_value_is_rel"] maximum_value_orig = parameters["maximum_value"] - minimum_dvs_variation = parameters["minimum_dvs_variation"] + minimum_inputs_variation = parameters["minimum_inputs_variation"] res_base = -mitim_bo.BOmetrics["overall"]["Residual"][0].item() @@ -1861,8 +1865,8 @@ def 
stopping_criteria_default(mitim_bo, parameters = {}): # Stopping criteria # ------------------------------------------------------------------------------------ - if minimum_dvs_variation is not None: - converged_by_dvs, yvals = stopping_criteria_by_dvs(mitim_bo, minimum_dvs_variation) + if minimum_inputs_variation is not None: + converged_by_dvs, yvals = stopping_criteria_by_dvs(mitim_bo, minimum_inputs_variation) else: converged_by_dvs = False yvals = None @@ -1874,7 +1878,7 @@ def stopping_criteria_default(mitim_bo, parameters = {}): yvals = None converged = converged_by_value or converged_by_dvs - + return converged, yvals def stopping_criteria_by_value(mitim_bo, maximum_value): @@ -1893,28 +1897,28 @@ def stopping_criteria_by_value(mitim_bo, maximum_value): return criterion_is_met, -yvals -def stopping_criteria_by_dvs(mitim_bo, minimum_dvs_variation): +def stopping_criteria_by_dvs(mitim_bo, minimum_inputs_variation): print("\t- Checking DV variations...") _, yG_max = TESTtools.DVdistanceMetric(mitim_bo.train_X) criterion_is_met = ( mitim_bo.currentIteration - >= minimum_dvs_variation[0] - + minimum_dvs_variation[1] + >= minimum_inputs_variation[0] + + minimum_inputs_variation[1] ) - for i in range(int(minimum_dvs_variation[1])): + for i in range(int(minimum_inputs_variation[1])): criterion_is_met = criterion_is_met and ( - yG_max[-1 - i] < minimum_dvs_variation[2] + yG_max[-1 - i] < minimum_inputs_variation[2] ) if criterion_is_met: print( - f"\t\t* DVs varied by less than {minimum_dvs_variation[2]}% compared to the rest of individuals for the past {int(minimum_dvs_variation[1])} iterations" + f"\t\t* DVs varied by less than {minimum_inputs_variation[2]}% compared to the rest of individuals for the past {int(minimum_inputs_variation[1])} iterations" ) else: print( - f"\t\t* DVs have varied by more than {minimum_dvs_variation[2]}% compared to the rest of individuals for the past {int(minimum_dvs_variation[1])} iterations" + f"\t\t* DVs have varied by more than 
{minimum_inputs_variation[2]}% compared to the rest of individuals for the past {int(minimum_inputs_variation[1])} iterations" ) return criterion_is_met, yG_max @@ -1964,6 +1968,7 @@ def clean_state(folder): aux.optimization_options['convergence_options']['stopping_criteria'] = PORTALStools.stopping_criteria_portals aux.folderOutputs = folder / "Outputs" + aux.timings_file = aux.folderOutputs / "timing.jsonl" aux.save() diff --git a/src/mitim_tools/opt_tools/SURROGATEtools.py b/src/mitim_tools/opt_tools/SURROGATEtools.py index 23f4e0a9..1aa726e9 100644 --- a/src/mitim_tools/opt_tools/SURROGATEtools.py +++ b/src/mitim_tools/opt_tools/SURROGATEtools.py @@ -102,9 +102,7 @@ def __init__( # Points to be added from file if ("extrapointsFile" in self.surrogate_options) and (self.surrogate_options["extrapointsFile"] is not None) and (self.output is not None) and (self.output in self.surrogate_options["extrapointsModels"]): - print( - f"\t* Requested extension of training set by points in file {self.surrogate_options['extrapointsFile']}" - ) + print(f"\t* Requested extension of training set by points in file {self.surrogate_options['extrapointsFile']}") df = pd.read_csv(self.surrogate_options["extrapointsFile"]) df_model = df[df['Model'] == self.output] @@ -114,6 +112,7 @@ def __init__( continueAdding = False else: continueAdding = True + print(f"\t\t- Found {len(df_model)} points for this output in the file, adding them to the training set", typeMsg="i") else: continueAdding = False @@ -299,6 +298,11 @@ def _select_transition_physics_based_params(self, ): self.surrogate_transformation_variables = None if ("surrogate_transformation_variables_alltimes" in self.surrogate_parameters) and (self.surrogate_parameters["surrogate_transformation_variables_alltimes"] is not None): + # Make sure that I can read both int or str as keys to surrogate_transformation_variables_alltimes + # Change the dictionary keys to be always integers + if not 
isinstance(list(self.surrogate_parameters["surrogate_transformation_variables_alltimes"].keys())[0], int): + self.surrogate_parameters["surrogate_transformation_variables_alltimes"] = {int(k): v for k, v in self.surrogate_parameters["surrogate_transformation_variables_alltimes"].items()} + transition_position = list(self.surrogate_parameters["surrogate_transformation_variables_alltimes"].keys())[ np.where(self.num_training_points < np.array(list(self.surrogate_parameters["surrogate_transformation_variables_alltimes"].keys())))[0][0]] @@ -334,17 +338,13 @@ def normalization_pass( outcome_transform_normalization._is_trained = torch.tensor(True) def fit(self): - print( - f"\t- Fitting model to {self.train_X.shape[0]+self.train_X_added.shape[0]} points" - ) + print(f"\t- Fitting model to {self.train_X.shape[0]+self.train_X_added.shape[0]} points") # --------------------------------------------------------------------------------------------------- # Define loss Function to minimize # --------------------------------------------------------------------------------------------------- - mll = gpytorch.mlls.ExactMarginalLogLikelihood( - self.gpmodel.likelihood, self.gpmodel - ) + mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.gpmodel.likelihood, self.gpmodel) # --------------------------------------------------------------------------------------------------- # Prepare for training @@ -397,17 +397,11 @@ def perform_model_fit(self, mll): (train_x,) = mll.model.train_inputs approx_mll = len(train_x) > 2000 if approx_mll: - print( - f"\t* Using approximate MLL because x has {len(train_x)} elements", - ) + print(f"\t* Using approximate MLL because x has {len(train_x)} elements") # -------------------------------------------------- # Store first MLL value - track_fval = [ - -mll.forward(mll.model(*mll.model.train_inputs), mll.model.train_targets) - .detach() - .item() - ] + track_fval = [-mll.forward(mll.model(*mll.model.train_inputs), 
mll.model.train_targets).detach().item()] def callback(x, y, mll=mll): track_fval.append(y.fval) @@ -447,13 +441,19 @@ def predict(self, X, produceFundamental=False, nSamples=None): - Samples if nSamples not None """ - # Fast - # with gpytorch.settings.fast_computations(), gpytorch.settings.fast_pred_samples(), \ - # gpytorch.settings.fast_pred_var(), gpytorch.settings.lazily_evaluate_kernels(): + # Accurate # with gpytorch.settings.fast_computations(log_prob=False, solves=False, covar_root_decomposition=False), \ # gpytorch.settings.eval_cg_tolerance(1E-6), gpytorch.settings.fast_pred_samples(state=False), gpytorch.settings.num_trace_samples(0): + # # Fast + # with gpytorch.settings.fast_computations(), \ + # gpytorch.settings.fast_pred_samples(), \ + # gpytorch.settings.fast_pred_var(), \ + # gpytorch.settings.lazily_evaluate_kernels(True), \ + # (fundamental_model_context(self) if produceFundamental else contextlib.nullcontext(self)) as surrogate_model: + # posterior = surrogate_model.gpmodel.posterior(X) + with ( fundamental_model_context(self) if produceFundamental diff --git a/src/mitim_tools/opt_tools/optimizers/BOTORCHoptim.py b/src/mitim_tools/opt_tools/optimizers/botorch_tools.py similarity index 97% rename from src/mitim_tools/opt_tools/optimizers/BOTORCHoptim.py rename to src/mitim_tools/opt_tools/optimizers/botorch_tools.py index efe757e3..4e8f6b6c 100644 --- a/src/mitim_tools/opt_tools/optimizers/BOTORCHoptim.py +++ b/src/mitim_tools/opt_tools/optimizers/botorch_tools.py @@ -68,7 +68,7 @@ def __call__(self, x, *args, **kwargs): seq_message = f'({"sequential" if sequential_q else "joint"}) ' if q>1 else '' print(f"\t\t- Optimizing using optimize_acqf: {q = } {seq_message}, {num_restarts = }, {raw_samples = }") - with IOtools.timer(name = "\n\t- Optimization", name_timer = '\t\t- Time: '): + with IOtools.timer(name = "\n\t- Optimization"): x_opt, _ = botorch.optim.optimize_acqf( acq_function=fun_opt, bounds=fun.bounds_mod, diff --git 
a/src/mitim_tools/opt_tools/optimizers/GAtools.py b/src/mitim_tools/opt_tools/optimizers/evolutionary.py similarity index 100% rename from src/mitim_tools/opt_tools/optimizers/GAtools.py rename to src/mitim_tools/opt_tools/optimizers/evolutionary.py diff --git a/src/mitim_tools/opt_tools/optimizers/ROOTtools.py b/src/mitim_tools/opt_tools/optimizers/multivariate.py similarity index 84% rename from src/mitim_tools/opt_tools/optimizers/ROOTtools.py rename to src/mitim_tools/opt_tools/optimizers/multivariate.py index 5c6fee9a..b468e90e 100644 --- a/src/mitim_tools/opt_tools/optimizers/ROOTtools.py +++ b/src/mitim_tools/opt_tools/optimizers/multivariate.py @@ -2,7 +2,7 @@ import copy import numpy as np from mitim_tools.misc_tools.LOGtools import printMsg as print -from mitim_tools.opt_tools.optimizers import optim +from mitim_tools.opt_tools.optimizers import multivariate_tools from mitim_tools.opt_tools.utils import TESTtools from IPython import embed @@ -29,7 +29,7 @@ def optimize_function(fun, optimization_params = {}, writeTrajectory=False, meth 'solver': optimization_params.get("solver","lm"), 'write_trajectory': writeTrajectory } - solver_fun = optim.scipy_root + solver_fun = multivariate_tools.scipy_root numZ = 5 elif method == "sr": @@ -37,14 +37,13 @@ def optimize_function(fun, optimization_params = {}, writeTrajectory=False, meth print("\t- Implementation of simple relaxation method") solver_options = { - "tol": optimization_params.get("tol",-1e-6), "tol_rel": optimization_params.get("relative_improvement_for_stopping",1e-4), - "maxiter": optimization_params.get("maxiter",2000), + "maxiter": optimization_params.get("maxiter",1000), "relax": optimization_params.get("relax",0.1), "relax_dyn": optimization_params.get("relax_dyn",True), - "print_each": optimization_params.get("maxiter",2000)//20, + "print_each": optimization_params.get("maxiter",1000)//20, } - solver_fun = optim.simple_relaxation + solver_fun = multivariate_tools.simple_relaxation numZ = 6 # 
-------------------------------------------------------------------------------------------------------- @@ -56,24 +55,13 @@ def flux_residual_evaluator(X, y_history=None, x_history=None, metric_history=No # Evaluate source term yOut, y1, y2, _ = fun.evaluators["residual_function"](X, outputComponents=True) - # ----------------------------------------- - # Post-process - # ----------------------------------------- - - # Best in batch - best_candidate = yOut.argmax().item() - # Only pass the best candidate - yRes = (y2-y1)[best_candidate, :].detach() - yMetric = yOut[best_candidate].detach() - Xpass = X[best_candidate, :].detach() - # Store values if metric_history is not None: - metric_history.append(yMetric) + metric_history.append(yOut.detach()) if x_history is not None: - x_history.append(Xpass) + x_history.append(X.detach()) if y_history is not None: - y_history.append(yRes) + y_history.append((y2-y1).detach()) return y1, y2, yOut @@ -104,7 +92,7 @@ def flux_residual_evaluator(X, y_history=None, x_history=None, metric_history=No # -------------------------------------------------------------------------------------------------------- print("************************************************************************************************") - x_res, _, _, acq_evaluated = solver_fun(flux_residual_evaluator,xGuesses,solver_options=solver_options,bounds=bounds) + x_res, y_history, x_history, acq_evaluated = solver_fun(flux_residual_evaluator,xGuesses,solver_options=solver_options,bounds=bounds) print("************************************************************************************************") # -------------------------------------------------------------------------------------------------------- diff --git a/src/mitim_tools/opt_tools/optimizers/optim.py b/src/mitim_tools/opt_tools/optimizers/multivariate_tools.py similarity index 62% rename from src/mitim_tools/opt_tools/optimizers/optim.py rename to src/mitim_tools/opt_tools/optimizers/multivariate_tools.py 
index f8ca9d3c..bba219ba 100644 --- a/src/mitim_tools/opt_tools/optimizers/optim.py +++ b/src/mitim_tools/opt_tools/optimizers/multivariate_tools.py @@ -1,8 +1,10 @@ +from operator import index import torch import copy import numpy as np +import matplotlib.pyplot as plt from scipy.optimize import root -from mitim_tools.misc_tools import IOtools +from mitim_tools.misc_tools import GRAPHICStools, IOtools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed @@ -17,10 +19,10 @@ def scipy_root(flux_residual_evaluator, x_initial, bounds=None, solver_options=N - flux_residual_evaluator is a function that: - Takes X (batches,dimX) - Provides Y1: transport (batches,dimY), Y2: target (batches,dimY) and M: maximization metric (batches,1) - It must also take optional arguments, to capture the best in the batch: + It must also take optional arguments, to capture the evolution of the batch: x_history - y_history () - metric_history (to maximize, similar to acquisition definition, must be 1D, best in batch) + y_history + metric_history (to maximize, similar to acquisition definition) Outputs: - Optium vector x_sol with (batches,dimX) and the trajectory of the acquisition function evaluations (best per batch) Notes: @@ -127,7 +129,7 @@ def function_for_optimizer(x, dfT1=torch.zeros(1).to(x_initial)): # Perform optimization # -------------------------------------------------------------------------------------------------------- - with IOtools.timer(name="\t- SCIPY.ROOT multi-variate root finding method"): + with IOtools.timer(name="SCIPY.ROOT multi-variate root finding method"): sol = root(function_for_optimizer, x_initial0, jac=jac_ad, method=solver, tol=tol, options=algorithm_options) # -------------------------------------------------------------------------------------------------------- @@ -171,37 +173,46 @@ def function_for_optimizer(x, dfT1=torch.zeros(1).to(x_initial)): return x_best, y_history, x_history, metric_history - # 
-------------------------------------------------------------------------------------------------------- # Ready to go optimization tool: Simple Relax # -------------------------------------------------------------------------------------------------------- -def simple_relaxation( flux_residual_evaluator, x_initial, bounds=None, solver_options=None ): +def simple_relaxation( flux_residual_evaluator, x_initial, bounds=None, solver_options=None, debug=False ): """ See scipy_root for the inputs and outputs """ - tol = solver_options.get("tol", -1e-6) # Tolerance for the residual (negative because I want to maximize) - tol_rel = solver_options.get("tol_rel", None) # Relative tolerance for the residual (superseeds tol) + # ******************************************************************************************** + # Solver options + # ******************************************************************************************** + + tol = solver_options.get("tol", -1e-6) # Tolerance for the residual (negative because I want to maximize) + tol_rel = solver_options.get("tol_rel", None) # Relative tolerance for the residual (superseeds tol) maxiter = solver_options.get("maxiter", 1e5) - relax = solver_options.get("relax", 0.1) # Defines relationship between flux_residual_evaluator and gradient - dx_max = solver_options.get("dx_max", 0.1) # Maximum step size in gradient, relative (e.g. a/Lx can only increase by 10% each time) - dx_max_abs = solver_options.get("dx_max_abs", None) # Maximum step size in gradient, absolute (e.g. a/Lx can only increase by 0.1 each time) - dx_min_abs = solver_options.get("dx_min_abs", None) # Minimum step size in gradient, absolute (e.g. a/Lx must at least increase by 0.01 each time) + relax0 = solver_options.get("relax", 0.1) # Defines relationship between flux_residual_evaluator and gradient + dx_max = solver_options.get("dx_max", 0.1) # Maximum step size in gradient, relative (e.g. 
a/Lx can only increase by 10% each time) + dx_max_abs = solver_options.get("dx_max_abs", None) # Maximum step size in gradient, absolute (e.g. a/Lx can only increase by 0.1 each time) + dx_min_abs = solver_options.get("dx_min_abs", 1E-5) # Minimum step size in gradient, absolute (e.g. a/Lx must at least increase by 0.01 each time) relax_dyn = solver_options.get("relax_dyn", False) # Dynamic relax, decreases relax if residual is not decreasing relax_dyn_decrease = solver_options.get("relax_dyn_decrease", 5) # Decrease relax by this factor - relax_dyn_num = solver_options.get("relax_dyn_num", 100) # Number of iterations to average over - relax_dyn_tol = solver_options.get("relax_dyn_tol", 1e-4) # Tolerance to consider that the residual is not decreasing + relax_dyn_num = solver_options.get("relax_dyn_num", 100) # Number of iterations to average over and check if the residual is decreasing print_each = solver_options.get("print_each", 1e2) - write_trajectory = solver_options.get("write_trajectory", True) - x_history, y_history, metric_history = [], [], [] - + thr_bounds = 1e-4 # To avoid being exactly in the bounds (relative -> 0.01%) + # ******************************************************************************************** + # Initial condition + # ******************************************************************************************** + + # Convert relax to tensor of the same dimensions as x, such that it can be dynamically changed per channel + relax = torch.ones_like(x_initial) * relax0 + + x_history, y_history, metric_history = [], [], [] + x = copy.deepcopy(x_initial) Q, QT, M = flux_residual_evaluator( x, @@ -209,64 +220,87 @@ def simple_relaxation( flux_residual_evaluator, x_initial, bounds=None, solver_o x_history = x_history if write_trajectory else None, metric_history = metric_history if write_trajectory else None ) - print(f"\t* Starting residual: {(Q-QT).abs().mean(axis=1)[0].item():.4e}, will run {int(maxiter)-1} more evaluations, printing 
every {print_each} iteration:",typeMsg="i") + + print(f"\t* Starting best-candidate residual: {(Q-QT).abs().mean(axis=1).min().item():.4e}, will run {int(maxiter)-1} more evaluations, printing every {print_each} iteration",typeMsg="i") if tol_rel is not None: tol = tol_rel * M.max().item() print(f"\t* Relative tolerance of {tol_rel:.1e} will be used, resulting in an absolute tolerance of {tol:.1e}") + print(f"\t* Flux-grad relationship of {relax0} and maximum gradient jump of {dx_max}") - print(f"\t* Flux-grad relationship of {relax*100.0:.1f}% and maximum gradient jump of {dx_max*100.0:.1f}%,{f' to achieve residual of {tol:.1e}' if tol is not None else ''} in maximum of {maxiter:.0f} iterations") + # ******************************************************************************************** + # Iterative strategy + # ******************************************************************************************** - # Convert relax to tensor of the same dimensions as x, such that it can be dynamically changed per channel - relax = torch.ones_like(x) * relax - - its_since_last_dyn_relax = 0 - i = 0 + hardbreak = False + relax_history, step_history = [], [] + its_since_last_dyn_relax, i = 0, 0 + for i in range(int(maxiter) - 1): - # -------------------------------------------------------------------------------------------------------- - # Iterative Strategy - # -------------------------------------------------------------------------------------------------------- - x_new = _simple_relax_iteration(x, Q, QT, relax, dx_max, dx_max_abs = dx_max_abs, dx_min_abs = dx_min_abs) - - # Clamp to bounds - if bounds is not None: - bb = bounds[1,:]-bounds[0,:] - x_new = x_new.clamp(min=bounds[0,:]+thr_bounds*bb, max=bounds[1,:]-thr_bounds*bb) + # Make a step in the gradient direction + x_new, x_step = _sr_step( + x, + Q, + QT, + relax, + dx_max, + dx_max_abs=dx_max_abs, + dx_min_abs=dx_min_abs, + bounds=bounds, + thr_bounds=thr_bounds + ) + # Make it the new point x = x_new.clone() 
- # -------------------------------------------------------------------------------------------------------- + # Evaluate new residual Q, QT, M = flux_residual_evaluator( x, - y_history = y_history if write_trajectory else None, - x_history = x_history if write_trajectory else None, - metric_history = metric_history - ) + y_history=y_history if write_trajectory else None, + x_history=x_history if write_trajectory else None, + metric_history=metric_history if write_trajectory else None + ) # Best metric of the batch - metric_best = M.max(axis=-1)[0].item() + metric_best = M.max().item() if (i + 1) % int(print_each) == 0: - print(f"\t\t- Metric (to maximize) @{i+1}: {metric_best:.2e}") + print(f"\t\t- Best metric (to maximize) @{i+1}: {metric_best:.2e}") # Stopping based on the best of the batch based on the metric - if tol is not None and M.max().item() > tol: - print(f"\t\t- Metric (to maximize) @{i+1}: {metric_best:.2e}",typeMsg="i") + if (tol is not None) and (M.max().item() > tol): print(f"\t* Converged in {i+1} iterations with metric of {metric_best:.2e} > {tol:.2e}",typeMsg="i") break - if relax_dyn and (i-its_since_last_dyn_relax > relax_dyn_num): - relax, changed, hardbreak = _dynamic_relaxation(relax, relax_dyn_decrease, metric_history, relax_dyn_num, relax_dyn_tol,i+1) - if changed: - its_since_last_dyn_relax = i - if hardbreak: - break + # Update the dynamic relax if needed + if relax_dyn: + relax, its_since_last_dyn_relax, hardbreak = _dynamic_relax( + x_history, + y_history, + relax, + relax_dyn_decrease, + relax_dyn_num, + i, + its_since_last_dyn_relax + ) + + # For debugging + if debug: + step_history.append(x_step.detach().clone()) + relax_history.append(relax.clone()) + + if hardbreak: + break if i == int(maxiter) - 2: print(f"\t* Did not converge in {maxiter} iterations",typeMsg="i") + # ******************************************************************************************** + # Debugging, storing and plotting + # 
******************************************************************************************** + if write_trajectory: try: y_history = torch.stack(y_history) @@ -278,49 +312,68 @@ def simple_relaxation( flux_residual_evaluator, x_initial, bounds=None, solver_o x_history = torch.Tensor(x_history) try: metric_history = torch.stack(metric_history) - except(TypeError,RuntimeError): + except (TypeError,RuntimeError): metric_history = torch.Tensor(metric_history) else: y_history, x_history, metric_history = torch.Tensor(), torch.Tensor(), torch.Tensor() - index_best = metric_history.argmax() - print(f"\t* Best metric: {metric_history[index_best].mean().item():.2e} at iteration {index_best}",typeMsg="i") + if debug: + + relax_history = torch.stack(relax_history) + step_history = torch.stack(step_history) + + for candidate in range(x_history.shape[1]): + + fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 10), sharex=True) + + axs = axs.flatten() + + x = x_history[:,candidate,:].cpu().numpy() + y = y_history[:,candidate,:].cpu().numpy() + r = relax_history[:,candidate,:].cpu().numpy() + m = metric_history[:,candidate].cpu().numpy() + s = step_history[:,candidate,:].cpu().numpy() + + colors = GRAPHICStools.listColors()[:x.shape[-1]] + + xvals = np.arange(x.shape[0]) + plot_ranges = range(x.shape[1]) + + for k in plot_ranges: + axs[0].plot(xvals, x[:,k], '-o', markersize=0.5, lw=1.0, label=f"x{k}", color=colors[k]) + axs[1].plot(xvals, y[:,k], '-o', markersize=0.5, lw=1.0,color=colors[k]) + axs[2].plot(xvals[1:r.shape[0]+1], r[:,k], '-o', markersize=0.5, lw=1.0,color=colors[k]) + axs[3].plot(xvals[1:r.shape[0]+1], s[:,k], '-o', markersize=0.5, lw=1.0,color=colors[k]) + axs[5].plot(xvals, m, '-o', markersize=0.5, lw=1.0) + + for i in range(len(axs)): + GRAPHICStools.addDenseAxis(axs[i]) + axs[i].set_xlabel("Iteration") + + axs[0].set_title("x history"); axs[0].legend() + axs[1].set_title("y history") + axs[2].set_title("Relax history"); axs[2].set_yscale('log') + 
axs[3].set_title("Step history") + axs[5].set_title("Metric history") + + plt.tight_layout() + + plt.show() + + embed() + + # Find the best iteration of each candidate trajectory + index_bests = metric_history.argmax(dim=0) + x_best = x_history[index_bests, torch.arange(x_history.shape[1]), :] - # The best candidate, regardless of the restarts - x_best = x_history[index_best,:].unsqueeze(0) + idx_flat = metric_history.argmax() + index_best = divmod(idx_flat.item(), metric_history.shape[1]) + print(f"\t* Best metric: {metric_history[index_best].item():.2e} at iteration {index_best[0]} for candidate in position {index_best[1]}",typeMsg="i") return x_best, y_history, x_history, metric_history - -def _dynamic_relaxation(relax, relax_dyn_decrease, metric_history, relax_dyn_num, relax_dyn_tol, it, min_relax=1e-6): - ''' - Logic: If the metric is not improving enough, decrease the relax parameter. To determine - if the metric is improving enough, I will fit a line to the last relax_dyn_num points and - check if the slope is small enough. If it is, I will decrease the relax parameter. 
- ''' - - metric_history_considered = torch.Tensor(metric_history[-relax_dyn_num:]) - - # Linear fit to the time series - x = np.arange(len(metric_history_considered)) - y = metric_history_considered - slope, intercept = np.polyfit(x, y, 1) - metric0 = intercept - metric1 = slope * len(metric_history_considered) + intercept - change_in_metric = metric1 - metric0 - - if (change_in_metric < relax_dyn_tol): - if relax.all() > min_relax: - print(f"\t\t\t<> Metric not improving enough (@{it}), decreasing relax from {relax.max():.1e} to {relax.max()/relax_dyn_decrease:.1e}") - relax = relax / relax_dyn_decrease - return relax, True, False - else: - print(f"\t\t\t<> Metric not improving enough (@{it}), relax already at minimum of {min_relax:.1e}, not worth continuing", typeMsg="i") - return relax, False, True - else: - return relax, False, False - -def _simple_relax_iteration(x, Q, QT, relax, dx_max, dx_max_abs = None, dx_min_abs = None, threshold_zero_flux_issue=1e-10): +def _sr_step(x, Q, QT, relax, dx_max, dx_max_abs = None, dx_min_abs = None, threshold_zero_flux_issue=1e-10, bounds=None, thr_bounds=1e-4): + # Calculate step in gradient (if target > transport, dx>0 because I want to increase gradients) dx = relax * (QT - Q) / (Q**2 + QT**2).clamp(min=threshold_zero_flux_issue) ** 0.5 @@ -336,6 +389,7 @@ def _simple_relax_iteration(x, Q, QT, relax, dx_max, dx_max_abs = None, dx_min_a ix = x_step.abs() > dx_max_abs direction = torch.nan_to_num(x_step[ix] / x_step[ix].abs(), nan=1.0) x_step[ix] = dx_max_abs * direction + if dx_min_abs is not None: ix = x_step.abs() < dx_min_abs direction = torch.nan_to_num(x_step[ix] / x_step[ix].abs(), nan=1.0) @@ -344,7 +398,103 @@ def _simple_relax_iteration(x, Q, QT, relax, dx_max, dx_max_abs = None, dx_min_a # Update x_new = x + x_step - return x_new + # Clamp to bounds + if bounds is not None: + thr_bounds_abs = ( bounds[1,:] - bounds[0,:]) * thr_bounds + x_new = x_new.clamp(min=bounds[0,:]+thr_bounds_abs, 
max=bounds[1,:]-thr_bounds_abs) + + return x_new, x_step + +def _dynamic_relax(x, y, relax, relax_dyn_decrease, relax_dyn_num, iteration_num, iteration_applied): + + min_relax = 1e-6 + + if iteration_num - iteration_applied > relax_dyn_num: + + mask_reduction = _check_oscillation(torch.stack(x), relax_dyn_num) + + if mask_reduction.any(): + + if (relax < min_relax).all(): + print(f"\t\t\t<> Oscillatory behavior detected (@{iteration_num}), all relax already at minimum of {min_relax:.1e}, not worth continuing", typeMsg="i") + return relax, iteration_applied, True + + print(f"\t\t\t<> Oscillatory behavior detected (@{iteration_num}), decreasing relax for {mask_reduction.sum()} out of {torch.stack(x).shape[1]*torch.stack(x).shape[2]} channels") + + relax[mask_reduction] = relax[mask_reduction] / relax_dyn_decrease + + print(f"\t\t\t\t- New relax values span from {relax.min():.1e} to {relax.max():.1e}") + + iteration_applied = iteration_num + + return relax, iteration_applied, False + +def _check_oscillation(signal_raw, relax_dyn_num): + + """Check for oscillations using FFT to detect dominant frequencies""" + + # Stack batch dimension (time, batch, dim) -> (time, batch*dim) + signal = signal_raw.reshape(signal_raw.shape[0], -1) + + oscillating_dims = torch.zeros(signal.shape[1:], dtype=torch.bool) + + # fig, axs = plt.subplots(nrows=2, figsize=(6, 6)) + # colors = GRAPHICStools.listColors() + + for i in range(signal.shape[1]): + + iterations_to_consider = relax_dyn_num + + # Only consider a number of last iterations + y_vals = signal[-iterations_to_consider:, i].cpu().numpy() + + # If the signal is not constant + if y_vals.std() > 0.0: + + # Remove DC component and apply FFT + y_detrended = y_vals - np.mean(y_vals) + fft_vals = np.fft.fft(y_detrended) + power_spectrum = np.abs(fft_vals[1:len(fft_vals)//2+1]) # Exclude DC and negative frequencies + + # Check if there's a dominant frequency + excl = 2 + p_around = 1 + argmax_power = np.argmax(power_spectrum[excl:]) # 
Exclude lowest frequencies + max_power = np.sum(power_spectrum[(argmax_power+excl) - p_around:(argmax_power+excl) + p_around]) + total_power = np.sum(power_spectrum) + + # If a single frequency dominates (30%), it might be oscillating (even if low frequency) + single_frequency_power = max_power / total_power + single_frequency_dominance = bool(single_frequency_power > 0.3) + + # If more than 50% of the power comes from high frequencies (>1/3), consider it oscillating + index_high_freq = len(power_spectrum) // 3 + high_frequency_power = np.sum(power_spectrum[index_high_freq:]) / total_power + high_frequency_dominance = bool(high_frequency_power > 0.5) + + # if signal completely flat, it's an indication that has hit the bounds, also consider it oscillating + signal_flat = bool(y_vals.std() < 1e-6) + + # If the signal is constant, consider it non-oscillating but flat + else: + single_frequency_dominance = False + high_frequency_dominance = False + signal_flat = True + + oscillating_dims[i] = single_frequency_dominance or high_frequency_dominance or signal_flat + + + # Back to the original shape + oscillating_dims = oscillating_dims.reshape(signal_raw.shape[1:]) + + # axs[0].plot(y_vals, color=colors[i], ls='-' if oscillating_dims[i] else '--') + # axs[1].plot(power_spectrum/max_power, label = f"{single_frequency_power:.3f}, {high_frequency_power:.3f}, {y_vals.std():.1e}", color=colors[i], ls='-' if oscillating_dims[i] else '--') + # axs[1].legend(loc='best',prop={'size': 6}) + # plt.show() + + return oscillating_dims + + ''' ********************************************************************************************************************************** diff --git a/src/mitim_tools/opt_tools/scripts/evaluate_model.py b/src/mitim_tools/opt_tools/scripts/evaluate_model.py index 4b5ac2fa..95c7392d 100644 --- a/src/mitim_tools/opt_tools/scripts/evaluate_model.py +++ b/src/mitim_tools/opt_tools/scripts/evaluate_model.py @@ -2,7 +2,7 @@ import argparse import numpy as np 
import matplotlib.pyplot as plt -from mitim_tools.misc_tools import IOtools +from mitim_tools.misc_tools import IOtools, GRAPHICStools from mitim_tools.opt_tools import STRATEGYtools """ @@ -10,8 +10,8 @@ This way, you can try plot, re-ft, find best parameters, etc. It calculates speed, and generates profile file to look at bottlenecks e.g. - evaluate_model.py --folder run1/ --output QiTurb_5 --input aLti_5 --around -3 - evaluate_model.py --folder run1/ --step -1 --output QiTurb_5 --file figure.eps + evaluate_model.py --folder run1/ --output Qi_tr_turb_5 --inputs aLti_5 --around -3 + evaluate_model.py --folder run1/ --step -1 --output Qi_tr_turb_5 --file figure.eps """ # ***************** Inputs @@ -19,8 +19,8 @@ parser = argparse.ArgumentParser() parser.add_argument("--folder", required=True, type=str) parser.add_argument("--step", type=int, required=False, default=-1) -parser.add_argument("--output", required=False, type=str, default="QiTurb_1") -parser.add_argument("--input", required=False, type=str, default="aLti_1") +parser.add_argument("--output", required=False, type=str, default="Qi_tr_turb_1") +parser.add_argument("--inputs", required=False, type=str,nargs='*', default=["aLti_1"]) parser.add_argument("--around", type=int, required=False, default=-1) parser.add_argument("--xrange", type=float, required=False, default=0.5) parser.add_argument("--file", type=str, required=False, default=None) # File to save .eps @@ -31,7 +31,7 @@ folderWork = IOtools.expandPath(args.folder) step_num = args.step output_label = args.output -input_label = args.input +input_labels = args.inputs file = args.file plotYN = args.plot around = args.around @@ -48,12 +48,19 @@ # ***************** Plot +cols = GRAPHICStools.listColors() + if plotYN: gp.plot() if file is not None: plt.savefig(file, transparent=True, dpi=300) - gp.localBehavior_scan(gpA.train_X[around, :], dimension_label=input_label,xrange=xrange) + fig, axs = plt.subplots(nrows=2, figsize=(6, 9)) + for i,input_label in 
enumerate(input_labels): + gp.localBehavior_scan(gpA.train_X[around, :], dimension_label=input_label,xrange=xrange, axs=axs, c=cols[i], label=input_label) + + axs[0].legend() + axs[0].set_title("Full behavior (untransformed space)") # gp.plot(plotFundamental=False) # gp.plotTraining() diff --git a/src/mitim_tools/opt_tools/scripts/evaluate_optimizer_root.py b/src/mitim_tools/opt_tools/scripts/evaluate_optimizer_root.py index 1d54cc79..86580247 100644 --- a/src/mitim_tools/opt_tools/scripts/evaluate_optimizer_root.py +++ b/src/mitim_tools/opt_tools/scripts/evaluate_optimizer_root.py @@ -7,7 +7,7 @@ from mitim_tools.misc_tools import IOtools, GRAPHICStools from mitim_tools.opt_tools import STRATEGYtools, OPTtools from mitim_tools.opt_tools.utils import TESTtools -from mitim_tools.opt_tools.optimizers import ROOTtools +from mitim_tools.opt_tools.optimizers import multivariate from IPython import embed """ @@ -60,7 +60,7 @@ # for opt,lab in enumerate(['vectorize=True']): #,'vectorize=False']): for opt, lab in enumerate(["x0=0"]): # ,'x0=1.0']): #,'vectorize=False']): - logi = ROOTtools.logistic(l=bounds_logi[0, :], u=bounds_logi[1, :], k=0.5, x0=0) + logi = multivariate.logistic(l=bounds_logi[0, :], u=bounds_logi[1, :], k=0.5, x0=0) # *************************************************************************************************** # OPTIMIZER # *************************************************************************************************** diff --git a/src/mitim_tools/opt_tools/scripts/evaluate_speed.py b/src/mitim_tools/opt_tools/scripts/evaluate_speed.py index bc917504..a84625d3 100644 --- a/src/mitim_tools/opt_tools/scripts/evaluate_speed.py +++ b/src/mitim_tools/opt_tools/scripts/evaluate_speed.py @@ -25,9 +25,7 @@ x = torch.rand(cases, step.train_X.shape[-1]) with IOtools.speeder(f"profiler{name}.prof") as s: - with torch.no_grad(): - mean, upper, lower, _ = step.GP["combined_model"].predict(x) + #with torch.no_grad(): + mean, upper, lower, _ = 
step.GP["combined_model"].predict(x) -print( - f"\nIt took {s.timeDiff:.3f}s to run {x.shape[0]:.1e} parallel evaluations (i.e. {s.timeDiff*1E6/cases:.3f}micro-s/member) of {mean.shape[-1]} GPs with {x.shape[-1]} raw input dimensions" -) +print(f"\nIt took {s.timeDiff:.3f}s to run {x.shape[0]:.1e} parallel evaluations (i.e. {s.timeDiff*1E6/cases:.3f}micro-s/member) of {mean.shape[-1]} GPs with {x.shape[-1]} raw input dimensions") diff --git a/src/mitim_tools/opt_tools/scripts/read.py b/src/mitim_tools/opt_tools/scripts/read.py index f3001a08..a5b86494 100644 --- a/src/mitim_tools/opt_tools/scripts/read.py +++ b/src/mitim_tools/opt_tools/scripts/read.py @@ -5,6 +5,8 @@ from mitim_tools.misc_tools import IOtools, GRAPHICStools from mitim_tools.opt_tools import STRATEGYtools from mitim_tools.misc_tools.LOGtools import printMsg as print +from mitim_tools.misc_tools.utils import remote_tools + # These import are usually needed if they are called within the pickling object import torch @@ -53,100 +55,16 @@ def plotCompare(folders, plotMeanMax=[True, False]): ax0 = fig.add_subplot(grid[0, 0]) ax1 = fig.add_subplot(grid[1, 0], sharex=ax0) ax2 = fig.add_subplot(grid[0, 1]) - ax3 = fig.add_subplot(grid[1, 1]) + ax3 = fig.add_subplot(grid[1, 1],sharex=ax2) ax1i = fig.add_subplot(grid[2, 0], sharex=ax0) - types_ls = [ - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - "-", - "--", - "-.", - ":", - ] - types_m = [ - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - "o", - "s", - "^", - "v", - "*", - ] + types_ls = GRAPHICStools.listLS() + types_m = 
GRAPHICStools.listmarkers maxEv = -np.inf yCummMeans = [] xes = [] resS = [] - logS = [] for i, (color, name, folderWork) in enumerate(zip(colors, names, folderWorks)): res = BOgraphics.optimization_results( folderWork / "Outputs" / "optimization_results.out" @@ -156,14 +74,6 @@ def plotCompare(folders, plotMeanMax=[True, False]): ) res.read() - log_class = BOgraphics.LogFile(folderWork / "Outputs" / "optimization_log.txt") - - try: - log_class.interpret() - except: - print("Could not read log", typeMsg="w") - log_class = None - plotAllmembers = len(folderWorks) <= 3 xe, yCummMean = res.plotImprovement( axs=[ax0, ax1, ax1i, None], @@ -178,32 +88,24 @@ def plotCompare(folders, plotMeanMax=[True, False]): #compared = -yCummMean[0] * conv if conv < 0 else conv #ax1.axhline(y=compared, ls="-.", lw=0.3, color=color) - if log_class is not None: - log_class.plot( - axs=[ax2, ax3], - ls=types_ls[i], - lab=name, - marker=types_m[i], - color=colors[i], - ) + IOtools.plot_timings( + folderWork / "Outputs" / "timing.jsonl", axs=[ax2, ax3], label=name, color=color + ) yCummMeans.append(yCummMean) xes.append(xe) resS.append(res) - logS.append(log_class) ax0.set_xlim([0, maxEv]) ax2.legend(prop={"size": 6}) ax3.legend(prop={"size": 6}) - return yCummMeans, xes, resS, logS, fig + return yCummMeans, xes, resS, fig def main(): - - # ----- Inputs parser = argparse.ArgumentParser() @@ -211,43 +113,49 @@ def main(): "--type", type=int, required=False, default=4 ) # 0: Only ResultsOpt plotting, 1: Also pickle, 2: Also final analysis, 3: Others parser.add_argument("folders", type=str, nargs="*") - parser.add_argument("--remote", "-r", type=str, required=False, default=None) parser.add_argument("--seeds", type=int, required=False, default=None) parser.add_argument("--resolution", type=int, required=False, default=50) parser.add_argument("--save", type=str, required=False, default=None) parser.add_argument("--conv", type=float, required=False, default=-1e-2) parser.add_argument("--its", 
type=int, nargs="*", required=False, default=None) + # Remote options + parser.add_argument("--remote",type=str, required=False, default=None, + help="Remote machine to retrieve the folders from. If not provided, it will read the local folders.") + parser.add_argument("--remote_folder_parent",type=str, required=False, default=None, + help="Parent folder in the remote machine where the folders are located. If not provided, it will use --remote_folders.") + parser.add_argument("--remote_folders",type=str, nargs="*", required=False, default=None, + help="List of folders in the remote machine to retrieve. If not provided, it will use the local folder structures.") + # parser.add_argument("--remote_minimal", required=False, default=False, action="store_true", + # help="If set, it will only retrieve the folder structure with a few key files") + parser.add_argument('--fix', required=False, default=False, action='store_true', + help="If set, it will fix the pkl optimization portals in the remote folders.") + args = parser.parse_args() analysis_level = args.type - folders = args.folders - remote_parent = args.remote seeds = args.seeds resolution = args.resolution save_folder = args.save conv = args.conv rangePlot = args.its -# ----------------------------------------- + # -------------------------------------------------------------------------------------------------------------------------------------------- + # Retrieve from remote + # -------------------------------------------------------------------------------------------------------------------------------------------- - # ----- Folders (complete local path) - folders_complete = [] - for i in range(len(folders)): - if seeds is not None: - aux = [f"{folders[i]}_s{k}" for k in range(seeds)] - folders_complete.extend(aux) - else: - folders_complete.append(folders[i]) + folders = remote_tools.retrieve_remote_folders(args.folders, args.remote, args.remote_folder_parent, args.remote_folders, None) + + # 
-------------------------------------------------------------------------------------------------------------------------------------------- + # Fix pkl optimization portals in remote + # -------------------------------------------------------------------------------------------------------------------------------------------- + + if args.fix: + for folder in folders: + STRATEGYtools.clean_state(folder) - txt = "***************************************************************************\n" - for i in range(len(folders_complete)): - folders_complete[i] = IOtools.expandPath(folders_complete[i]) - folders_complete[i].mkdir(parents=True, exist_ok=True) - txt += f"* Reading results in {folders_complete[i]}\n" - - # ----- Folders (reduced local path) - folders_reduced = [IOtools.reducePathLevel(folderWork)[-1] for folderWork in folders_complete] + + folders_complete = folders if len(folders_complete) > 1: retrieval_level = copy.deepcopy(analysis_level) @@ -255,27 +163,13 @@ def main(): else: retrieval_level = analysis_level - if remote_parent is None: - folders_remote = [None] * len(folders_complete) - else: - folders_remote = [ - f"{remote_parent}/{reduced_folder}/" - for reduced_folder in folders_reduced - ] - txt += f"\n\t...From remote folder {remote_parent}\n" - - print( - "\n" - + txt - + "***************************************************************************" - ) + print(f"(Analysis level {analysis_level})\n") if len(folders_complete) == 1: opt_fun = STRATEGYtools.opt_evaluator(folders_complete[0]) opt_fun.plot_optimization_results( analysis_level=analysis_level, - folderRemote=folders_remote[0], retrieval_level=retrieval_level, pointsEvaluateEachGPdimension=resolution, save_folder=save_folder, @@ -283,12 +177,11 @@ def main(): ) else: opt_funs = [] - for folderWork, folderRemote in zip(folders_complete, folders_remote): + for folderWork in folders_complete: opt_fun = STRATEGYtools.opt_evaluator(folderWork) try: opt_fun.plot_optimization_results( 
analysis_level=analysis_level, - folderRemote=folderRemote, retrieval_level=retrieval_level, save_folder=save_folder, rangesPlot=rangePlot, @@ -298,7 +191,7 @@ def main(): opt_funs.append(opt_fun) if analysis_level == -1: - yCummMeans, xes, resS, logS, fig = plotCompare( + yCummMeans, xes, resS, fig = plotCompare( folders_complete, plotMeanMax=[True, len(folders_complete) < 2] ) diff --git a/src/mitim_tools/opt_tools/scripts/slurm.py b/src/mitim_tools/opt_tools/scripts/slurm.py index 4eacb7a7..0683238c 100644 --- a/src/mitim_tools/opt_tools/scripts/slurm.py +++ b/src/mitim_tools/opt_tools/scripts/slurm.py @@ -1,5 +1,6 @@ import os from mitim_tools.misc_tools import FARMINGtools, IOtools +from IPython import embed """ This script is used to launch a slurm job with a scpecific script like... python3 run_case.py 0 --R 6.0 @@ -16,7 +17,9 @@ def run_slurm( seed_specific=0, machine="local", exclude=None, - mem=None + mem=None, + exclusive=False, + wait=False, ): folder = IOtools.expandPath(folder) @@ -38,11 +41,81 @@ def run_slurm( folder.mkdir(parents=True, exist_ok=True) command = [venv,script + (f" --seed {seed}" if seed is not None else "")] - - nameJob = f"mitim_opt_{folder.name}{extra_name}" + nameJob = f"mitim_{folder.name}{extra_name}" _, fileSBATCH, _ = FARMINGtools.create_slurm_execution_files( command, + folder, + folder_local=folder, + slurm={"partition": partition, 'exclude': exclude,'exclusive': exclusive}, + slurm_settings = { + 'name': nameJob, + 'minutes': int(60 * hours), + 'ntasks': 1, + 'cpuspertask': n, + 'memory_req_by_job': mem + } + ) + + if wait == True: + print('* Waiting for job to complete...') + command_execution = f"sbatch --wait {fileSBATCH}" + else: + command_execution = f"sbatch {fileSBATCH}" + + if machine == "local": + os.system(command_execution) + else: + FARMINGtools.perform_quick_remote_execution( + folder, + machine, + command_execution, + input_files=[fileSBATCH], + job_name = nameJob, + ) + + +def run_slurm_array( + script, + 
array_input, + folder, + partition, + max_concurrent_jobs, + venv = '', + seeds=None, # If not None, assume that the script is able to receive --seeds # + hours=8, + n=32, + seed_specific=0, + machine="local", + exclude=None, + mem=None, +): + + folder = IOtools.expandPath(folder) + + if seeds is not None: + seeds_explore = [seed_specific] if seeds == 1 else list(range(seeds)) + else: + seeds_explore = [None] + + for seed in seeds_explore: + + extra_name = "" if (seed is None or seeds == 1) else f"_s{seed}" + + folder = IOtools.expandPath(folder) + folder = folder.with_name(folder.name + extra_name) + + print(f"* Launching slurm job of MITIM optimization with random seed = {seed}") + + folder.mkdir(parents=True, exist_ok=True) + + command = ['echo $SLURM_ARRAY_TASK_ID', venv, script + ' $SLURM_ARRAY_TASK_ID'+ (f" --seed {seed}" if seed is not None else "")] + string_of_array_input = ','.join([str(i) for i in array_input]) + + nameJob = f"mitim_{folder.name}{extra_name}" + + _, fileSBATCH, _ = FARMINGtools.create_slurm_execution_files( + command=command, folder_remote=folder, folder_local=folder, nameJob=nameJob, @@ -51,6 +124,8 @@ def run_slurm( ntasks=1, cpuspertask=n, memory_req_by_job=mem, + job_array=f'{string_of_array_input}%{max_concurrent_jobs}', + ) command_execution = f"sbatch {fileSBATCH}" diff --git a/src/mitim_tools/opt_tools/utils/BOgraphics.py b/src/mitim_tools/opt_tools/utils/BOgraphics.py index 98969f6b..bd32b55c 100644 --- a/src/mitim_tools/opt_tools/utils/BOgraphics.py +++ b/src/mitim_tools/opt_tools/utils/BOgraphics.py @@ -763,7 +763,7 @@ def ev(X): def localBehavior_scan_surrogate_model( - self, x, numP=50, dimension_label=None, plotYN=True, axs=None, c="b", xrange=0.5 + self, x, numP=50, dimension_label=None, plotYN=True, axs=None, c="b", xrange=0.5, label='' ): """ This works only for individual models @@ -801,15 +801,17 @@ def ev(X): xlabel = xlabels[x_dim_chosen] ax = axs[0] - ax.plot(Jx, Y, "-o", color=c, lw=1.0) + ax.plot(Jx, Y, "-o", 
color=c, lw=1.0, label=label, markersize=3) ax.set_xlabel(xlabel) ax.set_ylabel(f"{self.output}") + GRAPHICStools.addDenseAxis(ax) ax = axs[1] - ax.plot(Jx, J, "-o", color=c, lw=1.0) + ax.plot(Jx, J, "-o", color=c, lw=1.0, label=label, markersize=3) ax.set_xlabel(xlabel) ax.set_ylabel(f"d({self.output})/d({xlabel})") ax.set_title("Scan of local Jacobian") + GRAPHICStools.addDenseAxis(ax) # ---------------------------------------------------------------------------------------------------- @@ -858,12 +860,7 @@ def retrieveResults( res.read() # ---------------- Read Logger - log = LogFile(folderWork / "Outputs" / "optimization_log.txt") - try: - log.interpret() - except: - print("Could not read log", typeMsg="w") - log = None + timings_file = folderWork / "Outputs" / "timings.json" # ---------------- Read Tabular if analysis_level >= 0: @@ -884,7 +881,6 @@ def retrieveResults( # ------------------- mitim_model.optimization_results = res - mitim_model.logFile = log if plotFN is not None: fn = mitim_model.plot( doNotShow=doNotShow, @@ -896,244 +892,10 @@ def retrieveResults( # If no pickle, plot only the contents of optimization_results else: if plotFN: - fn = res.plot(doNotShow=doNotShow, log=log, fn = plotFN) + fn = res.plot(doNotShow=doNotShow, log=timings_file, fn = plotFN) mitim_model = None - return fn, res, mitim_model, log, data_df - - - -class LogFile: - def __init__(self, file): - self.file = file - - def activate(self, writeAlsoTerminal=True): - sys.stdout = LOGtools.Logger( - logFile=self.file, writeAlsoTerminal=writeAlsoTerminal - ) - - branch, commit_hash = IOtools.get_git_info(__mitimroot__) - print(f"Log file from MITIM version {mitim_version} from {branch} branch and commit hash {commit_hash}") - - def interpret(self): - with open(self.file, "r") as f: - lines = f.readlines() - - self.steps = {} - for line in lines: - if "Starting MITIM Optimization" in line: - try: - self.steps["start"] = IOtools.getTimeFromString( - line.split(",")[0].strip() - ) - 
except: - self.steps["start"] = IOtools.getTimeFromString( - " ".join(line.split(",")[0].strip().split()[-2:]) - ) - self.steps["steps"] = {} - if "MITIM Step" in line: - aft = line.split("Step")[-1] - ikey = int(aft.split()[0]) - time_str = aft.split("(")[-1].split(")")[0] - self.steps["steps"][ikey] = { - "start": IOtools.getTimeFromString(time_str), - "optimization": {}, - } - if "Posterior Optimization" in line: - time_str = line.split(",")[-1][:-2].strip() - self.steps["steps"][ikey]["optimization"] = { - "start": IOtools.getTimeFromString(time_str), - "steps": {}, - } - cont = 0 - if "Optimization stage " in line: - aft = line.split("Step")[-1] - time_str = aft.split("(")[-1].split(")")[0] - self.steps["steps"][ikey]["optimization"]["steps"][cont] = { - "name": line.split()[4], - "start": IOtools.getTimeFromString(time_str), - } - cont += 1 - - self.process() - - def process(self): - for step in self.steps["steps"]: - time_start = self.steps["steps"][step]["start"] - - if "start" not in self.steps["steps"][step]["optimization"]: - break - time_end = self.steps["steps"][step]["optimization"]["start"] - timeF = IOtools.getTimeDifference( - time_start, newTime=time_end, niceText=False - ) - self.steps["steps"][step]["fitting"] = timeF - - if step + 1 in self.steps["steps"]: - time_end = self.steps["steps"][step + 1]["start"] - time = IOtools.getTimeDifference( - time_start, newTime=time_end, niceText=False - ) - self.steps["steps"][step]["time_s"] = time - - for opt_step in self.steps["steps"][step]["optimization"]["steps"]: - time_start = self.steps["steps"][step]["optimization"]["steps"][ - opt_step - ]["start"] - - if opt_step + 1 in self.steps["steps"][step]["optimization"]["steps"]: - time_end = self.steps["steps"][step]["optimization"]["steps"][ - opt_step + 1 - ]["start"] - time = IOtools.getTimeDifference( - time_start, newTime=time_end, niceText=False - ) - self.steps["steps"][step]["optimization"]["steps"][opt_step][ - "time_s" - ] = time - else: - if 
step + 1 in self.steps["steps"]: - time_end = time_end = self.steps["steps"][step + 1]["start"] - time = IOtools.getTimeDifference( - time_start, newTime=time_end, niceText=False - ) - self.steps["steps"][step]["optimization"]["steps"][opt_step][ - "time_s" - ] = time - - self.points = [ - 0, - IOtools.getTimeDifference( - self.steps["start"], - newTime=self.steps["steps"][0]["start"], - niceText=False, - ), - ] - self.types = ["b"] - - for step in self.steps["steps"]: - if "fitting" in self.steps["steps"][step]: - self.points.append( - self.steps["steps"][step]["fitting"] + self.points[-1] - ) - self.types.append("r") - - if "steps" not in self.steps["steps"][step]["optimization"]: - break - for opt_step in self.steps["steps"][step]["optimization"]["steps"]: - if ( - "time_s" - in self.steps["steps"][step]["optimization"]["steps"][opt_step] - ): - self.points.append( - self.steps["steps"][step]["optimization"]["steps"][opt_step][ - "time_s" - ] - + self.points[-1] - ) - self.types.append("g") - - self.points = np.array(self.points) - - self.its = np.linspace(0, len(self.points) - 1, len(self.points)) - - def plot( - self, - axs=None, - factor=60.0, - fullCumulative=False, - ls="-", - lab="", - marker="o", - color="b", - ): - if axs is None: - plt.ion() - fig, axs = plt.subplots(ncols=2) - - ax = axs[0] - subtractor = 0 - totals = {"ini": 0.0, "fit": 0.0, "opt": 0.0} - - for i in range(len(self.points) - 1): - if self.types[i] == "r": - ax.axvline(x=self.its[i], ls="--", c="k", lw=0.5) - if not fullCumulative: - subtractor = self.points[i] - - ps = [ - (self.points[i] - subtractor) / factor, - (self.points[i + 1] - subtractor) / factor, - ] - - if self.types[i] == "b": - totals["ini"] += ps[1] - ps[0] - if self.types[i] == "g": - totals["opt"] += ps[1] - ps[0] - if self.types[i] == "r": - totals["fit"] += ps[1] - ps[0] - - if i == 0: - labb = lab - else: - labb = "" - - ax.plot( - [self.its[i], self.its[i + 1]], - ps, - marker + ls, - c=self.types[i], - label=labb, 
- ) - - if factor == 60.0: - label = "minutes" - ax.axhline(y=60, ls="-.", lw=0.2) - elif factor == 3600.0: - label = "hours" - ax.axhline(y=1, ls="-.", lw=0.2) - else: - label = "seconds" - - # ax.set_xlabel('Workflow Steps') - ax.set_ylabel(f"Cumulated Time ({label})") - # ax.set_xlim(left=0) - ax.set_ylim(bottom=0) - - from matplotlib.lines import Line2D - - custom_lines = [ - Line2D([0], [0], color="b", lw=2), - Line2D([0], [0], color="r", lw=2), - Line2D([0], [0], color="g", lw=2), - ] - - legs = [ - "Initialization + Evaluation", - "Evaluation + Fitting", - "Optimization", - "Total", - ] - ax.legend(custom_lines, legs) - - ax = axs[1] - ax.bar( - legs, - [ - totals["ini"], - totals["fit"], - totals["opt"], - totals["ini"] + totals["fit"] + totals["opt"], - ], - 1 / 3, - alpha=0.5, - label=lab, - color=color, - ) # , label=equil_names[i],color=colors[i],align='edge') - - # ax.set_xlabel('Workflow') - ax.set_ylabel(f"Cumulated Time ({label})") - + return fn, res, mitim_model, data_df class optimization_data: def __init__( @@ -1659,8 +1421,10 @@ def plot( fig4 = self.fn.add_figure(label="Improvement", tab_color=tab_color) if log is not None: figTimes = self.fn.add_figure(label="Times", tab_color=tab_color) - grid = plt.GridSpec(1, 2, hspace=0.3, wspace=0.3) - axsTimes = [figTimes.add_subplot(grid[0]), figTimes.add_subplot(grid[1])] + grid = plt.GridSpec(2, 1, hspace=0.3, wspace=0.3) + axx0 = figTimes.add_subplot(grid[0]) + axx1 = figTimes.add_subplot(grid[1], sharex=axx0) + axsTimes = [axx0, axx1] _ = self.plotComplete( fig=fig1, @@ -1693,7 +1457,7 @@ def plot( _, _ = self.plotImprovement(axs=[ax0, ax1, ax2, ax3]) if log is not None: - log.plot(axs=[axsTimes[0], axsTimes[1]]) + IOtools.plot_timings(log, axs = [axsTimes[0], axsTimes[1]]) return self.fn diff --git a/src/mitim_tools/opt_tools/utils/EVALUATORtools.py b/src/mitim_tools/opt_tools/utils/EVALUATORtools.py index 2a3bde11..9a19b6f1 100644 --- a/src/mitim_tools/opt_tools/utils/EVALUATORtools.py +++ 
b/src/mitim_tools/opt_tools/utils/EVALUATORtools.py @@ -37,7 +37,6 @@ def parallel_main(Params, cont): lock=lock, ) - def fun( optimization_object, x, @@ -130,15 +129,10 @@ def mitimRun( y, yE, _ = optimization_data.grab_data_point(x) if pd.Series(y).isna().any() or pd.Series(yE).isna().any(): - print( - f"--> Reading Tabular file failed or not evaluated yet for element {numEval}", - typeMsg="w", - ) + print(f"--> Reading Tabular file failed or not evaluated yet for element {numEval}",typeMsg="i",) cold_start = True else: - print( - f"--> Reading Tabular file successful for element {numEval}", - ) + print(f"--> Reading Tabular file successful for element {numEval}",) if cold_start: # Create folder @@ -161,11 +155,7 @@ def mitimRun( try: y, yE = IOtools.readresults(resultsfile) except: - print( - "Could not read results file for {0}, printing error file to screen".format( - numEval - ) - ) + print(f"Could not read results file for {numEval}, printing error file to screen") # print(error) y, yE = np.array([np.nan]), np.array([np.nan]) diff --git a/src/mitim_tools/opt_tools/utils/SBOcorrections.py b/src/mitim_tools/opt_tools/utils/SBOcorrections.py index c97f4e03..0acd91b1 100644 --- a/src/mitim_tools/opt_tools/utils/SBOcorrections.py +++ b/src/mitim_tools/opt_tools/utils/SBOcorrections.py @@ -520,24 +520,12 @@ def updateMetrics(self, evaluatedPoints=1, IsThisAFreshIteration=True, position= Yvar = torch.from_numpy(self.train_Ystd).to(self.dfT) ** 2 if "steps" in self.__dict__: - self.BOmetrics["overall"]["Residual"] = ( - -self.steps[-1].evaluators["objective"](Y).detach().cpu().numpy() - ) - self.BOmetrics["overall"]["ResidualModeledLast"] = ( - -self.steps[-1].evaluators["residual_function"](X).detach().cpu().numpy() - ) - + self.BOmetrics["overall"]["Residual"] = -self.steps[-1].evaluators["objective"](Y).detach().cpu().numpy() + self.BOmetrics["overall"]["ResidualModeledLast"] = -self.steps[-1].evaluators["residual_function"](X).detach().cpu().numpy() else: - 
print( - "\t~ Cannot perform prediction at this iteration step, returning objective evaluation as acquisition", - typeMsg="w", - ) - self.BOmetrics["overall"]["Residual"] = ( - -self.scalarized_objective(Y)[2].detach().cpu().numpy() - ) - self.BOmetrics["overall"]["ResidualModeledLast"] = self.BOmetrics["overall"][ - "Residual" - ] + print("\t~ Cannot perform prediction at this iteration step, returning objective evaluation as acquisition",typeMsg="i",) + self.BOmetrics["overall"]["Residual"] = -self.scalarized_objective(Y)[2].detach().cpu().numpy() + self.BOmetrics["overall"]["ResidualModeledLast"] = self.BOmetrics["overall"]["Residual"] resi = self.BOmetrics["overall"]["Residual"] resiM = self.BOmetrics["overall"]["ResidualModeledLast"] @@ -547,50 +535,29 @@ def updateMetrics(self, evaluatedPoints=1, IsThisAFreshIteration=True, position= self.BOmetrics["overall"]["indBest"] = np.nanargmin(resi, axis=0) self.BOmetrics["overall"]["indBestModel"] = np.nanargmin(resiM, axis=0) - self.BOmetrics["overall"]["xBest"] = ( - X[self.BOmetrics["overall"]["indBest"], :].detach().cpu() - ) - self.BOmetrics["overall"]["yBest"] = ( - Y[self.BOmetrics["overall"]["indBest"], :].detach().cpu() - ) - self.BOmetrics["overall"]["yVarBest"] = ( - Yvar[self.BOmetrics["overall"]["indBest"], :].detach().cpu() - ) + self.BOmetrics["overall"]["xBest"] = X[self.BOmetrics["overall"]["indBest"], :].detach().cpu() + self.BOmetrics["overall"]["yBest"] = Y[self.BOmetrics["overall"]["indBest"], :].detach().cpu() + self.BOmetrics["overall"]["yVarBest"] = Yvar[self.BOmetrics["overall"]["indBest"], :].detach().cpu() # Best from last iteration zA = np.nanmin(resi[-evaluatedPoints:], axis=0) - self.BOmetrics["overall"]["indBestLast"] = np.nanargmin( - resi[-evaluatedPoints:], axis=0 - ) + self.BOmetrics["overall"]["indBestLast"] = np.nanargmin(resi[-evaluatedPoints:], axis=0) zM = np.nanmin(resiM[-evaluatedPoints:], axis=0) - self.BOmetrics["overall"]["indBestModelLast"] = np.nanargmin( - 
resiM[-evaluatedPoints:], axis=0 - ) + self.BOmetrics["overall"]["indBestModelLast"] = np.nanargmin(resiM[-evaluatedPoints:], axis=0) - self.BOmetrics["overall"]["xBestLast"] = X[ - self.BOmetrics["overall"]["indBestLast"], : - ] - self.BOmetrics["overall"]["yBestLast"] = Y[ - self.BOmetrics["overall"]["indBestLast"], : - ] + self.BOmetrics["overall"]["xBestLast"] = X[self.BOmetrics["overall"]["indBestLast"], :] + self.BOmetrics["overall"]["yBestLast"] = Y[self.BOmetrics["overall"]["indBestLast"], :] # Metric tracking of previous iterations - if ( - len(self.BOmetrics["overall"]["ResidualModeledLast"]) - == self.Originalinitial_training - ): + if len(self.BOmetrics["overall"]["ResidualModeledLast"]) == self.Originalinitial_training: ratio, metric = np.inf, 0.0 label = "\t(Initial batch only)" else: zA_prev = np.nanmin(resi[:-evaluatedPoints], axis=0) - self.BOmetrics["overall"]["indBestExceptLast"] = np.nanargmin( - resi[:-evaluatedPoints], axis=0 - ) + self.BOmetrics["overall"]["indBestExceptLast"] = np.nanargmin(resi[:-evaluatedPoints], axis=0) zM_prev = np.nanmin(resiM[:-evaluatedPoints], axis=0) - self.BOmetrics["overall"]["indBestModelExceptLast"] = np.nanargmin( - resiM[:-evaluatedPoints], axis=0 - ) + self.BOmetrics["overall"]["indBestModelExceptLast"] = np.nanargmin(resiM[:-evaluatedPoints], axis=0) ratio, metric, label = constructMetricsTR(zA, zM, zA_prev, zM_prev) @@ -609,9 +576,7 @@ def updateMetrics(self, evaluatedPoints=1, IsThisAFreshIteration=True, position= elif IsThisAFreshIteration: print(f"- BO fitness ratio = {ratio:.3f}, metric = {metric} ({label})") else: - print( - "- Note that this one was not fresh (e.g. post-correction), do not track metrics" - ) + print("- Note that this one was not fresh (e.g. 
post-correction), do not track metrics") print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") # ------------------------------------------------------------------------------------------------------------ @@ -630,13 +595,9 @@ def updateMetrics(self, evaluatedPoints=1, IsThisAFreshIteration=True, position= self.BOmetrics["xBest_track"][position] = self.BOmetrics["overall"]["xBest"] self.BOmetrics["yBest_track"][position] = self.BOmetrics["overall"]["yBest"] - self.BOmetrics["yVarBest_track"][position] = self.BOmetrics["overall"][ - "yVarBest" - ] + self.BOmetrics["yVarBest_track"][position] = self.BOmetrics["overall"]["yVarBest"] - self.BOmetrics["BOmetric_it"][position] = ( - X.shape[0] - 1 - ) # Evaluation position until now + self.BOmetrics["BOmetric_it"][position] = X.shape[0] - 1 # Evaluation position until now def constructMetricsTR(zA, zM, zA_prev, zM_prev): @@ -660,9 +621,7 @@ def constructMetricsTR(zA, zM, zA_prev, zM_prev): """ - label = "Evaluated previous = {2:.2e} --> Predicted new = {1:.2e} --> Evaluated new = {0:.2e}; ".format( - zA, zM, zA_prev, zM_prev - ) + label = f"Evaluated previous = {zA_prev:.2e} --> Predicted new = {zM:.2e} --> Evaluated new = {zA:.2e}; " # Relative improvements (Positive if it has gotten better = lower residual) zA = (zA_prev - zA) / np.abs(zA_prev) @@ -753,26 +712,10 @@ def plotTrustRegionInformation(self, fig=None): ratio = ratio[:lim] - metrics = np.array( - [self.BOmetrics["BOmetric"][i] for i in self.BOmetrics["BOmetric_it"]] - )[ - :lim - ] # self.BOmetrics['BOmetric']#[1:] - boundsX1 = np.array( - [self.BOmetrics["BoundsStorage"][i] for i in self.BOmetrics["BOmetric_it"]] - )[ - :lim - ] # self.BOmetrics['BoundsStorage'] - operat = np.array( - [self.BOmetrics["TRoperation"][i] for i in self.BOmetrics["BOmetric_it"]] - )[ - :lim - ] # self.BOmetrics['TRoperation'] #[1:] - evaluations = np.array( - [self.BOmetrics["BOmetric_it"][i] for i in self.BOmetrics["BOmetric_it"]] - )[ - :lim - ] # [1:] + 
metrics = np.array([self.BOmetrics["BOmetric"][i] for i in self.BOmetrics["BOmetric_it"]])[:lim] # self.BOmetrics['BOmetric']#[1:] + boundsX1 = np.array([self.BOmetrics["BoundsStorage"][i] for i in self.BOmetrics["BOmetric_it"]])[:lim] # self.BOmetrics['BoundsStorage'] + operat = np.array([self.BOmetrics["TRoperation"][i] for i in self.BOmetrics["BOmetric_it"]])[:lim] # self.BOmetrics['TRoperation'] #[1:] + evaluations = np.array([self.BOmetrics["BOmetric_it"][i] for i in self.BOmetrics["BOmetric_it"]])[:lim] # [1:] size, center = [], [] va = list(boundsX1[0].keys())[0] diff --git a/src/mitim_tools/plasmastate_tools/MITIMstate.py b/src/mitim_tools/plasmastate_tools/MITIMstate.py new file mode 100644 index 00000000..28d0c65e --- /dev/null +++ b/src/mitim_tools/plasmastate_tools/MITIMstate.py @@ -0,0 +1,3107 @@ +import copy +import torch +import csv +import numpy as np +import matplotlib.pyplot as plt +from mitim_tools.misc_tools import GRAPHICStools, MATHtools, PLASMAtools, IOtools +from mitim_modules.powertorch.utils import CALCtools +from mitim_tools.gacode_tools.utils import GACODEdefaults +from mitim_tools.plasmastate_tools.utils import state_plotting +from mitim_tools.misc_tools.LOGtools import printMsg as print +from mitim_tools import __version__ +from IPython import embed + +from mitim_tools.misc_tools.PLASMAtools import md_u + +def ensure_variables_existence(self): + # --------------------------------------------------------------------------- + # Determine minimal set of variables that should be present in the profiles + # --------------------------------------------------------------------------- + + # Kinetics + required_profiles = { + "te(keV)": 1, + "ti(keV)": 2, + "ne(10^19/m^3)": 1, + "ni(10^19/m^3)": 2, + "w0(rad/s)": 1, + "ptot(Pa)": 1, + "z_eff(-)": 1, + } + + # Electromagnetics + required_profiles.update({ + "q(-)": 1, + "polflux(Wb/radian)": 1, + "johm(MA/m^2)": 1, + "jbs(MA/m^2)": 1, + "jbstor(MA/m^2)": 1, + }) + + # Geometry + 
required_profiles.update({ + "rho(-)": 1, + "rmin(m)": 1, + "rmaj(m)": 1, + "zmag(m)": 1, + "kappa(-)": 1, + "delta(-)": 1, + "zeta(-)": 1, + }) + + # Sources and Sinks + required_profiles.update({ + "qohme(MW/m^3)": 1, + "qei(MW/m^3)": 1, + "qbeame(MW/m^3)": 1, + "qbeami(MW/m^3)": 1, + "qrfe(MW/m^3)": 1, + "qrfi(MW/m^3)": 1, + "qfuse(MW/m^3)": 1, + "qfusi(MW/m^3)": 1, + "qsync(MW/m^3)": 1, + "qbrem(MW/m^3)": 1, + "qline(MW/m^3)": 1, + "qpar_beam(1/m^3/s)": 1, + "qpar_wall(1/m^3/s)": 1, + "qmom(N/m^2)": 1, + }) + + # --------------------------------------------------------------------------- + # Insert zeros in those cases whose column are not there + # --------------------------------------------------------------------------- + + # Choose a template for dimensionality + template_key_1d = "rho(-)" + + # Ensure required keys exist + for key, dim in required_profiles.items(): + if key not in self.profiles: + self.profiles[key] = copy.deepcopy(self.profiles[template_key_1d]) * 0.0 if dim == 1 else np.atleast_2d(copy.deepcopy(self.profiles[template_key_1d]) * 0.0).T + + +''' +The mitim_state class is the base class for manipulating plasma states in MITIM. +Any class that inherits from this class should implement the methods: + + - derive_quantities: to derive quantities from the plasma state (must at least define "r" and call the derive_quantities_base method). + + - derive_geometry: to derive the geometry of the plasma state. + + - write_state: to write the plasma state to a file. + + - plot_geometry: to plot the geometry of the plasma state. + +''' + +class mitim_state: + ''' + Class to manipulate the plasma state in MITIM. 
+ ''' + + def __init__(self, type_file = 'input.gacode'): + + self.type = type_file + + @classmethod + def scratch(cls, profiles, label_header='', **kwargs_process): + # Method to write a scratch file + + instance = cls(None) + + # Header + instance.header = f''' +# Created from scratch with MITIM version {__version__} +# {label_header} +# +''' + # Add data to profiles + instance.profiles = profiles + + instance.derive_quantities(**kwargs_process) + + return instance + + @IOtools.hook_method(before=ensure_variables_existence) + def derive_quantities_base(self, mi_ref=None, derive_quantities=True, rederiveGeometry=True): + + # Make sure the profiles have the required dimensions + if len(self.profiles["ni(10^19/m^3)"].shape) == 1: + self.profiles["ni(10^19/m^3)"] = self.profiles["ni(10^19/m^3)"].reshape(-1, 1) + self.profiles["ti(keV)"] = self.profiles["ti(keV)"].reshape(-1, 1) + + # ------------------------------------- + self.readSpecies() + self.mi_first = self.Species[0]["A"] + self.DTplasma() + self.sumFast() + # ------------------------------------- + + if "derived" not in self.__dict__: + self.derived = {} + + if mi_ref is not None: + self.derived["mi_ref"] = mi_ref + print(f"\t* Reference mass ({self.derived['mi_ref']}) to use was forced by class initialization",typeMsg="w") + else: + self.derived["mi_ref"] = md_u #2.0 #md_u #self.mi_first + print(f"\t* Reference mass ({self.derived['mi_ref']}) from Deuterium, as convention in gacode",typeMsg="i") + + # Useful to have gradients in the basic ---------------------------------------------------------- + self.derived["aLTe"] = aLT(self.derived["r"], self.profiles["te(keV)"]) + self.derived["aLne"] = aLT(self.derived["r"], self.profiles["ne(10^19/m^3)"]) + + self.derived["aLTi"] = self.profiles["ti(keV)"] * 0.0 + self.derived["aLni"] = [] + for i in range(self.profiles["ti(keV)"].shape[1]): + self.derived["aLTi"][:, i] = aLT(self.derived["r"], self.profiles["ti(keV)"][:, i]) + 
self.derived["aLni"].append(aLT(self.derived["r"], self.profiles["ni(10^19/m^3)"][:, i])) + self.derived["aLni"] = np.transpose(np.array(self.derived["aLni"])) + # ------------------------------------------------------------------------------------------------ + + if derive_quantities: + + # Avoid division by zero warning by using np.errstate + with np.errstate(divide='ignore', invalid='ignore'): + self.derive_quantities_full(rederiveGeometry=rederiveGeometry) + + def write_state(self, file=None): + print("\t- Writting input.gacode file") + + if file is None: + file = self.files[0] + + with open(file, "w") as f: + for line in self.header: + f.write(line) + + for i in self.profiles: + if "(" not in i: + f.write(f"# {i}\n") + else: + f.write(f"# {i.split('(')[0]} | {i.split('(')[-1].split(')')[0]}\n") + + if i in self.titles_single: + listWrite = self.profiles[i] + + if IOtools.isnum(listWrite[0]): + listWrite = [f"{i:.7e}".rjust(14) for i in listWrite] + f.write(f"{''.join(listWrite)}\n") + else: + f.write(f"{' '.join(listWrite)}\n") + + else: + if len(self.profiles[i].shape) == 1: + for j, val in enumerate(self.profiles[i]): + pos = f"{j + 1}".rjust(3) + valt = f"{round(val,99):.7e}".rjust(15) + f.write(f"{pos}{valt}\n") + else: + for j, val in enumerate(self.profiles[i]): + pos = f"{j + 1}".rjust(3) + txt = "".join([f"{k:.7e}".rjust(15) for k in val]) + f.write(f"{pos}{txt}\n") + + print(f"\t\t~ File {IOtools.clipstr(file)} written") + + # Update file + self.files[0] = file + + # ************************************************************************************************************************************************ + # Derivation methods that children classes should implement + # ************************************************************************************************************************************************ + + def derive_quantities(self, *args, **kwargs): + raise Exception('[MITIM] derive_quantities method is not implemented in the base 
class (to define "r"). Please use a derived class that implements it.') + + def derive_geometry(self, *args, **kwargs): + raise Exception('[MITIM] This method is not implemented in the base class. Please use a derived class that implements it.') + + def plot_geometry(self, *args, **kwargs): + print('[MITIM] Method plot_geometry() is not implemented in the base class. Please use a derived class that implements it.') + pass + + # ************************************************************************************************************************************************ + # Derivation methods + # ************************************************************************************************************************************************ + + def calculate_Er( + self, + folder, + rhos=None, + vgenOptions={}, + name="vgen1", + includeAll=False, + write_new_file=None, + cold_start=False, + ): + profiles = copy.deepcopy(self) + + # Resolution? + resol_changed = False + if rhos is not None: + profiles.changeResolution(rho_new=rhos) + resol_changed = True + + from mitim_tools.gacode_tools import NEOtools + self.neo = NEOtools.NEO() + self.neo.prep(profiles, folder) + self.neo.run_vgen(subfolder=name, vgenOptions=vgenOptions, cold_start=cold_start) + + profiles_new = copy.deepcopy(self.neo.inputgacode_vgen) + if resol_changed: + profiles_new.changeResolution(rho_new=self.profiles["rho(-)"]) + + # Get the information from the NEO run + + variables = ["w0(rad/s)"] + if includeAll: + variables += [ + "vpol(m/s)", + "vtor(m/s)", + "jbs(MA/m^2)", + "jbstor(MA/m^2)", + "johm(MA/m^2)", + ] + + for ikey in variables: + if ikey in profiles_new.profiles: + print(f'\t- Inserting {ikey} from NEO run{" (went back to original resolution by interpolation)" if resol_changed else ""}') + self.profiles[ikey] = profiles_new.profiles[ikey] + + self.derive_quantities() + + if write_new_file is not None: + self.write_state(file=write_new_file) + + def readSpecies(self, maxSpecies=100, 
correct_zeff = True): + maxSpecies = int(self.profiles["nion"][0]) + + Species = [] + for j in range(maxSpecies): + # To determine later if this specie has zero density + niT = self.profiles["ni(10^19/m^3)"][0, j] + + sp = { + "N": self.profiles["name"][j], + "Z": float(self.profiles["z"][j]), + "A": float(self.profiles["mass"][j]), + "S": self.profiles["type"][j].split("[")[-1].split("]")[0], + "n0": niT, + } + + Species.append(sp) + + self.Species = Species + + # Correct Zeff if needed + if correct_zeff: + self.correct_zeff_array() + + def correct_zeff_array(self): + + self.profiles["z_eff(-)"] = np.sum(self.profiles["ni(10^19/m^3)"] * self.profiles["z"] ** 2, axis=1) / self.profiles["ne(10^19/m^3)"] + + def sumFast(self): + self.nFast = self.profiles["ne(10^19/m^3)"] * 0.0 + self.nZFast = self.profiles["ne(10^19/m^3)"] * 0.0 + self.nThermal = self.profiles["ne(10^19/m^3)"] * 0.0 + self.nZThermal = self.profiles["ne(10^19/m^3)"] * 0.0 + for sp in range(len(self.Species)): + if self.Species[sp]["S"] == "fast": + self.nFast += self.profiles["ni(10^19/m^3)"][:, sp] + self.nZFast += ( + self.profiles["ni(10^19/m^3)"][:, sp] * self.profiles["z"][sp] + ) + else: + self.nThermal += self.profiles["ni(10^19/m^3)"][:, sp] + self.nZThermal += self.profiles["ni(10^19/m^3)"][:, sp] * self.profiles["z"][sp] + + def derive_quantities_full(self, mi_ref=None, rederiveGeometry=True): + """ + deriving geometry is expensive, so if I'm just updating profiles it may not be needed + """ + + if "derived" not in self.__dict__: + self.derived = {} + + # --------------------------------------------------------------------------------------------------------------------- + # --------- MAIN (useful for STATEtools) + # --------------------------------------------------------------------------------------------------------------------- + + self.derived["a"] = self.derived["r"][-1] + # self.derived['epsX'] = self.profiles['rmaj(m)'] / self.profiles['rmin(m)'] + # self.derived['eps'] = 
self.derived['epsX'][-1] + self.derived["eps"] = self.derived["r"][-1] / self.profiles["rmaj(m)"][-1] + + self.derived["roa"] = self.derived["r"] / self.derived["a"] + self.derived["Rmajoa"] = self.profiles["rmaj(m)"] / self.derived["a"] + self.derived["Zmagoa"] = self.profiles["zmag(m)"] / self.derived["a"] + + self.derived["torflux"] = float(self.profiles["torfluxa(Wb/radian)"][0])* 2* np.pi* self.profiles["rho(-)"] ** 2 # Wb + self.derived["B_unit"] = PLASMAtools.Bunit(self.derived["torflux"], self.derived["r"]) + + self.derived["psi_pol_n"] = ( + self.profiles["polflux(Wb/radian)"] - self.profiles["polflux(Wb/radian)"][0] + ) / ( + self.profiles["polflux(Wb/radian)"][-1] + - self.profiles["polflux(Wb/radian)"][0] + ) + self.derived["rho_pol"] = self.derived["psi_pol_n"] ** 0.5 + + self.derived["q95"] = np.interp(0.95, self.derived["psi_pol_n"], self.profiles["q(-)"]) + + self.derived["q0"] = self.profiles["q(-)"][0] + + if self.profiles["q(-)"].min() > 1.0: + self.derived["rho_saw"] = np.nan + else: + self.derived["rho_saw"] = np.interp(1.0, self.profiles["q(-)"], self.profiles["rho(-)"]) + + # --------- Geometry (only if it doesn't exist or if I ask to recalculate) + + if rederiveGeometry or ("volp_geo" not in self.derived): + self.derive_geometry() + + # -------------------------------------------------------------------------- + # Reference mass + # -------------------------------------------------------------------------- + + # Forcing mass from this specific derive_quantities call + if mi_ref is not None: + self.derived["mi_ref"] = mi_ref + print(f'\t- Using mi_ref={self.derived["mi_ref"]} provided in this particular derive_quantities method, subtituting initialization one',typeMsg='i') + + # --------------------------------------------------------------------------------------------------------------------- + # --------- Important for scaling laws + # 
--------------------------------------------------------------------------------------------------------------------- + + self.derived["Rgeo"] = float(self.profiles["rcentr(m)"][-1]) + self.derived["B0"] = np.abs(float(self.profiles["bcentr(T)"][-1])) + + # --------------------------------------------------------------------------------------------------------------------- + + + self.derived["c_s"] = PLASMAtools.c_s(self.profiles["te(keV)"], self.derived["mi_ref"]) + self.derived["rho_s"] = PLASMAtools.rho_s(self.profiles["te(keV)"], self.derived["mi_ref"], self.derived["B_unit"]) + self.derived["rho_sa"] = self.derived["rho_s"] / self.derived["a"] + + self.derived["q_gb"], self.derived["g_gb"], self.derived["pi_gb"], self.derived["s_gb"], _ = PLASMAtools.gyrobohmUnits( + self.profiles["te(keV)"], + self.profiles["ne(10^19/m^3)"] * 1e-1, + self.derived["mi_ref"], + np.abs(self.derived["B_unit"]), + self.derived["r"][-1], + ) + + """ + In prgen_map_plasmastate: + qspow_e = expro_qohme+expro_qbeame+expro_qrfe+expro_qfuse-expro_qei & + -expro_qsync-expro_qbrem-expro_qline + qspow_i = expro_qbeami+expro_qrfi+expro_qfusi+expro_qei + """ + + qe_terms = { + "qohme(MW/m^3)": 1, + "qbeame(MW/m^3)": 1, + "qrfe(MW/m^3)": 1, + "qfuse(MW/m^3)": 1, + "qei(MW/m^3)": -1, + "qsync(MW/m^3)": -1, + "qbrem(MW/m^3)": -1, + "qline(MW/m^3)": -1, + "qione(MW/m^3)": 1, + } + + self.derived["qe"] = np.zeros(len(self.profiles["rho(-)"])) + for i in qe_terms: + if i in self.profiles: + self.derived["qe"] += qe_terms[i] * self.profiles[i] + + qrad = { + "qsync(MW/m^3)": 1, + "qbrem(MW/m^3)": 1, + "qline(MW/m^3)": 1, + } + + self.derived["qrad"] = np.zeros(len(self.profiles["rho(-)"])) + for i in qrad: + if i in self.profiles: + self.derived["qrad"] += qrad[i] * self.profiles[i] + + qi_terms = { + "qbeami(MW/m^3)": 1, + "qrfi(MW/m^3)": 1, + "qfusi(MW/m^3)": 1, + "qei(MW/m^3)": 1, + "qioni(MW/m^3)": 1, + } + + self.derived["qi"] = np.zeros(len(self.profiles["rho(-)"])) + for i in qi_terms: + if 
i in self.profiles: + self.derived["qi"] += qi_terms[i] * self.profiles[i] + + # Depends on GACODE version + ge_terms = {"qpar_beam(1/m^3/s)": 1, "qpar_wall(1/m^3/s)": 1} + + self.derived["ge"] = np.zeros(len(self.profiles["rho(-)"])) + for i in ge_terms: + if i in self.profiles: + self.derived["ge"] += ge_terms[i] * self.profiles[i] + + """ + Careful, that's in MW/m^3. I need to find the volumes. Using here the Miller + calculation. Should be consistent with TGYRO + + profiles_gen puts any missing power into the CX: qioni, qione + """ + + r = self.derived["r"] + volp = self.derived["volp_geo"] + + self.derived["qe_MW"] = CALCtools.volume_integration(self.derived["qe"], r, volp) + self.derived["qi_MW"] = CALCtools.volume_integration(self.derived["qi"], r, volp) + self.derived["ge_10E20"] = CALCtools.volume_integration(self.derived["ge"] * 1e-20, r, volp) # Because the units were #/sec/m^3 + + self.derived["geIn"] = self.derived["ge_10E20"][-1] # 1E20 particles/sec + + self.derived["qe_MWm2"] = self.derived["qe_MW"] / (volp) + self.derived["qi_MWm2"] = self.derived["qi_MW"] / (volp) + self.derived["ge_10E20m2"] = self.derived["ge_10E20"] / (volp) + + self.derived["QiQe"] = self.derived["qi_MWm2"] / np.where(self.derived["qe_MWm2"] == 0, 1e-10, self.derived["qe_MWm2"]) # to avoid division by zero + + # "Convective" flux + self.derived["ce_MW"] = PLASMAtools.convective_flux(self.profiles["te(keV)"], self.derived["ge_10E20"]) + self.derived["ce_MWm2"] = PLASMAtools.convective_flux(self.profiles["te(keV)"], self.derived["ge_10E20m2"]) + + # qmom + self.derived["mt_Jmiller"] = CALCtools.volume_integration(self.profiles["qmom(N/m^2)"], r, volp) + self.derived["mt_Jm2"] = self.derived["mt_Jmiller"] / (volp) + + # Extras for plotting in TGYRO for comparison + P = np.zeros(len(self.derived["r"])) + if "qsync(MW/m^3)" in self.profiles: + P += self.profiles["qsync(MW/m^3)"] + if "qbrem(MW/m^3)" in self.profiles: + P += self.profiles["qbrem(MW/m^3)"] + if "qline(MW/m^3)" in 
self.profiles: + P += self.profiles["qline(MW/m^3)"] + self.derived["qe_rad_MW"] = CALCtools.volume_integration(P, r, volp) + + P = self.profiles["qei(MW/m^3)"] + self.derived["qe_exc_MW"] = CALCtools.volume_integration(P, r, volp) + + """ + --------------------------------------------------------------------------------------------------------------------- + Note that the real auxiliary power is RF+BEAMS+OHMIC, + The QIONE is added by TGYRO, but sometimes it includes radiation and direct RF to electrons + --------------------------------------------------------------------------------------------------------------------- + """ + + # ** Electrons + + P = np.zeros(len(self.profiles["rho(-)"])) + for i in ["qrfe(MW/m^3)", "qohme(MW/m^3)", "qbeame(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + + self.derived["qe_auxONLY"] = copy.deepcopy(P) + self.derived["qe_auxONLY_MW"] = CALCtools.volume_integration(P, r, volp) + + for i in ["qione(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + + self.derived["qe_aux"] = copy.deepcopy(P) + self.derived["qe_aux_MW"] = CALCtools.volume_integration(P, r, volp) + + # ** Ions + + P = np.zeros(len(self.profiles["rho(-)"])) + for i in ["qrfi(MW/m^3)", "qbeami(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + + self.derived["qi_auxONLY"] = copy.deepcopy(P) + self.derived["qi_auxONLY_MW"] = CALCtools.volume_integration(P, r, volp) + + for i in ["qioni(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + + self.derived["qi_aux"] = copy.deepcopy(P) + self.derived["qi_aux_MW"] = CALCtools.volume_integration(P, r, volp) + + # ** General + + P = np.zeros(len(self.profiles["rho(-)"])) + for i in ["qohme(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + self.derived["qOhm_MW"] = CALCtools.volume_integration(P, r, volp) + + P = np.zeros(len(self.profiles["rho(-)"])) + for i in ["qrfe(MW/m^3)", "qrfi(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + self.derived["qRF_MW"] = 
CALCtools.volume_integration(P, r, volp) + if "qrfe(MW/m^3)" in self.profiles: + self.derived["qRFe_MW"] = CALCtools.volume_integration( + self.profiles["qrfe(MW/m^3)"], r, volp + ) + if "qrfi(MW/m^3)" in self.profiles: + self.derived["qRFi_MW"] = CALCtools.volume_integration( + self.profiles["qrfi(MW/m^3)"], r, volp + ) + + P = np.zeros(len(self.profiles["rho(-)"])) + for i in ["qbeame(MW/m^3)", "qbeami(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + self.derived["qBEAM_MW"] = CALCtools.volume_integration(P, r, volp) + + self.derived["qrad_MW"] = CALCtools.volume_integration(self.derived["qrad"], r, volp) + if "qsync(MW/m^3)" in self.profiles: + self.derived["qrad_sync_MW"] = CALCtools.volume_integration(self.profiles["qsync(MW/m^3)"], r, volp) + else: + self.derived["qrad_sync_MW"] = self.derived["qrad_MW"]*0.0 + if "qbrem(MW/m^3)" in self.profiles: + self.derived["qrad_brem_MW"] = CALCtools.volume_integration(self.profiles["qbrem(MW/m^3)"], r, volp) + else: + self.derived["qrad_brem_MW"] = self.derived["qrad_MW"]*0.0 + if "qline(MW/m^3)" in self.profiles: + self.derived["qrad_line_MW"] = CALCtools.volume_integration(self.profiles["qline(MW/m^3)"], r, volp) + else: + self.derived["qrad_line_MW"] = self.derived["qrad_MW"]*0.0 + + P = np.zeros(len(self.profiles["rho(-)"])) + for i in ["qfuse(MW/m^3)", "qfusi(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + self.derived["qFus_MW"] = CALCtools.volume_integration(P, r, volp) + + P = np.zeros(len(self.profiles["rho(-)"])) + for i in ["qioni(MW/m^3)", "qione(MW/m^3)"]: + if i in self.profiles: + P += self.profiles[i] + self.derived["qz_MW"] = CALCtools.volume_integration(P, r, volp) + + self.derived["q_MW"] = ( + self.derived["qe_MW"] + self.derived["qi_MW"] + ) + + # --------------------------------------------------------------------------------------------------------------------- + # 
--------------------------------------------------------------------------------------------------------------------- + + P = np.zeros(len(self.profiles["rho(-)"])) + if "qfuse(MW/m^3)" in self.profiles: + P = self.profiles["qfuse(MW/m^3)"] + self.derived["qe_fus_MW"] = CALCtools.volume_integration(P, r, volp) + + P = np.zeros(len(self.profiles["rho(-)"])) + if "qfusi(MW/m^3)" in self.profiles: + P = self.profiles["qfusi(MW/m^3)"] + self.derived["qi_fus_MW"] = CALCtools.volume_integration(P, r, volp) + + P = np.zeros(len(self.profiles["rho(-)"])) + if "qfusi(MW/m^3)" in self.profiles: + self.derived["q_fus"] = ( + self.profiles["qfuse(MW/m^3)"] + self.profiles["qfusi(MW/m^3)"] + ) * 5 + P = self.derived["q_fus"] + self.derived["q_fus"] = P + self.derived["q_fus_MW"] = CALCtools.volume_integration(P, r, volp) + + """ + Derivatives + """ + self.derived["aLTe"] = aLT(self.derived["r"], self.profiles["te(keV)"]) + self.derived["aLTi"] = self.profiles["ti(keV)"] * 0.0 + for i in range(self.profiles["ti(keV)"].shape[1]): + self.derived["aLTi"][:, i] = aLT( + self.derived["r"], self.profiles["ti(keV)"][:, i] + ) + self.derived["aLne"] = aLT( + self.derived["r"], self.profiles["ne(10^19/m^3)"] + ) + self.derived["aLni"] = [] + for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): + self.derived["aLni"].append( + aLT(self.derived["r"], self.profiles["ni(10^19/m^3)"][:, i]) + ) + self.derived["aLni"] = np.transpose(np.array(self.derived["aLni"])) + + if "w0(rad/s)" not in self.profiles: + self.profiles["w0(rad/s)"] = self.profiles["rho(-)"] * 0.0 + self.derived["aLw0"] = aLT(self.derived["r"], self.profiles["w0(rad/s)"]) + self.derived["dw0dr"] = -grad( + self.derived["r"], self.profiles["w0(rad/s)"] + ) + + self.derived["dqdr"] = grad(self.derived["r"], self.profiles["q(-)"]) + + """ + Other, performance + """ + qFus = self.derived["qe_fus_MW"] + self.derived["qi_fus_MW"] + self.derived["Pfus"] = qFus[-1] * 5 + + # Note that in cases with NPRAD=0 in TRANPS, this includes 
radiation! no way to deal wit this... + qIn = self.derived["qe_aux_MW"] + self.derived["qi_aux_MW"] + self.derived["qIn"] = qIn[-1] + self.derived["Q"] = self.derived["Pfus"] / self.derived["qIn"] + self.derived["qHeat"] = qIn[-1] + qFus[-1] + + self.derived["qTr"] = ( + self.derived["qe_aux_MW"] + + self.derived["qi_aux_MW"] + + (self.derived["qe_fus_MW"] + self.derived["qi_fus_MW"]) + - self.derived["qrad_MW"] + ) + + self.derived["Prad"] = self.derived["qrad_MW"][-1] + self.derived["Prad_sync"] = self.derived["qrad_sync_MW"][-1] + self.derived["Prad_brem"] = self.derived["qrad_brem_MW"][-1] + self.derived["Prad_line"] = self.derived["qrad_line_MW"][-1] + self.derived["Psol"] = self.derived["qHeat"] - self.derived["Prad"] + + self.derived["Ti_thr"] = [] + self.derived["ni_thr"] = [] + for sp in range(len(self.Species)): + if self.Species[sp]["S"] == "therm": + self.derived["ni_thr"].append(self.profiles["ni(10^19/m^3)"][:, sp]) + self.derived["Ti_thr"].append(self.profiles["ti(keV)"][:, sp]) + + self.derived["ni_thr"] = np.transpose(self.derived["ni_thr"]) + self.derived["Ti_thr"] = np.transpose(np.array(self.derived["Ti_thr"])) + + if len(self.derived["ni_thr"].shape) == 1: + self.derived["ni_thr"] = self.derived["ni_thr"].reshape(-1, 1) + self.derived["Ti_thr"] = self.derived["Ti_thr"].reshape(-1, 1) + + self.derived["ni_thrAll"] = self.derived["ni_thr"].sum(axis=1) + + self.derived["ni_All"] = self.profiles["ni(10^19/m^3)"].sum(axis=1) + + + ( + self.derived["ptot_manual"], + self.derived["pe"], + self.derived["pi"], + self.derived["pi_all"], + ) = PLASMAtools.calculatePressure( + np.expand_dims(self.profiles["te(keV)"], 0), + np.expand_dims(np.transpose(self.profiles["ti(keV)"]), 0), + np.expand_dims(self.profiles["ne(10^19/m^3)"] * 0.1, 0), + np.expand_dims(np.transpose(self.profiles["ni(10^19/m^3)"] * 0.1), 0), + ) + self.derived["ptot_manual"], self.derived["pe"], self.derived["pi"], self.derived["pi_all"] = ( + self.derived["ptot_manual"][0,...], + 
self.derived["pe"][0,...], + self.derived["pi"][0,...], + self.derived["pi_all"][0,...], + ) + self.derived['pi_all'] = np.transpose(self.derived['pi_all']) # to have the same shape as ni_thr + + + ( + self.derived["pthr_manual"], + _, + self.derived["pi_thr"], + _, + ) = PLASMAtools.calculatePressure( + np.expand_dims(self.profiles["te(keV)"], 0), + np.expand_dims(np.transpose(self.derived["Ti_thr"]), 0), + np.expand_dims(self.profiles["ne(10^19/m^3)"] * 0.1, 0), + np.expand_dims(np.transpose(self.derived["ni_thr"] * 0.1), 0), + ) + self.derived["pthr_manual"], self.derived["pi_thr"] = ( + self.derived["pthr_manual"][0], + self.derived["pi_thr"][0], + ) + + + # ------- + # Content + # ------- + + ( + self.derived["We"], + self.derived["Wi_thr"], + self.derived["Ne"], + self.derived["Ni_thr"], + ) = PLASMAtools.calculateContent( + np.expand_dims(r, 0), + np.expand_dims(self.profiles["te(keV)"], 0), + np.expand_dims(np.transpose(self.derived["Ti_thr"]), 0), + np.expand_dims(self.profiles["ne(10^19/m^3)"] * 0.1, 0), + np.expand_dims(np.transpose(self.derived["ni_thr"] * 0.1), 0), + np.expand_dims(volp, 0), + ) + + ( + self.derived["We"], + self.derived["Wi_thr"], + self.derived["Ne"], + self.derived["Ni_thr"], + ) = ( + self.derived["We"][0], + self.derived["Wi_thr"][0], + self.derived["Ne"][0], + self.derived["Ni_thr"][0], + ) + + self.derived["Nthr"] = self.derived["Ne"] + self.derived["Ni_thr"] + self.derived["Wthr"] = self.derived["We"] + self.derived["Wi_thr"] # Thermal + + self.derived["tauE"] = self.derived["Wthr"] / self.derived["qHeat"] # Seconds + + self.derived["tauP"] = np.where(self.derived["geIn"] != 0, self.derived["Ne"] / self.derived["geIn"], np.inf) # Seconds + + + self.derived["tauPotauE"] = self.derived["tauP"] / self.derived["tauE"] + + # Dilutions + self.derived["fi"] = self.profiles["ni(10^19/m^3)"] / np.atleast_2d( + self.profiles["ne(10^19/m^3)"] + ).transpose().repeat(self.profiles["ni(10^19/m^3)"].shape[1], axis=1) + + # Vol-avg density + 
self.derived["volume"] = CALCtools.volume_integration(np.ones(r.shape[0]), r, volp)[ + -1 + ] # m^3 + self.derived["ne_vol20"] = ( + CALCtools.volume_integration(self.profiles["ne(10^19/m^3)"] * 0.1, r, volp)[-1] + / self.derived["volume"] + ) # 1E20/m^3 + + self.derived["ni_vol20"] = np.zeros(self.profiles["ni(10^19/m^3)"].shape[1]) + self.derived["fi_vol"] = np.zeros(self.profiles["ni(10^19/m^3)"].shape[1]) + for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): + self.derived["ni_vol20"][i] = ( + CALCtools.volume_integration( + self.profiles["ni(10^19/m^3)"][:, i] * 0.1, r, volp + )[-1] + / self.derived["volume"] + ) # 1E20/m^3 + self.derived["fi_vol"][i] = ( + self.derived["ni_vol20"][i] / self.derived["ne_vol20"] + ) + + self.derived["fi_onlyions_vol"] = self.derived["ni_vol20"] / np.sum( + self.derived["ni_vol20"] + ) + + self.derived["ne_peaking"] = ( + self.profiles["ne(10^19/m^3)"][0] * 0.1 / self.derived["ne_vol20"] + ) + + xcoord = self.derived[ + "rho_pol" + ] # to find the peaking at rho_pol (with square root) as in Angioni PRL 2003 + self.derived["ne_peaking0.2"] = ( + self.profiles["ne(10^19/m^3)"][np.argmin(np.abs(xcoord - 0.2))] + * 0.1 + / self.derived["ne_vol20"] + ) + + self.derived["Te_vol"] = ( + CALCtools.volume_integration(self.profiles["te(keV)"], r, volp)[-1] + / self.derived["volume"] + ) # keV + self.derived["Te_peaking"] = ( + self.profiles["te(keV)"][0] / self.derived["Te_vol"] + ) + self.derived["Ti_vol"] = ( + CALCtools.volume_integration(self.profiles["ti(keV)"][:, 0], r, volp)[-1] + / self.derived["volume"] + ) # keV + self.derived["Ti_peaking"] = ( + self.profiles["ti(keV)"][0, 0] / self.derived["Ti_vol"] + ) + + self.derived["ptot_manual_vol"] = ( + CALCtools.volume_integration(self.derived["ptot_manual"], r, volp)[-1] + / self.derived["volume"] + ) # MPa + self.derived["pthr_manual_vol"] = ( + CALCtools.volume_integration(self.derived["pthr_manual"], r, volp)[-1] + / self.derived["volume"] + ) # MPa + + 
self.derived['pfast_manual'] = self.derived['ptot_manual'] - self.derived['pthr_manual'] + self.derived["pfast_manual_vol"] = ( + CALCtools.volume_integration(self.derived["pfast_manual"], r, volp)[-1] + / self.derived["volume"] + ) # MPa + + self.derived['pfast_fraction'] = self.derived['pfast_manual_vol'] / self.derived['ptot_manual_vol'] + + #approximate pedestal top density + self.derived['ptop(Pa)'] = np.interp(0.90, self.profiles['rho(-)'], self.profiles['ptot(Pa)']) + + # Quasineutrality + self.derived["QN_Error"] = np.abs( + 1 - np.sum(self.derived["fi_vol"] * self.profiles["z"]) + ) + self.derived["Zeff"] = ( + np.sum(self.profiles["ni(10^19/m^3)"] * self.profiles["z"] ** 2, axis=1) + / self.profiles["ne(10^19/m^3)"] + ) + self.derived["Zeff_vol"] = ( + CALCtools.volume_integration(self.derived["Zeff"], r, volp)[-1] + / self.derived["volume"] + ) + + self.derived["nu_eff"] = PLASMAtools.coll_Angioni07( + self.derived["ne_vol20"] * 1e1, + self.derived["Te_vol"], + self.derived["Rgeo"], + Zeff=self.derived["Zeff_vol"], + ) + + self.derived["nu_eff2"] = PLASMAtools.coll_Angioni07( + self.derived["ne_vol20"] * 1e1, + self.derived["Te_vol"], + self.derived["Rgeo"], + Zeff=2.0, + ) + + # Avg mass + self.calculateMass() + + params_set_scaling = ( + np.abs(float(self.profiles["current(MA)"][-1])), + self.derived["Rgeo"], + self.derived["kappa_a"], + self.derived["ne_vol20"], + self.derived["a"] / self.derived["Rgeo"], + self.derived["B0"], + self.derived["mbg_main"], + self.derived["qHeat"], + ) + + self.derived["tau98y2"], self.derived["H98"] = PLASMAtools.tau98y2( + *params_set_scaling, tauE=self.derived["tauE"] + ) + self.derived["tau89p"], self.derived["H89"] = PLASMAtools.tau89p( + *params_set_scaling, tauE=self.derived["tauE"] + ) + self.derived["tau97L"], self.derived["H97L"] = PLASMAtools.tau97L( + *params_set_scaling, tauE=self.derived["tauE"] + ) + + """ + Mach number + """ + + Vtor_LF_Mach1 = PLASMAtools.constructVtorFromMach( + 1.0, 
self.profiles["ti(keV)"][:, 0], self.derived["mbg"] + ) # m/s + w0_Mach1 = Vtor_LF_Mach1 / (self.derived["R_LF"]) # rad/s + self.derived["MachNum"] = self.profiles["w0(rad/s)"] / w0_Mach1 + self.derived["MachNum_vol"] = ( + CALCtools.volume_integration(self.derived["MachNum"], r, volp)[-1] + / self.derived["volume"] + ) + + # Retain the old beta definition for comparison with 0D modeling + Beta_old = (self.derived["ptot_manual_vol"]* 1e6 / (self.derived["B0"] ** 2 / (2 * 4 * np.pi * 1e-7))) + self.derived["BetaN_engineering"] = (Beta_old / + (np.abs(float(self.profiles["current(MA)"][-1])) / + (self.derived["a"] * self.derived["B0"]) + )* 100.0 + ) # expressed in percent + + ''' + --------------------------------------------------------------------------------------------------- + Using B_unit, derive and for betap and betat calculations. + Equivalent to GACODE expro_bp2, expro_bt2 + --------------------------------------------------------------------------------------------------- + ''' + + self.derived["bp2_exp"] = self.derived["bp2_geo"] * self.derived["B_unit"] ** 2 + self.derived["bt2_exp"] = self.derived["bt2_geo"] * self.derived["B_unit"] ** 2 + + # Calculate the volume averages of bt2 and bp2 + + P = self.derived["bp2_exp"] + self.derived["bp2_vol_avg"] = CALCtools.volume_integration(P, r, volp)[-1] / self.derived["volume"] + P = self.derived["bt2_exp"] + self.derived["bt2_vol_avg"] = CALCtools.volume_integration(P, r, volp)[-1] / self.derived["volume"] + + # calculate beta_poloidal and beta_toroidal using volume averaged values + # mu0 = 4pi x 10^-7, also need to convert MPa to Pa + + self.derived["Beta_p"] = (2 * 4 * np.pi * 1e-7)*self.derived["ptot_manual_vol"]* 1e6/self.derived["bp2_vol_avg"] + self.derived["Beta_t"] = (2 * 4 * np.pi * 1e-7)*self.derived["ptot_manual_vol"]* 1e6/self.derived["bt2_vol_avg"] + + self.derived["Beta"] = 1/(1/self.derived["Beta_p"]+1/self.derived["Beta_t"]) + + TroyonFactor = np.abs(float(self.profiles["current(MA)"][-1])) / 
(self.derived["a"] * self.derived["B0"]) + + self.derived["BetaN"] = self.derived["Beta"] / TroyonFactor * 100.0 + + # --- + + nG = PLASMAtools.Greenwald_density( + np.abs(float(self.profiles["current(MA)"][-1])), + float(self.derived["r"][-1]), + ) + self.derived["fG"] = self.derived["ne_vol20"] / nG + self.derived["fG_x"] = self.profiles["ne(10^19/m^3)"]* 0.1 / nG + + self.derived["tite_all"] = self.profiles["ti(keV)"] / self.profiles["te(keV)"][:, np.newaxis] + self.derived["tite"] = self.derived["tite_all"][:, 0] + self.derived["tite_vol"] = self.derived["Ti_vol"] / self.derived["Te_vol"] + + self.derived["LH_nmin"] = PLASMAtools.LHthreshold_nmin( + np.abs(float(self.profiles["current(MA)"][-1])), + self.derived["B0"], + self.derived["a"], + self.derived["Rgeo"], + ) + + self.derived["LH_Martin2"] = ( + PLASMAtools.LHthreshold_Martin2( + self.derived["ne_vol20"], + self.derived["B0"], + self.derived["a"], + self.derived["Rgeo"], + nmin=self.derived["LH_nmin"], + ) + * (2 / self.derived["mbg_main"]) ** 1.11 + ) + + self.derived["LHratio"] = self.derived["Psol"] / self.derived["LH_Martin2"] + + self.readSpecies() + + # ------------------------------------------------------- + # q-star + # ------------------------------------------------------- + + self.derived["qstar"] = PLASMAtools.evaluate_qstar( + self.profiles['current(MA)'][0], + self.profiles['rcentr(m)'], + self.derived['kappa95'], + self.profiles['bcentr(T)'], + self.derived['eps'], + self.derived['delta95'], + ITERcorrection=False, + includeShaping=True, + )[0] + self.derived["qstar_ITER"] = PLASMAtools.evaluate_qstar( + self.profiles['current(MA)'][0], + self.profiles['rcentr(m)'], + self.derived['kappa95'], + self.profiles['bcentr(T)'], + self.derived['eps'], + self.derived['delta95'], + ITERcorrection=True, + includeShaping=True, + )[0] + + # ------------------------------------------------------- + # Separatrix estimations + # ------------------------------------------------------- + + # ~~~~ 
Estimate lambda_q + pressure_atm = self.derived["ptot_manual_vol"] * 1e6 / 101325.0 + Lambda_q = PLASMAtools.calculateHeatFluxWidth_Brunner(pressure_atm) + + # ~~~~ Estimate upstream temperature + Bt = self.profiles["bcentr(T)"][0] + Bp = self.derived["eps"] * Bt / self.derived["q95"] #TODO: VERY ROUGH APPROXIMATION!!!! + + self.derived['Te_lcfs_estimate'] = PLASMAtools.calculateUpstreamTemperature( + Lambda_q, + self.derived["q95"], + self.derived["ne_vol20"], + self.derived["Psol"], + self.profiles["rcentr(m)"][0], + Bp, + Bt + )[0] + + # ~~~~ Estimate upstream density + self.derived['ne_lcfs_estimate'] = self.derived["ne_vol20"] * 0.6 + + # ------------------------------------------------------- + # Transport parameters + # ------------------------------------------------------- + + self.derived['betae'] = PLASMAtools.betae( + self.profiles['te(keV)'], + self.profiles['ne(10^19/m^3)']*0.1, + self.derived["B_unit"] + ) + + self.derived['xnue'] = PLASMAtools.xnue( + torch.from_numpy(self.profiles['te(keV)']).to(torch.double), + torch.from_numpy(self.profiles['ne(10^19/m^3)']*0.1).to(torch.double), + self.derived["a"], + mref_u=self.derived["mi_ref"] + ).cpu().numpy() + + self.derived['debye'] = PLASMAtools.debye( + self.profiles['te(keV)'], + self.profiles['ne(10^19/m^3)']*0.1, + self.derived["mi_ref"], + self.derived["B_unit"] + ) + self.derived['s_hat'] = self.derived["r"]*self._deriv_gacode( np.log(abs(self.profiles["q(-)"])) ) + self.derived['s_q'] = (self.profiles["q(-)"] / self.derived['roa'])**2 * self.derived['s_hat'] + self.derived['s_q'][0] = 0.0 # infinite in first location + + # Derivate function + def _deriv_gacode(self,y): + return grad(self.derived["r"],y).cpu().numpy() + + def calculateMass(self): + self.derived["mbg"] = 0.0 + self.derived["fmain"] = 0.0 + for i in range(self.derived["ni_vol20"].shape[0]): + self.derived["mbg"] += ( + float(self.profiles["mass"][i]) * self.derived["fi_onlyions_vol"][i] + ) + + if self.DTplasmaBool: + 
self.derived["mbg_main"] = ( + self.profiles["mass"][self.Dion] + * self.derived["fi_onlyions_vol"][self.Dion] + + self.profiles["mass"][self.Tion] + * self.derived["fi_onlyions_vol"][self.Tion] + ) / ( + self.derived["fi_onlyions_vol"][self.Dion] + + self.derived["fi_onlyions_vol"][self.Tion] + ) + self.derived["fmain"] = ( + self.derived["fi_vol"][self.Dion] + self.derived["fi_vol"][self.Tion] + ) + else: + self.derived["mbg_main"] = self.profiles["mass"][self.Mion] + self.derived["fmain"] = self.derived["fi_vol"][self.Mion] + + def deriveContentByVolumes(self, rhos=[0.5], impurityPosition=3): + """ + Calculates total particles and energy for ions and electrons, at a given volume + It fails near axis because of the polynomial integral, requiring a number of points + """ + + min_number_points = 3 + + We_x = np.zeros(self.profiles["te(keV)"].shape[0]) + Wi_x = np.zeros(self.profiles["te(keV)"].shape[0]) + Ne_x = np.zeros(self.profiles["te(keV)"].shape[0]) + Ni_x = np.zeros(self.profiles["te(keV)"].shape[0]) + for j in range(self.profiles["te(keV)"].shape[0] - min_number_points): + i = j + min_number_points + We_x[i], Wi_x[i], Ne_x[i], _ = PLASMAtools.calculateContent( + np.expand_dims(self.derived["r"][:i], 0), + np.expand_dims(self.profiles["te(keV)"][:i], 0), + np.expand_dims(np.transpose(self.profiles["ti(keV)"][:i]), 0), + np.expand_dims(self.profiles["ne(10^19/m^3)"][:i] * 0.1, 0), + np.expand_dims( + np.transpose(self.profiles["ni(10^19/m^3)"][:i] * 0.1), 0 + ), + np.expand_dims(self.derived["volp_geo"][:i], 0), + ) + + _, _, Ni_x[i], _ = PLASMAtools.calculateContent( + np.expand_dims(self.derived["r"][:i], 0), + np.expand_dims(self.profiles["te(keV)"][:i], 0), + np.expand_dims(np.transpose(self.profiles["ti(keV)"][:i]), 0), + np.expand_dims( + self.profiles["ni(10^19/m^3)"][:i, impurityPosition] * 0.1, 0 + ), + np.expand_dims( + np.transpose(self.profiles["ni(10^19/m^3)"][:i] * 0.1), 0 + ), + np.expand_dims(self.derived["volp_geo"][:i], 0), + ) + + We, Wi,
Ne, Ni = ( + np.zeros(len(rhos)), + np.zeros(len(rhos)), + np.zeros(len(rhos)), + np.zeros(len(rhos)), + ) + for i in range(len(rhos)): + We[i] = np.interp(rhos[i], self.profiles["rho(-)"], We_x) + Wi[i] = np.interp(rhos[i], self.profiles["rho(-)"], Wi_x) + Ne[i] = np.interp(rhos[i], self.profiles["rho(-)"], Ne_x) + Ni[i] = np.interp(rhos[i], self.profiles["rho(-)"], Ni_x) + + return We, Wi, Ne, Ni + + def printInfo(self, label="", reDeriveIfNotFound=True): + + Prad_ratio = self.derived['Prad'] / self.derived['qHeat'] + Prad_ratio_brem = self.derived['Prad_brem']/self.derived['Prad'] + Prad_ratio_line = self.derived['Prad_line']/self.derived['Prad'] + Prad_ratio_sync = self.derived['Prad_sync']/self.derived['Prad'] + + try: + ImpurityText = "" + for i in range(len(self.Species)): + ImpurityText += f"{self.Species[i]['N']}({self.Species[i]['Z']:.0f},{self.Species[i]['A']:.0f}) = {self.derived['fi_vol'][i]:.1e}, " + ImpurityText = ImpurityText[:-2] + + print(f"\n***********************{label}****************") + print("Engineering Parameters:") + print(f"\tBt = {self.profiles['bcentr(T)'][0]:.2f}T, Ip = {self.profiles['current(MA)'][0]:.2f}MA (q95 = {self.derived['q95']:.2f}, q* = {self.derived['qstar']:.2f}, q*ITER = {self.derived['qstar_ITER']:.2f}), Pin = {self.derived['qIn']:.2f}MW") + print(f"\tR = {self.profiles['rcentr(m)'][0]:.2f}m, a = {self.derived['a']:.2f}m (eps = {self.derived['eps']:.3f})") + print(f"\tkappa_sep = {self.profiles['kappa(-)'][-1]:.2f}, kappa_995 = {self.derived['kappa995']:.2f}, kappa_95 = {self.derived['kappa95']:.2f}, kappa_a = {self.derived['kappa_a']:.2f}") + print(f"\tdelta_sep = {self.profiles['delta(-)'][-1]:.2f}, delta_995 = {self.derived['delta995']:.2f}, delta_95 = {self.derived['delta95']:.2f}") + print("Performance:") + print("\tQ = {0:.2f} (Pfus = {1:.1f}MW, Pin = {2:.1f}MW)".format(self.derived["Q"], self.derived["Pfus"], self.derived["qIn"])) + print("\tH98y2 = {0:.2f} (tauE = {1:.3f} s)".format(self.derived["H98"], 
self.derived["tauE"])) + print("\tH89p = {0:.2f} (H97L = {1:.2f})".format(self.derived["H89"], self.derived["H97L"])) + print("\tnu_ne = {0:.2f} (nu_eff = {1:.2f})".format(self.derived["ne_peaking"], self.derived["nu_eff"])) + print("\tnu_ne0.2 = {0:.2f} (nu_eff w/Zeff2 = {1:.2f})".format(self.derived["ne_peaking0.2"], self.derived["nu_eff2"])) + print(f"\tnu_Ti = {self.derived['Ti_peaking']:.2f}") + print(f"\tp_vol = {self.derived['ptot_manual_vol']:.2f} MPa ({self.derived['pfast_fraction']*100.0:.1f}% fast)") + print(f"\tBetaN = {self.derived['BetaN']:.3f} (BetaN w/B0 = {self.derived['BetaN_engineering']:.3f})") + print(f"\tPrad = {self.derived['Prad']:.1f}MW ({Prad_ratio*100.0:.1f}% of total) ({Prad_ratio_brem*100.0:.1f}% brem, {Prad_ratio_line*100.0:.1f}% line, {Prad_ratio_sync*100.0:.1f}% sync)") + print("\tPsol = {0:.1f}MW (fLH = {1:.2f})".format(self.derived["Psol"], self.derived["LHratio"])) + print("Operational point ( [<ne>,<Te>] = [{0:.2f},{1:.2f}] ) and species:".format(self.derived["ne_vol20"], self.derived["Te_vol"])) + print("\t<Ti> = {0:.2f} keV (<Ti>/<Te> = {1:.2f}, Ti0/Te0 = {2:.2f})".format(self.derived["Ti_vol"],self.derived["tite_vol"],self.derived["tite"][0],)) + print("\tfG = {0:.2f} (<ne> = {1:.2f} * 10^20 m^-3)".format(self.derived["fG"], self.derived["ne_vol20"])) + print(f"\tZeff = {self.derived['Zeff_vol']:.2f} (M_main = {self.derived['mbg_main']:.2f}, f_main = {self.derived['fmain']:.2f}) [QN err = {self.derived['QN_Error']:.1e}]") + print(f"\tMach = {self.derived['MachNum_vol']:.2f} (vol avg)") + print("Content:") + print("\tWe = {0:.2f} MJ, Wi_thr = {1:.2f} MJ (W_thr = {2:.2f} MJ)".format(self.derived["We"], self.derived["Wi_thr"], self.derived["Wthr"])) + print("\tNe = {0:.1f}*10^20, Ni_thr = {1:.1f}*10^20 (N_thr = {2:.1f}*10^20)".format(self.derived["Ne"], self.derived["Ni_thr"], self.derived["Nthr"])) + print(f"\ttauE = { self.derived['tauE']:.3f} s, tauP = {self.derived['tauP']:.3f} s (tauP/tauE = {self.derived['tauPotauE']:.2f})") + print("Species
concentration:") + print(f"\t{ImpurityText}") + print("******************************************************") + except KeyError: + print("\t- When printing info, not all keys found, probably because this input.gacode class came from an old MITIM version",typeMsg="w",) + if reDeriveIfNotFound: + self.derive_quantities() + self.printInfo(label=label, reDeriveIfNotFound=False) + + def export_to_table(self, table=None, name=None): + + if table is None: + table = DataTable() + + data = [name] + for var in table.variables: + if table.variables[var][1] is not None: + if table.variables[var][1].split("_")[0] == "rho": + ix = np.argmin( + np.abs( + self.profiles["rho(-)"] + - float(table.variables[var][1].split("_")[1]) + ) + ) + elif table.variables[var][1].split("_")[0] == "psi": + ix = np.argmin( + np.abs( + self.derived["psi_pol_n"] + - float(table.variables[var][1].split("_")[1]) + ) + ) + elif table.variables[var][1].split("_")[0] == "pos": + ix = int(table.variables[var][1].split("_")[1]) + vari = self.__dict__[table.variables[var][2]][table.variables[var][0]][ + ix + ] + else: + vari = self.__dict__[table.variables[var][2]][table.variables[var][0]] + + data.append(f"{vari*table.variables[var][4]:{table.variables[var][3]}}") + + table.data.append(data) + print(f"\t* Exported {name} to table") + + return table + + def makeAllThermalIonsHaveSameTemp(self, refIon=0): + SpecRef = self.Species[refIon]["N"] + tiRef = self.profiles["ti(keV)"][:, refIon] + + for sp in range(len(self.Species)): + if self.Species[sp]["S"] == "therm" and sp != refIon: + print( + f"\t\t\t- Temperature forcing {self.Species[sp]['N']} --> {SpecRef}" + ) + self.profiles["ti(keV)"][:, sp] = tiRef + + def scaleAllThermalDensities(self, scaleFactor=1.0): + scaleFactor_ions = scaleFactor + + for sp in range(len(self.Species)): + if self.Species[sp]["S"] == "therm": + print(f"\t\t\t- Scaling density of {self.Species[sp]['N']} by an average factor of {np.mean(scaleFactor_ions):.3f}") + ni_orig = 
self.profiles["ni(10^19/m^3)"][:, sp] + self.profiles["ni(10^19/m^3)"][:, sp] = scaleFactor_ions * ni_orig + + def toNumpyArrays(self): + self.profiles.update({key: tensor.cpu().detach().cpu().numpy() for key, tensor in self.profiles.items() if isinstance(tensor, torch.Tensor)}) + self.derived.update({key: tensor.cpu().detach().cpu().numpy() for key, tensor in self.derived.items() if isinstance(tensor, torch.Tensor)}) + + def changeResolution(self, n=100, rho_new=None, interpolation_function=MATHtools.extrapolateCubicSpline): + rho = copy.deepcopy(self.profiles["rho(-)"]) + + if rho_new is None: + n = int(n) + rho_new = np.linspace(rho[0], rho[-1], n) + else: + rho_new = np.unique(np.sort(rho_new)) + n = len(rho_new) + + self.profiles["nexp"] = [str(n)] + + pro = self.profiles + for i in pro: + if i not in self.titles_single: + if len(pro[i].shape) == 1: + pro[i] = interpolation_function(rho_new, rho, pro[i]) + else: + prof = [] + for j in range(pro[i].shape[1]): + pp = interpolation_function(rho_new, rho, pro[i][:, j]) + prof.append(pp) + prof = np.array(prof) + + pro[i] = np.transpose(prof) + + self.derive_quantities() + + print(f"\t\t- Resolution of profiles changed to {n} points with function {interpolation_function}") + + def DTplasma(self): + self.Dion, self.Tion = None, None + try: + self.Dion = np.where(self.profiles["name"] == "D")[0][0] + except: + pass + try: + self.Tion = np.where(self.profiles["name"] == "T")[0][0] + except: + pass + + if self.Dion is not None and self.Tion is not None: + self.DTplasmaBool = True + else: + self.DTplasmaBool = False + if self.Dion is not None: + self.Mion = self.Dion # Main + elif self.Tion is not None: + self.Mion = self.Tion # Main + else: + self.Mion = 0 # If no D or T, assume that the main ion is the first and only + + self.ion_list_main = [self.Dion+1, self.Tion+1] if self.DTplasmaBool else [self.Mion+1] + self.ion_list_impurities = [i+1 for i in range(len(self.Species)) if i+1 not in self.ion_list_main] + + def 
remove(self, ions_list): + # First order them + ions_list.sort() + print("\t\t- Removing ions in positions (of ions order, no zero): ",ions_list,typeMsg="i",) + + ions_list = [(i - 1 if i >-1 else i) for i in ions_list] + + fail = False + + var_changes = ["name", "type", "mass", "z"] + for i in var_changes: + try: + self.profiles[i] = np.delete(self.profiles[i], ions_list) + except: + print(f"\t\t\t* Ions {[k+1 for k in ions_list]} could not be removed",typeMsg="w") + fail = True + break + + if not fail: + var_changes = ["ni(10^19/m^3)", "ti(keV)", "vpol(m/s)", "vtor(m/s)"] + for i in var_changes: + if i in self.profiles: + self.profiles[i] = np.delete(self.profiles[i], ions_list, axis=1) + + if not fail: + # Ensure we extract the scalar value from the array + self.profiles["nion"] = np.array([str(int(self.profiles["nion"][0]) - len(ions_list))]) + + self.readSpecies() + self.derive_quantities(rederiveGeometry=False) + + print("\t\t\t- Set of ions in updated profiles: ", self.profiles["name"]) + + def lumpSpecies( + self, ions_list=[2, 3], allthermal=False, forcename=None, force_integer=False, force_mass=None + ): + """ + if (D,Z1,Z2), lumping Z1 and Z2 requires ions_list = [2,3] + + if force_integer, the Zeff won't be kept exactly + """ + + # All thermal except first + if allthermal: + ions_list = [] + for i in range(len(self.Species) - 1): + if self.Species[i + 1]["S"] == "therm": + ions_list.append(i + 2) + lab = "therm" + else: + lab = "therm" + + print("\t\t- Lumping ions in positions (of ions order, no zero): ",ions_list,typeMsg="i",) + + if forcename is None: + forcename = "LUMPED" + + # Contributions to dilution and to Zeff + fZ1 = np.zeros(self.derived["fi"].shape[0]) + fZ2 = np.zeros(self.derived["fi"].shape[0]) + for i in ions_list: + fZ1 += self.Species[i - 1]["Z"] * self.derived["fi"][:, i - 1] + fZ2 += self.Species[i - 1]["Z"] ** 2 * self.derived["fi"][:, i - 1] + + Zr = fZ2 / fZ1 + Zr_vol = CALCtools.volume_integration(Zr, self.derived["r"], 
self.derived["volp_geo"])[-1] / self.derived["volume"] + + print(f'\t\t\t* Original plasma had Zeff_vol={self.derived["Zeff_vol"]:.2f}, QN error={self.derived["QN_Error"]:.4f}') + + # New specie parameters + if force_integer: + Z = round(Zr_vol) + print(f"\t\t\t* Lumped Z forced to be an integer ({Zr_vol}->{Z}), so plasma may not be quasineutral or fulfill original Zeff",typeMsg="w",) + else: + Z = Zr_vol + + A = Z * 2 if force_mass is None else force_mass + nZ = fZ1 / Z * self.profiles["ne(10^19/m^3)"] + mass_density = A * self.derived["fi"] + + # Compute the mass weighted average velocity profiles + if "vpol(m/s)" in self.profiles: + vpol = np.sum((mass_density * self.profiles["vpol(m/s)"])[:,np.array(ions_list)-1],axis=1) / np.sum(mass_density[:,np.array(ions_list)-1],axis=1) + if "vtor(m/s)" in self.profiles: + mask = np.isclose(np.mean(self.profiles["vtor(m/s)"][:,np.array(ions_list)-1],axis=0),1e-12) + vtor = np.sum((mass_density * self.profiles["vtor(m/s)"])[:,np.array(ions_list)-1][:,~mask],axis=1) / np.sum(mass_density[:,np.array(ions_list)-1][:,~mask],axis=1) + + print(f"\t\t\t* New lumped impurity has Z={Z:.2f}, A={A:.2f} (calculated as 2*Z)") + + # Insert cases + self.profiles["nion"] = np.array([f"{int(self.profiles['nion'][0])+1}"]) + self.profiles["name"] = np.append(self.profiles["name"], forcename) + self.profiles["mass"] = np.append(self.profiles["mass"], A) + self.profiles["z"] = np.append(self.profiles["z"], Z) + self.profiles["type"] = np.append(self.profiles["type"], f"[{lab}]") + self.profiles["ni(10^19/m^3)"] = np.append( + self.profiles["ni(10^19/m^3)"], np.transpose(np.atleast_2d(nZ)), axis=1 + ) + self.profiles["ti(keV)"] = np.append( + self.profiles["ti(keV)"], + np.transpose(np.atleast_2d(self.profiles["ti(keV)"][:, 0])), + axis=1, + ) + if "vpol(m/s)" in self.profiles: + self.profiles["vpol(m/s)"] = np.append( + self.profiles["vpol(m/s)"], np.transpose(np.atleast_2d(vpol)), axis=1 + ) + if "vtor(m/s)" in self.profiles: + 
self.profiles["vtor(m/s)"] = np.append( + self.profiles["vtor(m/s)"], np.transpose(np.atleast_2d(vtor)), axis=1 + ) + + self.readSpecies() + self.derive_quantities(rederiveGeometry=False) + + # Remove species + self.remove(ions_list) + + # Contributions to dilution and to Zeff + print(f'\t\t\t* New plasma has Zeff_vol={self.derived["Zeff_vol"]:.2f}, QN error={self.derived["QN_Error"]:.4f}') + + def lumpImpurities(self): + + self.lumpSpecies(ions_list=self.ion_list_impurities) + + def lumpIons(self): + + self.lumpSpecies(ions_list=self.ion_list_main+self.ion_list_impurities) + + def lumpDT(self): + + if self.DTplasmaBool: + self.lumpSpecies(ions_list=self.ion_list_main, forcename="DT", force_mass=2.5) + else: + print('\t\t- No DT plasma, so no lumping of main ions') + + self.moveSpecie(pos=len(self.Species), pos_new=1) + + def changeZeff( + self, + Zeff, + ion_pos = 2, # Position of ion to change (if (D,Z1,Z2), pos 1 -> change Z1) + keep_fmain = False, # If True, it will keep fmain and change Z of ion in position ion_pos. If False, it will change the content of ion in position ion_pos and the content of quasineutral ions to achieve Zeff + fmain_force = None, # If keep_fmain is True, it will force fmain to this value. 
If None, it will use the current fmain + enforceSameGradients = False # If True, it will scale all thermal densities to have the same gradients after changing Zeff + ): + + if not keep_fmain and fmain_force is not None: + raise ValueError("[MITIM] fmain_force can only be used if keep_fmain is True") + + if fmain_force is not None: + fmain_factor = fmain_force / self.derived["fmain"] + else: + fmain_factor = 1.0 + + if self.DTplasmaBool: + quasineutral_ions = [self.Dion, self.Tion] + else: + quasineutral_ions = [self.Mion] + + if not keep_fmain: + print(f'\t\t- Changing Zeff (from {self.derived["Zeff_vol"]:.3f} to {Zeff=:.3f}) by changing content of ion in position {ion_pos} {self.Species[ion_pos]["N"],self.Species[ion_pos]["Z"]}, quasineutralized by ions {quasineutral_ions}',typeMsg="i") + else: + print(f'\t\t- Changing Zeff (from {self.derived["Zeff_vol"]:.3f} to {Zeff=:.3f}) by changing content and Z of ion in position {ion_pos} {self.Species[ion_pos]["N"],self.Species[ion_pos]["Z"]}, quasineutralized by ions {quasineutral_ions} and keeping fmain={self.derived["fmain"]*fmain_factor:.3f}',typeMsg="i") + + # Plasma needs to be in quasineutrality to start with + self.enforceQuasineutrality() + + # ------------------------------------------------------ + # Contributions to equations + # ------------------------------------------------------ + Zq = np.zeros(self.derived["fi"].shape[0]) + Zq2 = np.zeros(self.derived["fi"].shape[0]) + fZq = np.zeros(self.derived["fi"].shape[0]) + fZq2 = np.zeros(self.derived["fi"].shape[0]) + fZj = np.zeros(self.derived["fi"].shape[0]) + fZj2 = np.zeros(self.derived["fi"].shape[0]) + for i in range(len(self.Species)): + + # Ions for quasineutrality (main ones) + if i in quasineutral_ions: + Zq += self.Species[i]["Z"] + Zq2 += self.Species[i]["Z"] ** 2 + + fZq += self.Species[i]["Z"] * self.derived["fi"][:, i] * fmain_factor + fZq2 += self.Species[i]["Z"] ** 2 * self.derived["fi"][:, i] * fmain_factor + # Non-quasineutral and not the ion 
to change + elif i != ion_pos: + fZj += self.Species[i]["Z"] * self.derived["fi"][:, i] + fZj2 += self.Species[i]["Z"] ** 2 * self.derived["fi"][:, i] + # Ion to change + else: + Zk = self.Species[i]["Z"] + + fi_orig = self.derived["fi"][:, ion_pos] + Zi_orig = self.Species[ion_pos]["Z"] + Ai_orig = self.Species[ion_pos]["A"] + + if not keep_fmain: + # ------------------------------------------------------ + # Find free parameters (fk and fq) + # ------------------------------------------------------ + + fk = ( Zeff - (1-fZj)*Zq2/Zq - fZj2 ) / ( Zk**2 - Zk*Zq2/Zq) + fq = ( 1 - fZj - fk*Zk ) / Zq + + if (fq<0).any(): + raise ValueError(f"Zeff cannot be reduced by changing ion #{ion_pos} because it would require negative densities for quasineutral ions") + + # ------------------------------------------------------ + # Insert + # ------------------------------------------------------ + + self.profiles["ni(10^19/m^3)"][:, ion_pos] = fk * self.profiles["ne(10^19/m^3)"] + for i in quasineutral_ions: + self.profiles["ni(10^19/m^3)"][:, i] = fq * self.profiles["ne(10^19/m^3)"] + else: + # ------------------------------------------------------ + # Find free parameters (fk and Zk) + # ------------------------------------------------------ + + Zk = (Zeff - fZq2 - fZj2) / (1 - fZq - fZj) + + # I need a single value + Zk_ave = CALCtools.volume_integration(Zk, self.profiles["rmin(m)"], self.derived["volp_geo"])[-1] / self.derived["volume"] + + fk = (1 - fZq - fZj) / Zk_ave + + # ------------------------------------------------------ + # Insert + # ------------------------------------------------------ + + self.profiles['z'][ion_pos] = Zk_ave + self.profiles['mass'][ion_pos] = Zk_ave * 2 + self.profiles["ni(10^19/m^3)"][:, ion_pos] = fk * self.profiles["ne(10^19/m^3)"] + + if fmain_force is not None: + for i in quasineutral_ions: + self.profiles["ni(10^19/m^3)"][:, i] *= fmain_factor + + self.readSpecies() + + self.derive_quantities(rederiveGeometry=False) + + if 
enforceSameGradients: + self.scaleAllThermalDensities() + self.derive_quantities(rederiveGeometry=False) + + print(f'\t\t\t* Dilution changed from {fi_orig.mean():.2e} (vol avg) of ion [{Zi_orig:.2f},{Ai_orig:.2f}] to { self.derived["fi"][:, ion_pos].mean():.2e} of ion [{self.profiles["z"][ion_pos]:.2f}, {self.profiles["mass"][ion_pos]:.2f}] to achieve Zeff={self.derived["Zeff_vol"]:.3f} (fDT={self.derived["fmain"]:.3f}) [quasineutrality error = {self.derived["QN_Error"]:.1e}]') + + def moveSpecie(self, pos=2, pos_new=1): + """ + if (D,Z1,Z2), pos 1 pos_new 2-> (Z1,D,Z2) + """ + + if pos_new > pos: + pos, pos_new = pos_new, pos + + position_to_moveFROM_in_profiles = pos - 1 + position_to_moveTO_in_profiles = pos_new - 1 + + print(f'\t\t- Moving ion in position (of ions order, no zero) {pos} ({self.profiles["name"][position_to_moveFROM_in_profiles]}) to {pos_new}',typeMsg="i",) + + self.profiles["nion"] = np.array([f"{int(self.profiles['nion'][0])+1}"]) + + for ikey in ["name", "mass", "z", "type", "ni(10^19/m^3)", "ti(keV)", "vpol(m/s)", "vtor(m/s)"]: + if ikey in self.profiles: + if len(self.profiles[ikey].shape) > 1: + axis = 1 + newly = self.profiles[ikey][:, position_to_moveFROM_in_profiles] + else: + axis = 0 + newly = self.profiles[ikey][position_to_moveFROM_in_profiles] + self.profiles[ikey] = np.insert( + self.profiles[ikey], position_to_moveTO_in_profiles, newly, axis=axis + ) + + self.readSpecies() + self.derive_quantities(rederiveGeometry=False) + + if position_to_moveTO_in_profiles > position_to_moveFROM_in_profiles: + self.remove([position_to_moveFROM_in_profiles + 1]) + else: + self.remove([position_to_moveFROM_in_profiles + 2]) + + def addSpecie(self, Z=5.0, mass=10.0, fi_vol=0.1, forcename=None): + print(f"\t\t- Creating new specie with Z={Z}, mass={mass}, fi_vol={fi_vol}",typeMsg="i",) + + if forcename is None: + forcename = "LUMPED" + + lab = "therm" + nZ = fi_vol * self.profiles["ne(10^19/m^3)"] + + self.profiles["nion"] = 
np.array([f"{int(self.profiles['nion'][0])+1}"]) + self.profiles["name"] = np.append(self.profiles["name"], forcename) + self.profiles["mass"] = np.append(self.profiles["mass"], mass) + self.profiles["z"] = np.append(self.profiles["z"], Z) + self.profiles["type"] = np.append(self.profiles["type"], f"[{lab}]") + self.profiles["ni(10^19/m^3)"] = np.append( + self.profiles["ni(10^19/m^3)"], np.transpose(np.atleast_2d(nZ)), axis=1 + ) + self.profiles["ti(keV)"] = np.append( + self.profiles["ti(keV)"], + np.transpose(np.atleast_2d(self.profiles["ti(keV)"][:, 0])), + axis=1, + ) + if "vtor(m/s)" in self.profiles: + self.profiles["vtor(m/s)"] = np.append( + self.profiles["vtor(m/s)"], + np.transpose(np.atleast_2d(self.profiles["vtor(m/s)"][:, 0])), + axis=1, + ) + + self.readSpecies() + self.derive_quantities(rederiveGeometry=False) + + def correct(self, options={}, write=False, new_file=None): + """ + if name= T D LUMPED, and I want to eliminate D, removeIons = [2] + """ + + recalculate_ptot = options.get("recalculate_ptot", True) # Only done by default + removeIons = options.get("removeIons", []) + remove_fast = options.get("remove_fast", False) + quasineutrality = options.get("quasineutrality", False) + enforce_same_aLn = options.get("enforce_same_aLn", False) + groupQIONE = options.get("groupQIONE", False) + ensure_positive_Gamma = options.get("ensure_positive_Gamma", False) + force_mach = options.get("force_mach", None) + thermalize_fast = options.get("thermalize_fast", False) + + print("\t- Custom correction of input.gacode file has been requested") + + # ---------------------------------------------------------------------- + # Correct + # ---------------------------------------------------------------------- + + # Remove desired ions + if len(removeIons) > 0: + self.remove(removeIons) + + # Remove fast + if remove_fast: + ions_fast = [] + for sp in range(len(self.Species)): + if self.Species[sp]["S"] != "therm": + ions_fast.append(sp + 1) + if len(ions_fast) > 0: 
+ print( + f"\t\t- Detected fast ions in positions {ions_fast}, removing them..." + ) + self.remove(ions_fast) + # Fast as thermal + elif thermalize_fast: + self.make_fast_ions_thermal() + + # Correct LUMPED + for i in range(len(self.profiles["name"])): + if self.profiles["name"][i] in ["LUMPED", "None"]: + name = ionName( + int(self.profiles["z"][i]), int(self.profiles["mass"][i]) + ) + if name is not None: + print( + f'\t\t- Ion in position #{i+1} was named LUMPED with Z={self.profiles["z"][i]}, now it is renamed to {name}', + typeMsg="i", + ) + self.profiles["name"][i] = name + else: + print( + f'\t\t- Ion in position #{i+1} was named LUMPED with Z={self.profiles["z"][i]}, but I could not find what element it is, so doing nothing', + typeMsg="w", + ) + + # Correct qione + if groupQIONE and (np.abs(self.profiles["qione(MW/m^3)"].sum()) > 1e-14): + print('\t\t- Inserting "qione" into "qrfe"', typeMsg="i") + self.profiles["qrfe(MW/m^3)"] += self.profiles["qione(MW/m^3)"] + self.profiles["qione(MW/m^3)"] = self.profiles["qione(MW/m^3)"] * 0.0 + + # Make all thermal ions have the same gradient as the electron density, by keeping volume average constant + if enforce_same_aLn: + self.enforce_same_density_gradients() + + # Enforce quasineutrality + if quasineutrality: + self.enforceQuasineutrality() + + print(f"\t\t\t* Quasineutrality error = {self.derived['QN_Error']:.1e}") + + # Recompute ptot + if recalculate_ptot: + self.derive_quantities(rederiveGeometry=False) + self.selfconsistentPTOT() + + # If I don't trust the negative particle flux in the core that comes from TRANSP... 
+ if ensure_positive_Gamma: + print("\t\t- Making particle flux always positive", typeMsg="i") + self.profiles["qpar_beam(1/m^3/s)"] = self.profiles["qpar_beam(1/m^3/s)"].clip(0) + self.profiles["qpar_wall(1/m^3/s)"] = self.profiles["qpar_wall(1/m^3/s)"].clip(0) + + # Mach + if force_mach is not None: + self.introduceRotationProfile(Mach_LF=force_mach) + + # ---------------------------------------------------------------------- + # Re-derive + # ---------------------------------------------------------------------- + + self.derive_quantities(rederiveGeometry=False) + + # ---------------------------------------------------------------------- + # Write + # ---------------------------------------------------------------------- + if write: + self.write_state(file=new_file) + self.printInfo() + + def enforce_same_density_gradients(self, onlyThermal=False): + txt = "" + for sp in range(len(self.Species)): + if (not onlyThermal) or (self.Species[sp]["S"] == "therm"): + self.profiles["ni(10^19/m^3)"][:, sp] = self.derived["fi_vol"][sp] * self.profiles["ne(10^19/m^3)"] + txt += f"{self.Species[sp]['N']} " + print(f"\t\t- Making all {'thermal ' if onlyThermal else ''}ions ({txt}) have the same a/Ln as electrons (making them an exact flat fraction)",typeMsg="i",) + self.derive_quantities(rederiveGeometry=False) + + def make_fast_ions_thermal(self): + modified_num = 0 + for i in range(len(self.Species)): + if self.Species[i]["S"] != "therm": + print( + f'\t\t- Specie {i} ({self.profiles["name"][i]}) was fast, but now it is considered thermal' + ) + self.Species[i]["S"] = "therm" + self.profiles["type"][i] = "[therm]" + self.profiles["ti(keV)"][:, i] = self.profiles["ti(keV)"][:, 0] + modified_num += 1 + if modified_num > 0: + print("\t- Making fast species as if they were thermal (to keep dilution effect and Qi-sum of fluxes)",typeMsg="w") + + def selfconsistentPTOT(self): + print(f"\t\t* Recomputing ptot and inserting it as ptot(Pa), changed from p0 = 
{self.profiles['ptot(Pa)'][0] * 1e-3:.1f} to {self.derived['ptot_manual'][0]*1e+3:.1f} kPa",typeMsg="i") + self.profiles["ptot(Pa)"] = self.derived["ptot_manual"] * 1e6 + + def enforceQuasineutrality(self, using_ion = None): + print(f"\t\t- Enforcing quasineutrality (error = {self.derived['QN_Error']:.1e})",typeMsg="i",) + + # What's the lack of quasineutrality? + ni = self.profiles["ne(10^19/m^3)"] * 0.0 + for sp in range(len(self.Species)): + ni += self.profiles["ni(10^19/m^3)"][:, sp] * self.profiles["z"][sp] + ne_missing = self.profiles["ne(10^19/m^3)"] - ni + + # What ion to modify? + if using_ion is None: + if self.DTplasmaBool: + print("\t\t\t* Enforcing quasineutrality by modifying D and T equally") + prev_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Dion]) + self.profiles["ni(10^19/m^3)"][:, self.Dion] += ne_missing / 2 + self.profiles["ni(10^19/m^3)"][:, self.Tion] += ne_missing / 2 + new_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Dion]) + else: + print(f"\t\t\t* Enforcing quasineutrality by modifying main ion (position #{self.Mion})") + prev_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Mion]) + self.profiles["ni(10^19/m^3)"][:, self.Mion] += ne_missing + new_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, self.Mion]) + else: + print(f"\t\t\t* Enforcing quasineutrality by modifying ion (position #{using_ion})") + prev_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, using_ion]) + self.profiles["ni(10^19/m^3)"][:, using_ion] += ne_missing + new_on_axis = copy.deepcopy(self.profiles["ni(10^19/m^3)"][0, using_ion]) + + + print(f"\t\t\t\t- Changed on-axis density from n0 = {prev_on_axis:.2f} to {new_on_axis:.2f} ({100*(new_on_axis-prev_on_axis)/prev_on_axis:.1f}%)") + + self.derive_quantities(rederiveGeometry=False) + + def introduceRotationProfile(self, Mach_LF=1.0, new_file=None): + print(f"\t- Enforcing Mach Number in LF of {Mach_LF}") + self.derive_quantities() + Vtor_LF = 
PLASMAtools.constructVtorFromMach( + Mach_LF, self.profiles["ti(keV)"][:, 0], self.derived["mbg"] + ) # m/s + + self.profiles["w0(rad/s)"] = Vtor_LF / (self.derived["R_LF"]) # rad/s + + self.derive_quantities() + + if new_file is not None: + self.write_state(file=new_file) + + def parabolizePlasma(self): + _, T = PLASMAtools.parabolicProfile( + Tbar=self.derived["Te_vol"], + nu=self.derived["Te_peaking"], + rho=self.profiles["rho(-)"], + Tedge=self.profiles["te(keV)"][-1], + ) + _, Ti = PLASMAtools.parabolicProfile( + Tbar=self.derived["Ti_vol"], + nu=self.derived["Ti_peaking"], + rho=self.profiles["rho(-)"], + Tedge=self.profiles["ti(keV)"][-1, 0], + ) + _, n = PLASMAtools.parabolicProfile( + Tbar=self.derived["ne_vol20"] * 1e1, + nu=self.derived["ne_peaking"], + rho=self.profiles["rho(-)"], + Tedge=self.profiles["ne(10^19/m^3)"][-1], + ) + + self.profiles["te(keV)"] = T + + self.profiles["ti(keV)"][:, 0] = Ti + self.makeAllThermalIonsHaveSameTemp(refIon=0) + + factor_n = n / self.profiles["ne(10^19/m^3)"] + self.profiles["ne(10^19/m^3)"] = n + self.scaleAllThermalDensities(scaleFactor=factor_n) + + self.derive_quantities() + + def changeRFpower(self, PrfMW=25.0): + """ + keeps same partition + """ + print(f"- Changing the RF power from {self.derived['qRF_MW'][-1]:.1f} MW to {PrfMW:.1f} MW",typeMsg="i",) + + if self.derived["qRF_MW"][-1] == 0.0: + raise Exception("No RF power in the input.gacode, cannot modify the RF power") + + for i in ["qrfe(MW/m^3)", "qrfi(MW/m^3)"]: + self.profiles[i] = self.profiles[i] * PrfMW / self.derived["qRF_MW"][-1] + + self.derive_quantities() + + def imposeBCtemps(self, TkeV=0.5, rho=0.9, typeEdge="linear", Tesep=0.1, Tisep=0.2): + + ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) + + self.profiles["te(keV)"] = self.profiles["te(keV)"] * TkeV / self.profiles["te(keV)"][ix] + + print(f"- Producing {typeEdge} boundary condition @ rho = {rho}, T = {TkeV} keV",typeMsg="i",) + + for sp in range(len(self.Species)): + if 
self.Species[sp]["S"] == "therm": + self.profiles["ti(keV)"][:, sp] = self.profiles["ti(keV)"][:, sp] * TkeV / self.profiles["ti(keV)"][ix, sp] + + if typeEdge == "linear": + self.profiles["te(keV)"][ix:] = np.linspace(TkeV, Tesep, len(self.profiles["rho(-)"][ix:])) + + for sp in range(len(self.Species)): + if self.Species[sp]["S"] == "therm": + self.profiles["ti(keV)"][ix:, sp] = np.linspace(TkeV, Tisep, len(self.profiles["rho(-)"][ix:])) + + elif typeEdge == "same": + pass + else: + raise Exception("no edge") + + def imposeBCdens(self, n20=2.0, rho=0.9, typeEdge="linear", nedge20=0.5, isn20_edge=True): + ix = np.argmin(np.abs(rho - self.profiles["rho(-)"])) + + # Determine the factor to scale the density (either average or at rho) + if not isn20_edge: + print(f"- Changing the initial average density from {self.derived['ne_vol20']:.1f} 1E20/m3 to {n20:.1f} 1E20/m3",typeMsg="i") + factor = n20 / self.derived["ne_vol20"] + else: + print(f"- Changing the density at rho={rho} from {self.profiles['ne(10^19/m^3)'][ix]*1E-1:.1f} 1E20/m3 to {n20:.1f} 1E20/m3",typeMsg="i") + factor = n20 / (self.profiles["ne(10^19/m^3)"][ix]*1E-1) + # ------------------------------------------------------------------ + + # Scale the density profiles + for i in ["ne(10^19/m^3)", "ni(10^19/m^3)"]: + self.profiles[i] = self.profiles[i] * factor + + # Apply the edge condition + if typeEdge == "linear": + factor_x = np.linspace(self.profiles["ne(10^19/m^3)"][ix],nedge20 * 1e1,len(self.profiles["rho(-)"][ix:]),)/ self.profiles["ne(10^19/m^3)"][ix:] + + self.profiles["ne(10^19/m^3)"][ix:] = self.profiles["ne(10^19/m^3)"][ix:] * factor_x + + for i in range(self.profiles["ni(10^19/m^3)"].shape[1]): + self.profiles["ni(10^19/m^3)"][ix:, i] = self.profiles["ni(10^19/m^3)"][ix:, i] * factor_x + + elif typeEdge == "same": + pass + else: + raise Exception("no edge") + + def addSawtoothEffectOnOhmic(self, PohTot, mixRadius=None, plotYN=False): + """ + This will implement a flat profile inside the 
mixRadius to reduce the ohmic power by certain amount + """ + + if mixRadius is None: + mixRadius = self.profiles["rho(-)"][np.where(self.profiles["q(-)"] > 1)][0] + + print(f"\t- Original Ohmic power: {self.derived['qOhm_MW'][-1]:.2f}MW") + Ohmic_old = copy.deepcopy(self.profiles["qohme(MW/m^3)"]) + + dvol = self.derived["volp_geo"] * np.append( + [0], np.diff(self.derived["r"]) + ) + + print( + f"\t- Will implement sawtooth ohmic power correction inside rho={mixRadius}" + ) + from mitim_tools.transp_tools import CDFtools + Psaw = CDFtools.profilePower( + self.profiles["rho(-)"], + dvol, + PohTot - self.derived["qOhm_MW"][-1], + mixRadius, + ) + self.profiles["qohme(MW/m^3)"] += Psaw + self.derive_quantities() + + print(f"\t- New Ohmic power: {self.derived['qOhm_MW'][-1]:.2f}MW") + Ohmic_new = copy.deepcopy(self.profiles["qohme(MW/m^3)"]) + + if plotYN: + fig, ax = plt.subplots() + ax.plot(self.profiles["rho(-)"], Ohmic_old, "r", lw=2) + ax.plot(self.profiles["rho(-)"], Ohmic_new, "g", lw=2) + plt.show() + + # ************************************************************************************************************************************************ + # Plotting methods for the state class, which is used to plot the profiles, powers, geometry, gradients, flows, and other quantities. 
+ # ************************************************************************************************************************************************ + + def plot( + self, + fn=None,fnlab="", + axs1=None, axs2=None, axs3=None, axs4=None, axsFlows=None, axs6=None, axsImps=None, + color="b",legYN=True,extralab="",lsFlows="-",legFlows=True,showtexts=True,lastRhoGradients=0.89, + ): + if axs1 is None: + if fn is None: + from mitim_tools.misc_tools.GUItools import FigureNotebook + + self.fn = FigureNotebook("PROFILES Notebook", geometry="1600x1000") + + figs = state_plotting.add_figures(self.fn, fnlab=fnlab) + axs1, axs2, axs3, axs4, axsFlows, axs6, axsImps = state_plotting.add_axes(figs) + + lw, fs = 1, 6 + + state_plotting.plot_profiles(self,axs1, color=color, legYN=legYN, extralab=extralab, lw=lw, fs=fs) + state_plotting.plot_powers(self,axs2, color=color, legYN=legYN, extralab=extralab, lw=lw, fs=fs) + self.plot_geometry(axs3, color=color, legYN=legYN, extralab=extralab, lw=lw, fs=fs) + state_plotting.plot_gradients(self,axs4, color=color, lw=lw, lastRho=lastRhoGradients, label=extralab) + if axsFlows is not None: + state_plotting.plot_flows(self, axsFlows, ls=lsFlows, leg=legFlows, showtexts=showtexts) + state_plotting.plot_other(self,axs6, color=color, lw=lw, extralab=extralab, fs=fs) + state_plotting.plot_ions(self,axsImps, color=color, legYN=legYN, extralab=extralab, lw=lw, fs=fs) + + # To allow this to be called from the object + def plot_gradients(self, *args, **kwargs): + return state_plotting.plot_gradients(self, *args, **kwargs) + + def plot_geometry(self, *args, **kwargs): + pass + + def plot_flows(self, *args, **kwargs): + return state_plotting.plot_flows(self, *args, **kwargs) + + def plotPeaking( + self, ax, c="b", marker="*", label="", debugPlot=False, printVals=False + ): + nu_effCGYRO = self.derived["nu_eff"] * 2 / self.derived["Zeff_vol"] + ne_peaking = self.derived["ne_peaking0.2"] + ax.scatter([nu_effCGYRO], [ne_peaking], s=400, c=c, marker=marker, 
label=label) + + if printVals: + print(f"\t- nu_eff = {nu_effCGYRO}, ne_peaking = {ne_peaking}") + + # Extra + r = self.derived["r"] + volp = self.derived["volp_geo"] + ix = np.argmin(np.abs(self.profiles["rho(-)"] - 0.9)) + + if debugPlot: + fig, axq = plt.subplots() + + ne = self.profiles["ne(10^19/m^3)"] + axq.plot(self.profiles["rho(-)"], ne, color="m") + ne_vol = ( + CALCtools.volume_integration(ne * 0.1, r, volp)[-1] / self.derived["volume"] + ) + axq.axhline(y=ne_vol * 10, color="m") + + ne = copy.deepcopy(self.profiles["ne(10^19/m^3)"]) + ne[ix:] = (0,) * len(ne[ix:]) + ne_vol = CALCtools.volume_integration(ne * 0.1, r, volp)[-1] / self.derived["volume"] + ne_peaking0 = ( + ne[np.argmin(np.abs(self.derived["rho_pol"] - 0.2))] * 0.1 / ne_vol + ) + + if debugPlot: + axq.plot(self.profiles["rho(-)"], ne, color="r") + axq.axhline(y=ne_vol * 10, color="r") + + ne = copy.deepcopy(self.profiles["ne(10^19/m^3)"]) + ne[ix:] = (ne[ix],) * len(ne[ix:]) + ne_vol = CALCtools.volume_integration(ne * 0.1, r, volp)[-1] / self.derived["volume"] + ne_peaking1 = ( + ne[np.argmin(np.abs(self.derived["rho_pol"] - 0.2))] * 0.1 / ne_vol + ) + + ne_peaking0 = ne_peaking + + ax.errorbar( + [nu_effCGYRO], + [ne_peaking], + yerr=[[ne_peaking - ne_peaking1], [ne_peaking0 - ne_peaking]], + marker=marker, + c=c, + markersize=16, + capsize=2.0, + fmt="s", + elinewidth=1.0, + capthick=1.0, + ) + + if debugPlot: + axq.plot(self.profiles["rho(-)"], ne, color="b") + axq.axhline(y=ne_vol * 10, color="b") + plt.show() + + # print(f'{ne_peaking0}-{ne_peaking}-{ne_peaking1}') + + return nu_effCGYRO, ne_peaking + + def plotRelevant(self, axs = None, color = 'b', label ='', lw = 1, ms = 1): + + if axs is None: + fig = plt.figure() + axs = fig.subplot_mosaic( + """ + ABCDH + AEFGI + """ + ) + axs = [axs['A'], axs['B'], axs['C'], axs['D'], axs['E'], axs['F'], axs['G'], axs['H'], axs['I']] + + # ---------------------------------- + # Equilibria + # ---------------------------------- + + ax = axs[0] + 
rho = np.linspace(0, 1, 21) + + self.plot_state_flux_surfaces(ax=ax, surfaces_rho=rho, label=label, color=color, lw=lw, lw1=lw*3) + + ax.set_xlabel("R (m)") + ax.set_ylabel("Z (m)") + ax.set_aspect("equal") + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Equilibria") + + # ---------------------------------- + # Kinetic Profiles + # ---------------------------------- + + # T profiles + ax = axs[1] + + ax.plot(self.profiles['rho(-)'], self.profiles['te(keV)'], '-o', markersize=ms, lw = lw, label=label+', e', color=color) + ax.plot(self.profiles['rho(-)'], self.profiles['ti(keV)'][:,0], '--*', markersize=ms, lw = lw, label=label+', i', color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$T$ (keV)") + #ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Temperatures") + + # ne profiles + ax = axs[2] + + ax.plot(self.profiles['rho(-)'], self.profiles['ne(10^19/m^3)']*1E-1, '-o', markersize=ms, lw = lw, label=label, color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$n_e$ ($10^{20}m^{-3}$)") + #ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Electron Density") + + # ---------------------------------- + # Pressure + # ---------------------------------- + + ax = axs[3] + + ax.plot(self.profiles['rho(-)'], self.derived['ptot_manual'], '-o', markersize=ms, lw = lw, label=label, color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$p_{kin}$ (MPa)") + #ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Total Pressure") + + # ---------------------------------- + # Current + # ---------------------------------- + + # q-profile + ax = axs[4] + + ax.plot(self.profiles['rho(-)'], self.profiles['q(-)'], '-o', markersize=ms, lw = lw, label=label, color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$q$") + 
#ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Safety Factor") + + # ---------------------------------- + # Powers + # ---------------------------------- + + # RF + ax = axs[5] + + ax.plot(self.profiles['rho(-)'], self.profiles['qrfe(MW/m^3)'], '-o', markersize=ms, lw = lw, label=label+', e', color=color) + ax.plot(self.profiles['rho(-)'], self.profiles['qrfi(MW/m^3)'], '--*', markersize=ms, lw = lw, label=label+', i', color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$P_{ich}$ (MW/m$^3$)") + #ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("ICH Power Deposition") + + # Ohmic + ax = axs[6] + + ax.plot(self.profiles['rho(-)'], self.profiles['qohme(MW/m^3)'], '-o', markersize=ms, lw = lw, label=label, color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$P_{oh}$ (MW/m$^3$)") + #ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Ohmic Power Deposition") + + # ---------------------------------- + # Heat fluxes + # ---------------------------------- + + ax = axs[7] + + ax.plot(self.profiles['rho(-)'], self.derived['qe_MWm2'], '-o', markersize=ms, lw = lw, label=label+', e', color=color) + ax.plot(self.profiles['rho(-)'], self.derived['qi_MWm2'], '--*', markersize=ms, lw = lw, label=label+', i', color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$Q$ ($MW/m^2$)") + #ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Energy Fluxes") + + # ---------------------------------- + # Dynamic targets + # ---------------------------------- + + ax = axs[8] + + ax.plot(self.profiles['rho(-)'], self.derived['qrad'], '-o', markersize=ms, lw = lw, label=label+', rad', color=color) + ax.plot(self.profiles['rho(-)'], self.profiles['qei(MW/m^3)'], '--*', markersize=ms, lw = lw, 
label=label+', exc', color=color) + if 'qfuse(MW/m^3)' in self.profiles: + ax.plot(self.profiles['rho(-)'], self.profiles['qfuse(MW/m^3)']+self.profiles['qfusi(MW/m^3)'], '-.s', markersize=ms, lw = lw, label=label+', fus', color=color) + + ax.set_xlabel("$\\rho_N$") + ax.set_ylabel("$Q$ ($MW/m^2$)") + #ax.set_ylim(bottom = 0) + ax.set_xlim(0,1) + ax.legend(prop={'size':8}) + GRAPHICStools.addDenseAxis(ax) + ax.set_title("Dynamic Targets") + + def csv(self, file="input.gacode.xlsx"): + dictExcel = IOtools.OrderedDict() + + for ikey in self.profiles: + print(ikey) + if len(self.profiles[ikey].shape) == 1: + dictExcel[ikey] = self.profiles[ikey] + else: + dictExcel[ikey] = self.profiles[ikey][:, 0] + + IOtools.writeExcel_fromDict(dictExcel, file, fromRow=1) + + # ************************************************************************************************************************************************ + # Code conversions + # ************************************************************************************************************************************************ + + def _print_gb_normalizations(self,L_label,Z_label,A_label,n_label,T_label, B_label, L, Z, A): + print(f'\t- GB normalizations, such that Q_gb = n_ref * T_ref^5/2 * m_ref^0.5 / (Z_ref * L_ref * B_ref)^2') + print(f'\t\t* L_ref = {L_label} = {L:.3f}') + print(f'\t\t* Z_ref = {Z_label} = {Z:.3f}') + print(f'\t\t* A_ref = {A_label} = {A:.3f}') + print(f'\t\t* B_ref = {B_label}') + print(f'\t\t* n_ref = {n_label}') + print(f'\t\t* T_ref = {T_label}') + print(f'') + + def _calculate_pressure_gradient_from_aLx(self, pe, pi, aLTe, aLTi, aLne, aLni, a): + ''' + pe and pi in MPa. 
pi two dimensional + ''' + + adpedr = - pe * 1E6 * (aLTe + aLne) + adpjdr = - pi * 1E6 * (aLTi + aLni) + + dpdr = ( adpedr + adpjdr.sum(axis=-1)) / a + + return dpdr + + def to_tglf(self, r=[0.5], code_settings='SAT0', r_is_rho = True): + + # <> Function to interpolate a curve <> + from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function + + # Determine if the input radius is rho toroidal or r/a + if r_is_rho: + r_interpolation = self.profiles['rho(-)'] + else: + r_interpolation = self.derived['roa'] + + # Determine the number of species to use in TGLF + max_species_tglf = 6 # TGLF only accepts up to 6 species + if len(self.Species) > max_species_tglf-1: + print(f"\t- Warning: TGLF only accepts {max_species_tglf} species, but there are {len(self.Species)} ions pecies in the GACODE input. The first {max_species_tglf-1} will be used.", typeMsg="w") + tglf_ions_num = max_species_tglf - 1 + else: + tglf_ions_num = len(self.Species) + + # Determine the mass reference for TGLF (use 2.0 for D-mass normalization; derivatives use mD_u elsewhere) + mass_ref = 2.0 + + self._print_gb_normalizations('a', 'Z_D', 'A_D', 'n_e', 'T_e', 'B_unit', self.derived["a"], 1.0, mass_ref) + + # ----------------------------------------------------------------------- + # Derived profiles + # ----------------------------------------------------------------------- + + sign_it = -np.sign(self.profiles["current(MA)"][-1]) + sign_bt = -np.sign(self.profiles["bcentr(T)"][-1]) + + s_kappa = self.derived["r"] / self.profiles["kappa(-)"] * self._deriv_gacode(self.profiles["kappa(-)"]) + s_delta = self.derived["r"] * self._deriv_gacode(self.profiles["delta(-)"]) + s_zeta = self.derived["r"] * self._deriv_gacode(self.profiles["zeta(-)"]) + + ''' + Total pressure + -------------------------------------------------------- + Recompute pprime with those species that belong to this run #TODO not exact? 
+ ''' + + dpdr = self._calculate_pressure_gradient_from_aLx( + self.derived['pe'], self.derived['pi_all'][:,:tglf_ions_num], + self.derived['aLTe'], self.derived['aLTi'][:,:tglf_ions_num], + self.derived['aLne'], self.derived['aLni'][:,:tglf_ions_num], + self.derived['a'] + ) + + pprime = 1E-7 * abs(self.profiles["q(-)"])*self.derived['a']**2/self.derived["r"]/self.derived["B_unit"]**2*dpdr + pprime[0] = 0 # infinite in first location + + ''' + Rotations + -------------------------------------------------------- + From TGYRO/TGLF definitions + w0p = expro_w0p(:)/100.0 + f_rot(:) = w0p(:)/w0_norm + gamma_p0 = -r_maj(i_r)*f_rot(i_r)*w0_norm + gamma_eb0 = gamma_p0*r(i_r)/(q_abs*r_maj(i_r)) + ''' + + w0p = self._deriv_gacode(self.profiles["w0(rad/s)"]) + gamma_p0 = -self.profiles["rmaj(m)"]*w0p + gamma_eb0 = -self._deriv_gacode(self.profiles["w0(rad/s)"]) * self.derived["r"]/ np.abs(self.profiles["q(-)"]) + + vexb_shear = -sign_it * gamma_eb0 * self.derived["a"]/self.derived['c_s'] + vpar_shear = -sign_it * gamma_p0 * self.derived["a"]/self.derived['c_s'] + vpar = -sign_it * self.profiles["rmaj(m)"]*self.profiles["w0(rad/s)"]/self.derived['c_s'] + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Prepare the inputs for TGLF + # --------------------------------------------------------------------------------------------------------------------------------------- + + input_parameters = {} + for rho in r: + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Define interpolator at this rho + # --------------------------------------------------------------------------------------------------------------------------------------- + + def interpolator(y): + return interpolation_function(rho, r_interpolation,y).item() + + # 
--------------------------------------------------------------------------------------------------------------------------------------- + # Controls come from options + # --------------------------------------------------------------------------------------------------------------------------------------- + + controls = GACODEdefaults.addTGLFcontrol(code_settings) + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Species come from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + species = { + 1: { + 'ZS': -1.0, + 'MASS': PLASMAtools.me_u / mass_ref, + 'RLNS': interpolator(self.derived['aLne']), + 'RLTS': interpolator(self.derived['aLTe']), + 'TAUS': 1.0, + 'AS': 1.0, + 'VPAR': interpolator(vpar), + 'VPAR_SHEAR': interpolator(vpar_shear), + 'VNS_SHEAR': 0.0, + 'VTS_SHEAR': 0.0}, + } + + for i in range(min(len(self.Species), max_species_tglf-1)): + species[i+2] = { + 'ZS': self.Species[i]['Z'], + 'MASS': self.Species[i]['A']/mass_ref, + 'RLNS': interpolator(self.derived['aLni'][:,i]), + 'RLTS': interpolator(self.derived["aLTi"][:,i]), + 'TAUS': interpolator(self.derived["tite_all"][:,i]), + 'AS': interpolator(self.derived['fi'][:,i]), + 'VPAR': interpolator(vpar), + 'VPAR_SHEAR': interpolator(vpar_shear), + 'VNS_SHEAR': 0.0, + 'VTS_SHEAR': 0.0 + } + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Plasma comes from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + plasma = { + 'NS': len(species), + 'SIGN_BT': sign_bt, + 'SIGN_IT': sign_it, + 'VEXB': 0.0, + 'VEXB_SHEAR': interpolator(vexb_shear), + 'XNUE': interpolator(self.derived['xnue']), + 'ZEFF': 
interpolator(self.derived['Zeff']), + 'DEBYE': interpolator(self.derived['debye']), + 'BETAE': interpolator(self.derived['betae']), + } + + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Geometry comes from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + parameters = { + 'RMIN_LOC': self.derived['roa'], + 'RMAJ_LOC': self.derived['Rmajoa'], + 'ZMAJ_LOC': self.derived["Zmagoa"], + 'DRMINDX_LOC': np.ones(self.profiles["rho(-)"].shape), # Force 1.0 because of numerical issues in TGLF + 'DRMAJDX_LOC': self._deriv_gacode(self.profiles["rmaj(m)"]), + 'DZMAJDX_LOC': self._deriv_gacode(self.profiles["zmag(m)"]), + 'Q_LOC': np.abs(self.profiles["q(-)"]), + 'KAPPA_LOC': self.profiles["kappa(-)"], + 'S_KAPPA_LOC': s_kappa, + 'DELTA_LOC': self.profiles["delta(-)"], + 'S_DELTA_LOC': s_delta, + 'ZETA_LOC': self.profiles["zeta(-)"], + 'S_ZETA_LOC': s_zeta, + 'Q_PRIME_LOC': self.derived['s_q'], + 'P_PRIME_LOC': pprime, + } + + # Add MXH and derivatives + for ikey in self.profiles: + if 'shape_cos' in ikey or 'shape_sin' in ikey: + + # TGLF only accepts 6, as of July 2025 + if int(ikey[-4]) > 6: + continue + + key_mod = ikey.upper().split('(')[0] + + parameters[key_mod] = self.profiles[ikey] + parameters[f"{key_mod.split('_')[0]}_S_{key_mod.split('_')[-1]}"] = self.derived["r"] * self._deriv_gacode(self.profiles[ikey]) + + for k in parameters: + par = torch.nan_to_num(torch.from_numpy(parameters[k]) if type(parameters[k]) is np.ndarray else parameters[k], nan=0.0, posinf=1E10, neginf=-1E10) + plasma[k] = interpolator(par) + + plasma['BETA_LOC'] = 0.0 + plasma['KX0_LOC'] = 0.0 + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Merging + # 
--------------------------------------------------------------------------------------------------------------------------------------- + + input_dict = controls | plasma + + for i in range(len(species)): + for k in species[i+1]: + input_dict[f'{k}_{i+1}'] = species[i+1][k] + + input_parameters[rho] = input_dict + + return input_parameters + + def to_neo(self, r=[0.5], r_is_rho = True, code_settings='Sonic'): + + # <> Function to interpolate a curve <> + from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function + + # Determine if the input radius is rho toroidal or r/a + if r_is_rho: + r_interpolation = self.profiles['rho(-)'] + else: + r_interpolation = self.derived['roa'] + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Prepare the inputs + # --------------------------------------------------------------------------------------------------------------------------------------- + + # Determine the mass reference + mass_ref = 2.0 + + sign_it = int(-np.sign(self.profiles["current(MA)"][-1])) + sign_bt = int(-np.sign(self.profiles["bcentr(T)"][-1])) + + s_kappa = self.derived["r"] / self.profiles["kappa(-)"] * self._deriv_gacode(self.profiles["kappa(-)"]) + s_delta = self.derived["r"] * self._deriv_gacode(self.profiles["delta(-)"]) + s_zeta = self.derived["r"] * self._deriv_gacode(self.profiles["zeta(-)"]) + + # Rotations + rmaj = self.derived['Rmajoa'] + cs = self.derived['c_s'] + a = self.derived['a'] + mach = self.profiles["w0(rad/s)"] * (self.derived['Rmajoa']*a) + gamma_p = self._deriv_gacode(self.profiles["w0(rad/s)"]) * (self.derived['Rmajoa']*a) + + # NEO definition: 'OMEGA_ROT=',mach_loc/rmaj_loc/cs_loc + omega_rot = mach / rmaj / cs # Equivalent to: self.profiles["w0(rad/s)"] / self.derived['c_s'] * a + + # NEO definition: 'OMEGA_ROT_DERIV=',-gamma_p_loc*a/cs_loc/rmaj_loc + omega_rot_deriv = gamma_p * a / cs / rmaj # Equivalent 
to: self._deriv_gacode(self.profiles["w0(rad/s)"])/ self.derived['c_s'] * self.derived['a']**2 + + self._print_gb_normalizations('a', 'Z_D', 'A_D', 'n_e', 'T_e', 'B_unit', self.derived["a"], 1.0, mass_ref) + + input_parameters = {} + for rho in r: + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Define interpolator at this rho + # --------------------------------------------------------------------------------------------------------------------------------------- + + def interpolator(y): + return interpolation_function(rho, r_interpolation,y).item() + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Controls come from options + # --------------------------------------------------------------------------------------------------------------------------------------- + + controls = GACODEdefaults.addNEOcontrol(code_settings) + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Species come from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + species = {} + for i in range(len(self.Species)): + species[i+1] = { + 'Z': self.Species[i]['Z'], + 'MASS': self.Species[i]['A']/mass_ref, + 'DLNNDR': interpolator(self.derived['aLni'][:,i]), + 'DLNTDR': interpolator(self.derived["aLTi"][:,i]), + 'TEMP': interpolator(self.derived["tite_all"][:,i]), + 'DENS': interpolator(self.derived['fi'][:,i]), + } + + ie = i+2 + species[ie] = { + 'Z': -1.0, + 'MASS': 0.000272445, + 'DLNNDR': interpolator(self.derived['aLne']), + 'DLNTDR': interpolator(self.derived['aLTe']), + 'TEMP': 1.0, + 'DENS': 1.0, + } + + # 
--------------------------------------------------------------------------------------------------------------------------------------- + # Plasma comes from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + #TODO Does this work with no deuterium first ion? + factor_nu = species[1]['Z']**4 * species[1]['DENS'] * (species[ie]['MASS']/species[1]['MASS'])**0.5 * species[1]['TEMP']**(-1.5) + + plasma = { + 'N_SPECIES': len(species), + 'IPCCW': sign_bt, + 'BTCCW': sign_it, + 'OMEGA_ROT': interpolator(omega_rot), + 'OMEGA_ROT_DERIV': interpolator(omega_rot_deriv), + 'NU_1': interpolator(self.derived['xnue'])* factor_nu, + 'RHO_STAR': interpolator(self.derived["rho_sa"]), + } + + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Geometry comes from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + parameters = { + 'RMIN_OVER_A': self.derived['roa'], + 'RMAJ_OVER_A': self.derived['Rmajoa'], + 'SHIFT': self._deriv_gacode(self.profiles["rmaj(m)"]), + 'ZMAG_OVER_A': self.derived["Zmagoa"], + 'S_ZMAG': self._deriv_gacode(self.profiles["zmag(m)"]), + 'Q': np.abs(self.profiles["q(-)"]), + 'SHEAR': self.derived["s_hat"], + 'KAPPA': self.profiles["kappa(-)"], + 'S_KAPPA': s_kappa, + 'DELTA': self.profiles["delta(-)"], + 'S_DELTA': s_delta, + 'ZETA': self.profiles["zeta(-)"], + 'S_ZETA': s_zeta, + } + + # Add MXH and derivatives + for ikey in self.profiles: + if 'shape_cos' in ikey or 'shape_sin' in ikey: + + # TGLF only accepts 6, as of July 2025 + if int(ikey[-4]) > 6: + continue + + key_mod = ikey.upper().split('(')[0] + + parameters[key_mod] = self.profiles[ikey] + parameters[f"{key_mod.split('_')[0]}_S_{key_mod.split('_')[-1]}"] = self.derived["r"] * 
self._deriv_gacode(self.profiles[ikey]) + + for k in parameters: + par = torch.nan_to_num(torch.from_numpy(parameters[k]) if type(parameters[k]) is np.ndarray else parameters[k], nan=0.0, posinf=1E10, neginf=-1E10) + plasma[k] = interpolator(par) + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Merging + # --------------------------------------------------------------------------------------------------------------------------------------- + + input_dict = controls | plasma + + for i in range(len(species)): + for k in species[i+1]: + input_dict[f'{k}_{i+1}'] = species[i+1][k] + + input_parameters[rho] = input_dict + + return input_parameters + + def to_cgyro(self, r=[0.5], r_is_rho = True, code_settings = 'Linear'): + + # <> Function to interpolate a curve <> + from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function + + # Determine if the input radius is rho toroidal or r/a + if r_is_rho: + r_interpolation = self.profiles['rho(-)'] + else: + r_interpolation = self.derived['roa'] + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Prepare the inputs + # --------------------------------------------------------------------------------------------------------------------------------------- + + # Determine the mass reference + mass_ref = 2.0 + + sign_it = int(-np.sign(self.profiles["current(MA)"][-1])) + sign_bt = int(-np.sign(self.profiles["bcentr(T)"][-1])) + + s_kappa = self.derived["r"] / self.profiles["kappa(-)"] * self._deriv_gacode(self.profiles["kappa(-)"]) + s_delta = self.derived["r"] * self._deriv_gacode(self.profiles["delta(-)"]) + s_zeta = self.derived["r"] * self._deriv_gacode(self.profiles["zeta(-)"]) + + # Rotations + cs = self.derived['c_s'] + a = self.derived['a'] + mach = self.profiles["w0(rad/s)"] * (self.derived['Rmajoa']*a) 
+ gamma_p = -self._deriv_gacode(self.profiles["w0(rad/s)"]) * (self.derived['Rmajoa']*a) + gamma_e = -self._deriv_gacode(self.profiles["w0(rad/s)"]) * (self.profiles['rmin(m)'] / self.profiles['q(-)']) + + # CGYRO definition: 'MACH=',mach_loc/cs_loc + mach = mach / cs + + # CGYRO definition: 'GAMMA_P=',gamma_p_loc*a/cs_loc + gamma_p = gamma_p * a / cs + + # CGYRO definition: 'GAMMA_E=',gamma_e_loc*a/cs_loc + gamma_e = gamma_e * a / cs + + # Because in MITIMstate I keep Bunit always positive, but CGYRO routines may need it negative? #TODO + sign_Bunit = np.sign(self.profiles['torfluxa(Wb/radian)'][0]) + + self._print_gb_normalizations('a', 'Z_D', 'A_D', 'n_e', 'T_e', 'B_unit', self.derived["a"], 1.0, mass_ref) + + input_parameters = {} + for rho in r: + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Define interpolator at this rho + # --------------------------------------------------------------------------------------------------------------------------------------- + + def interpolator(y): + return interpolation_function(rho, r_interpolation,y).item() + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Controls come from options + # --------------------------------------------------------------------------------------------------------------------------------------- + + controls = GACODEdefaults.addCGYROcontrol(code_settings) + controls['PROFILE_MODEL'] = 1 + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Species come from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + species = {} + for i in range(len(self.Species)): + species[i+1] = { + 'Z': self.Species[i]['Z'], + 'MASS': 
self.Species[i]['A']/mass_ref, + 'DLNNDR': interpolator(self.derived['aLni'][:,i]), + 'DLNTDR': interpolator(self.derived["aLTi"][:,i]), + 'TEMP': interpolator(self.derived["tite_all"][:,i]), + 'DENS': interpolator(self.derived['fi'][:,i]), + } + + ie = i+2 + species[ie] = { + 'Z': -1.0, + 'MASS': 0.000272445, + 'DLNNDR': interpolator(self.derived['aLne']), + 'DLNTDR': interpolator(self.derived['aLTe']), + 'TEMP': 1.0, + 'DENS': 1.0, + } + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Plasma comes from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + plasma = { + 'N_SPECIES': len(species), + 'IPCCW': sign_bt, + 'BTCCW': sign_it, + 'MACH': interpolator(mach), + 'GAMMA_E': interpolator(gamma_e), + 'GAMMA_P': interpolator(gamma_p), + 'NU_EE': interpolator(self.derived['xnue']), + 'BETAE_UNIT': interpolator(self.derived['betae']), + 'LAMBDA_STAR': interpolator(self.derived['debye']) * sign_Bunit, + } + + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Geometry comes from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + parameters = { + 'RMIN': self.derived['roa'], + 'RMAJ': self.derived['Rmajoa'], + 'SHIFT': self._deriv_gacode(self.profiles["rmaj(m)"]), + 'ZMAG': self.derived["Zmagoa"], + 'DZMAG': self._deriv_gacode(self.profiles["zmag(m)"]), + 'Q': np.abs(self.profiles["q(-)"]), + 'S': self.derived["s_hat"], + 'KAPPA': self.profiles["kappa(-)"], + 'S_KAPPA': s_kappa, + 'DELTA': self.profiles["delta(-)"], + 'S_DELTA': s_delta, + 'ZETA': self.profiles["zeta(-)"], + 'S_ZETA': s_zeta, + } + + # Add MXH and derivatives + for ikey in self.profiles: + if 'shape_cos' in ikey or 
'shape_sin' in ikey: + + # TGLF only accepts 6, as of July 2025 + if int(ikey[-4]) > 6: + continue + + key_mod = ikey.upper().split('(')[0] + + parameters[key_mod] = self.profiles[ikey] + parameters[f"{key_mod.split('_')[0]}_S_{key_mod.split('_')[-1]}"] = self.derived["r"] * self._deriv_gacode(self.profiles[ikey]) + + for k in parameters: + par = torch.nan_to_num(torch.from_numpy(parameters[k]) if type(parameters[k]) is np.ndarray else parameters[k], nan=0.0, posinf=1E10, neginf=-1E10) + plasma[k] = interpolator(par) + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Merging + # --------------------------------------------------------------------------------------------------------------------------------------- + + input_dict = controls | plasma + + for i in range(len(species)): + for k in species[i+1]: + input_dict[f'{k}_{i+1}'] = species[i+1][k] + + input_parameters[rho] = input_dict + + return input_parameters + + def to_gx(self, r=[0.5], r_is_rho = True, code_settings = 'Linear'): + + # <> Function to interpolate a curve <> + from mitim_tools.misc_tools.MATHtools import extrapolateCubicSpline as interpolation_function + + # Determine if the input radius is rho toroidal or r/a + if r_is_rho: + r_interpolation = self.profiles['rho(-)'] + else: + r_interpolation = self.derived['roa'] + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Prepare the inputs + # --------------------------------------------------------------------------------------------------------------------------------------- + + # Determine the mass reference + mass_ref = 2.0 + + dpdr = self._calculate_pressure_gradient_from_aLx( + self.derived['pe'], self.derived['pi_all'][:,:], + self.derived['aLTe'], self.derived['aLTi'][:,:], + self.derived['aLne'], self.derived['aLni'][:,:], + self.derived['a'] + ) + 
betaprim = -(8*np.pi*1E-7) * self.derived['a'] / self.derived['B_unit']**2 * dpdr + + #TODO #to check + s_kappa = self.derived["r"] / self.profiles["kappa(-)"] * self._deriv_gacode(self.profiles["kappa(-)"]) + s_delta = self.derived["r"] * self._deriv_gacode(self.profiles["delta(-)"]) + + self._print_gb_normalizations('a', 'Z_D', 'A_D', 'n_e', 'T_e', 'B_unit', self.derived["a"], 1.0, mass_ref) + + input_parameters = {} + for rho in r: + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Define interpolator at this rho + # --------------------------------------------------------------------------------------------------------------------------------------- + + def interpolator(y): + return interpolation_function(rho, r_interpolation,y).item() + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Controls come from options + # --------------------------------------------------------------------------------------------------------------------------------------- + + controls = GACODEdefaults.addGXcontrol(code_settings) + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Species come from profiles + # --------------------------------------------------------------------------------------------------------------------------------------- + + species = {} + + # Ions + for i in range(len(self.Species)): + + nu_ii = self.derived['xnue'] * \ + (self.Species[i]['Z']/self.profiles['ze'][0])**4 * \ + (self.profiles['ni(10^19/m^3)'][:,0]/self.profiles['ne(10^19/m^3)']) * \ + (self.profiles['mass'][i]/self.profiles['masse'][0])**-0.5 * \ + (self.profiles['ti(keV)'][:,0]/self.profiles['te(keV)'])**-1.5 + + species[i+1] = { + 'z': self.Species[i]['Z'], + 'mass': self.Species[i]['A']/mass_ref, + 'temp': 
interpolator(self.derived["tite_all"][:,i]), + 'dens': interpolator(self.derived['fi'][:,i]), + 'fprim': interpolator(self.derived['aLni'][:,i]), + 'tprim': interpolator(self.derived["aLTi"][:,i]), + 'vnewk': interpolator(nu_ii), + 'type': 'ion', + } + + # Electrons + ie = i+2 + species[ie] = { + 'z': -1.0, + 'mass': 0.000272445, + 'temp': 1.0, + 'dens': 1.0, + 'fprim': interpolator(self.derived['aLne']), + 'tprim': interpolator(self.derived['aLTe']), + 'vnewk': interpolator(self.derived['xnue']), + 'type': 'electron' + } + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Plasma and geometry + # --------------------------------------------------------------------------------------------------------------------------------------- + + plasma = { + 'nspecies': len(species) + } + + parameters = { + 'beta': self.derived['betae'], + 'rhoc': self.derived['roa'], + 'Rmaj': self.derived['Rmajoa'], + 'R_geo': self.derived['Rmajoa'] / abs(self.derived['B_unit'] / self.derived['B0']), + 'shift': self._deriv_gacode(self.profiles["rmaj(m)"]), + 'qinp': np.abs(self.profiles["q(-)"]), + 'shat': self.derived["s_hat"], + 'akappa': self.profiles["kappa(-)"], + 'akappri': s_kappa, + 'tri': self.profiles["delta(-)"], + 'tripri': s_delta, + 'betaprim': betaprim, + } + + for k in parameters: + par = torch.nan_to_num(torch.from_numpy(parameters[k]) if type(parameters[k]) is np.ndarray else parameters[k], nan=0.0, posinf=1E10, neginf=-1E10) + plasma[k] = interpolator(par) + + # --------------------------------------------------------------------------------------------------------------------------------------- + # Merging + # --------------------------------------------------------------------------------------------------------------------------------------- + + input_dict = controls | plasma + + for i in range(len(species)): + for k in species[i+1]: + input_dict[f'{k}_{i+1}'] = species[i+1][k] 
+ + input_parameters[rho] = input_dict + + return input_parameters + + + def to_transp(self, folder = '~/scratch/', shot = '12345', runid = 'P01', times = [0.0,1.0], Vsurf = 0.0): + + print("\t- Converting to TRANSP") + folder = IOtools.expandPath(folder) + folder.mkdir(parents=True, exist_ok=True) + + from mitim_tools.transp_tools.utils import TRANSPhelpers + transp = TRANSPhelpers.transp_run(folder, shot, runid) + for time in times: + transp.populate_time.from_profiles(time,self, Vsurf = Vsurf) + + transp.write_ufiles() + + return transp + + def to_eped(self, ped_rho = 0.95): + + neped_19 = np.interp(ped_rho, self.profiles['rho(-)'], self.profiles['ne(10^19/m^3)']) + + eped_evaluation = { + 'Ip': np.abs(self.profiles['current(MA)'][0]), + 'Bt': np.abs(self.profiles['bcentr(T)'][0]), + 'R': np.abs(self.profiles['rcentr(m)'][0]), + 'a': np.abs(self.derived['a']), + 'kappa995': np.abs(self.derived['kappa995']), + 'delta995': np.abs(self.derived['delta995']), + 'neped': np.abs(neped_19), + 'betan': np.abs(self.derived['BetaN_engineering']), + 'zeff': np.abs(self.derived['Zeff_vol']), + 'tesep': np.abs(self.profiles['te(keV)'][-1])*1E3, + 'nesep_ratio': np.abs(self.profiles['ne(10^19/m^3)'][-1] / neped_19), + } + + return eped_evaluation + + +class DataTable: + def __init__(self, variables=None): + + if variables is not None: + self.variables = variables + else: + + # Default for confinement mode access studies (JWH 03/2024) + self.variables = { + "Rgeo": ["rcentr(m)", "pos_0", "profiles", ".2f", 1, "m"], + "ageo": ["a", None, "derived", ".2f", 1, "m"], + "volume": ["volume", None, "derived", ".2f", 1, "m"], + "kappa @psi=0.95": ["kappa(-)", "psi_0.95", "profiles", ".2f", 1, None], + "delta @psi=0.95": ["delta(-)", "psi_0.95", "profiles", ".2f", 1, None], + "Bt": ["bcentr(T)", "pos_0", "profiles", ".1f", 1, "T"], + "Ip": ["current(MA)", "pos_0", "profiles", ".1f", 1, "MA"], + "Pin": ["qIn", None, "derived", ".1f", 1, "MW"], + "Te @rho=0.9": ["te(keV)", "rho_0.90", 
"profiles", ".2f", 1, "keV"], + "Ti/Te @rho=0.9": ["tite", "rho_0.90", "derived", ".2f", 1, None], + "ne @rho=0.9": [ + "ne(10^19/m^3)", + "rho_0.90", + "profiles", + ".2f", + 0.1, + "E20m-3", + ], + "ptot @rho=0.9": [ + "ptot_manual", + "rho_0.90", + "derived", + ".1f", + 1e3, + "kPa", + ], + "Zeff": ["Zeff_vol", None, "derived", ".1f", 1, None], + "fDT": ["fmain", None, "derived", ".2f", 1, None], + "H89p": ["H89", None, "derived", ".2f", 1, None], + "H98y2": ["H98", None, "derived", ".2f", 1, None], + "ne (vol avg)": ["ne_vol20", None, "derived", ".2f", 1, "E20m-3"], + "Ptop": ["ptop", None, "derived", ".1f", 1, "Pa"], + "fG": ["fG", None, "derived", ".2f", 1, None], + "Pfus": ["Pfus", None, "derived", ".1f", 1, "MW"], + "Prad": ["Prad", None, "derived", ".1f", 1, "MW"], + "Q": ["Q", None, "derived", ".2f", 1, None], + "Pnet @rho=0.9": ["qTr", "rho_0.90", "derived", ".1f", 1, "MW"], + "Qi/Qe @rho=0.9": ["QiQe", "rho_0.90", "derived", ".2f", 1, None], + } + + self.data = [] + + def export_to_csv(self, filename, title=None): + + title_data = [""] + for key in self.variables: + if self.variables[key][5] is None: + title_data.append(f"{key}") + else: + title_data.append(f"{key} ({self.variables[key][5]})") + + # Open a file with the given filename in write mode + with open(filename, mode="w", newline="") as file: + writer = csv.writer(file) + + # Write the title row first if it is provided + if title: + writer.writerow([title] + [""] * (len(self.data[0]) - 1)) + + writer.writerow(title_data) + + # Write each row in self.data to the CSV file + for row in self.data: + writer.writerow(row) + +def aLT(r, p): + return ( + r[-1] + * CALCtools.derivation_into_Lx( + torch.from_numpy(r).to(torch.double), torch.from_numpy(p).to(torch.double) + ) + .cpu() + .cpu().numpy() + ) + + +def grad(r, p): + return MATHtools.deriv(torch.from_numpy(r), torch.from_numpy(p), array=False) + + +def ionName(Z, A): + # Based on Z + if Z == 2: + return "He" + elif Z == 9: + return "F" + elif Z 
== 6: + return "C" + elif Z == 11: + return "Na" + elif Z == 30: + return "Zn" + elif Z == 31: + return "Ga" + + # # Based on Mass (this is the correct way, since the radiation needs to be calculated with the full element) + # if A in [3,4]: return 'He' + # elif A == 18: return 'F' + # elif A == 12: return 'C' + # elif A == 22: return 'Na' + # elif A == 60: return 'Zn' + # elif A == 69: return 'Ga' + + +def gradientsMerger(p0, p_true, roa=0.46, blending=0.1): + p = copy.deepcopy(p0) + + aLTe_true = np.interp( + p.derived["roa"], p_true.derived["roa"], p_true.derived["aLTe"] + ) + aLTi_true = np.interp( + p.derived["roa"], p_true.derived["roa"], p_true.derived["aLTi"][:, 0] + ) + aLne_true = np.interp( + p.derived["roa"], p_true.derived["roa"], p_true.derived["aLne"] + ) + + ix1 = np.argmin(np.abs(p.derived["roa"] - roa + blending)) + ix2 = np.argmin(np.abs(p.derived["roa"] - roa)) + + aLT0 = aLTe_true[: ix1 + 1] + aLT2 = p.derived["aLTe"][ix2:] + aLT1 = np.interp( + p.derived["roa"][ix1 : ix2 + 1], + [p.derived["roa"][ix1], p.derived["roa"][ix2]], + [aLT0[-1], aLT2[0]], + )[1:-1] + + aLTe = np.append(np.append(aLT0, aLT1), aLT2) + Te = ( + CALCtools.integration_Lx( + torch.from_numpy(p.derived["roa"]).unsqueeze(0), + torch.Tensor(aLTe).unsqueeze(0), + p.profiles["te(keV)"][-1], + ) + .cpu() + .cpu().numpy()[0] + ) + + aLT0 = aLTi_true[: ix1 + 1] + aLT2 = p.derived["aLTi"][ix2:, 0] + aLT1 = np.interp( + p.derived["roa"][ix1 : ix2 + 1], + [p.derived["roa"][ix1], p.derived["roa"][ix2]], + [aLT0[-1], aLT2[0]], + )[1:-1] + + aLTi = np.append(np.append(aLT0, aLT1), aLT2) + Ti = ( + CALCtools.integration_Lx( + torch.from_numpy(p.derived["roa"]).unsqueeze(0), + torch.Tensor(aLTi).unsqueeze(0), + p.profiles["ti(keV)"][-1, 0], + ) + .cpu() + .cpu().numpy()[0] + ) + + aLT0 = aLne_true[: ix1 + 1] + aLT2 = p.derived["aLne"][ix2:] + aLT1 = np.interp( + p.derived["roa"][ix1 : ix2 + 1], + [p.derived["roa"][ix1], p.derived["roa"][ix2]], + [aLT0[-1], aLT2[0]], + )[1:-1] + + aLne = 
np.append(np.append(aLT0, aLT1), aLT2) + ne = ( + CALCtools.integration_Lx( + torch.from_numpy(p.derived["roa"]).unsqueeze(0), + torch.Tensor(aLne).unsqueeze(0), + p.profiles["ne(10^19/m^3)"][-1], + ) + .cpu() + .cpu().numpy()[0] + ) + + p.profiles["te(keV)"] = Te + p.profiles["ti(keV)"][:, 0] = Ti + p.profiles["ne(10^19/m^3)"] = ne + + p.derive_quantities() + + return p + +def impurity_location(profiles, impurity_of_interest): + + position_of_impurity = None + for i in range(len(profiles.Species)): + if profiles.Species[i]["N"] == impurity_of_interest: + if position_of_impurity is not None: + raise ValueError(f"[MITIM] Species {impurity_of_interest} found at positions {position_of_impurity} and {i}") + position_of_impurity = i + if position_of_impurity is None: + raise ValueError(f"[MITIM] Species {impurity_of_interest} not found in profiles") + + return position_of_impurity diff --git a/src/mitim_tools/plasmastate_tools/utils/VMECtools.py b/src/mitim_tools/plasmastate_tools/utils/VMECtools.py new file mode 100644 index 00000000..36f8fa87 --- /dev/null +++ b/src/mitim_tools/plasmastate_tools/utils/VMECtools.py @@ -0,0 +1,523 @@ +import numpy as np +from collections import OrderedDict +import matplotlib.pyplot as plt +from pathlib import Path +from scipy.interpolate import interp1d +from mitim_tools.misc_tools import GRAPHICStools, IOtools +from mitim_tools.plasmastate_tools import MITIMstate +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class vmec_state(MITIMstate.mitim_state): + ''' + Class to read and manipulate VMEC files + ''' + + # ************************************************************************************************************************************************ + # Reading and interpreting + # ************************************************************************************************************************************************ + + def __init__( + self, + file_vmec, + file_profs=None, + 
derive_quantities=True, + mi_ref=None + ): + + # Initialize the base class and tell it the type of file + super().__init__(type_file='vmec') + + + self.header = ['# VMEC state file generated by MITIMtools\n'] + self.titles_singleNum = ["nexp", "nion", "shot", "name", "type", "time"] + self.titles_singleArr = ["masse","mass","ze","z","torfluxa(Wb/radian)","rcentr(m)","bcentr(T)"] #,"current(MA)"] + self.titles_single = self.titles_singleNum + self.titles_singleArr + + # Read the input file and store the raw data + self.files = [file_vmec, file_profs] + if self.files[0] is not None: + self._read_vmec() + + # Derive (Depending on resolution, derived can be expensive, so I may not do it every time) + self.derive_quantities(mi_ref=mi_ref, derive_quantities=derive_quantities) + + @IOtools.hook_method(after=MITIMstate.ensure_variables_existence) + def _read_vmec(self): + + import vmecpp + + # Read VMEC file + print("\t- Reading VMEC file") + self.wout = vmecpp.VmecWOut.from_wout_file(Path(self.files[0])) + + # Initialize profiles dictionary + self.profiles = OrderedDict() + + self.profiles['nion'] = np.array([1]) + self.profiles['name'] = np.array(['D']) + self.profiles['type'] = np.array(['[therm]']) + self.profiles['mass'] = np.array([2.0]) + self.profiles['z'] = np.array([1.0]) + + self.profiles['rcentr(m)'] = np.array([self.wout.Rmajor_p]) + self.profiles['bcentr(T)'] = np.array([self.wout.rbtor/self.wout.Rmajor_p]) + self.profiles["current(MA)"] = np.array([0.0]) + + self.profiles["torfluxa(Wb/radian)"] = np.array([self.wout.phipf[-1]]) + + # Produce variables + self.profiles["rho(-)"] = (self.wout.phi/self.wout.phi[-1])**0.5 #np.linspace(0, 1, self.wout.ns)**0.5 + self.profiles["ptot(Pa)"] = self.wout.presf + + #self.profiles["q(-)"] = self.wout.q_factor + #self.profiles["polflux(Wb/radian)"] = self.wout.chi + + # Read Profiles + if self.files[1] is None: + print("\t- No profiles file provided, skipping profile reading") + return + + data = 
self._read_profiles(x_coord=self.profiles["rho(-)"])
+
+        self.profiles['te(keV)'] = data['Te']
+        self.profiles['ne(10^19/m^3)'] = data['ne']
+        self.profiles['ti(keV)'] = np.atleast_2d(data['Ti']).T
+        self.profiles['ni(10^19/m^3)'] = np.atleast_2d(data['ni']).T
+
+        self.profiles['qbeami(MW/m^3)'] = data['Qi'] * 1e-6 # Convert from W/m^3 to MW/m^3
+        self.profiles['qrfe(MW/m^3)'] = data['Qe'] * 1e-6 # Convert from W/m^3 to MW/m^3
+        self.profiles['qpar_beam(1/m^3/s)'] = data['S']
+
+    # ************************************************************************************************************************************************
+    # Derivation (different from MITIMstate)
+    # ************************************************************************************************************************************************
+
+    def derive_quantities(self, **kwargs):
+
+        if "derived" not in self.__dict__:
+            self.derived = {}
+
+        # Define the minor radius used in all calculations (could be the half-width of the midplane intersect, or an effective minor radius)
+        self.derived["r"] = self.profiles["rho(-)"] # Assume that r = rho so r/a = rho too
+
+        super().derive_quantities_base(**kwargs)
+
+    def derive_geometry(self, **kwargs):
+
+        r = self.derived["r"]
+
+        half_grid_r = r - (r[1] - r[0]) / 2
+        d_volume_d_r = (
+            (2 * np.pi) ** 2
+            * np.array(self.wout.vp)
+            * 2
+            * np.sqrt(half_grid_r)
+        )
+
+        self.derived["B_ref"] = np.ones(r.shape) * self.profiles["torfluxa(Wb/radian)"][-1] / (np.pi * self.wout.Aminor_p**2)
+
+        self.derived["volp_geo"] = d_volume_d_r
+        self.derived["volp_geo"][0] = 1E-9
+
+        self.derived["kappa_a"] = 0.0
+        self.derived["kappa95"] = 0.0
+        self.derived["delta95"] = 0.0
+        self.derived["kappa995"] = 0.0
+        self.derived["delta995"] = 0.0
+        self.derived["R_LF"] = np.zeros(r.shape)
+
+        self.derived["bp2_exp"] = np.zeros(r.shape)
+        self.derived["bt2_exp"] = np.zeros(r.shape)
+        self.derived["bp2_geo"] = np.zeros(r.shape)
+        self.derived["bt2_geo"] = np.zeros(r.shape)
+
+ def plot_geometry(self, axs, color="b", legYN=True, extralab="", lw=1, fs=6): + + [ax00c,ax10c,ax20c,ax01c,ax11c,ax21c,ax02c,ax12c,ax22c,axs_3d,axs_2d] = axs + + rho = self.profiles["rho(-)"] + + ax = ax00c + + var = self.derived['r'] + ax.plot(rho, var, "-", lw=lw, c=color) + + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylim(bottom=0) + ax.set_ylabel("Effective radius ($r$)") + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = ax01c + ax.plot(rho, self.derived['volp_geo'], color=color, lw=lw, label = extralab) + ax.set_xlabel('$\\rho$'); ax.set_xlim(0, 1) + ax.set_ylabel(f"$dV/d\\rho$ ($m^3$)") + GRAPHICStools.addDenseAxis(ax) + + if legYN: + ax.legend(loc="best", fontsize=fs) + + # ---- + phis_plot = [0.0, np.pi/2, np.pi, 3*np.pi/2] + + self.plot_plasma_boundary(ax=axs_3d, color=color, phi_cuts=phis_plot) + self.plot_state_flux_surfaces(ax=axs_2d, c=color, phis_plot=phis_plot) + + def plot_state_flux_surfaces(self, ax=None, c='b', phis_plot=[0.0]): + + rhos_plot = np.linspace(0.0, 1.0, 10) + + + ls = GRAPHICStools.listLS() + + for phi_cut, lsi in zip(phis_plot, ls): + + for i in range(len(rhos_plot)): + self.plot_flux_surface(ax = ax, phi_cut=phi_cut, rho=rhos_plot[i], c=c, lw = 0.5, ls = lsi) + self.plot_flux_surface(ax = ax, phi_cut=phi_cut, rho=1.0, c=c, lw = 4, ls = lsi, label = f"{phi_cut*180/np.pi:.1f}°") + + ax.set_aspect('equal') + ax.set_xlabel('R [m]') + ax.set_ylabel('Z [m]') + GRAPHICStools.addDenseAxis(ax) + ax.legend(loc='best', fontsize=6) + #GRAPHICStools.addLegendApart(ax, ratio=0.9, size=6) + + ax.set_title(f'Poloidal cross-sections') + + def _read_profiles(self, x_coord=None, debug = False): + + filename = self.files[1] + + if x_coord is None: + # Create uniform coordinate array from 0 to 1 + x_coord = np.linspace(0, 1, 200) + + # Raw data storage + raw_data = { + 'Te_data': {'x': [], 'y': []}, + 'ne_data': {'x': [], 'y': []}, + 'Ti_data': {'x': [], 'y': []}, + 'ni_data': {'x': [], 'y': []}, + 
'S_data': {'x': [], 'y': []}, + 'Qe_data': {'x': [], 'y': []}, + 'Qi_data': {'x': [], 'y': []} + } + current_section = None + + with open(filename, 'r') as f: + lines = f.readlines() + + for line in lines: + line = line.strip() + if not line: + continue + + # Check for section headers + if line.startswith('#'): + if 'Te' in line and 'ne' in line: + current_section = 'Te_ne' + elif 'Ti' in line and 'ni' in line: + current_section = 'Ti_ni' + elif 'Particle source' in line: + current_section = 'S' + elif 'Q W/m3' in line: + current_section = 'Qe' + elif 'NBI W/m3' in line: + current_section = 'Qi' + continue + + # Parse data lines + parts = line.split() + if len(parts) < 2: + continue + + x = float(parts[0]) # x/a or r/a coordinate (first column) + + if current_section == 'Te_ne' and len(parts) == 4: + # x/a, x/rho, Te, ne - use x/a coordinate + raw_data['Te_data']['x'].append(x) + raw_data['Te_data']['y'].append(float(parts[2])) + raw_data['ne_data']['x'].append(x) + raw_data['ne_data']['y'].append(float(parts[3])) + + elif current_section == 'Ti_ni' and len(parts) == 4: + # x/a, x/rho, Ti, ni - use x/a coordinate + raw_data['Ti_data']['x'].append(x) + raw_data['Ti_data']['y'].append(float(parts[2])) + raw_data['ni_data']['x'].append(x) + raw_data['ni_data']['y'].append(float(parts[3])) + + elif current_section == 'S' and len(parts) == 2: + raw_data['S_data']['x'].append(x) + raw_data['S_data']['y'].append(float(parts[1])) + + elif current_section == 'Qe' and len(parts) == 2: + raw_data['Qe_data']['x'].append(x) + raw_data['Qe_data']['y'].append(float(parts[1])) + + elif current_section == 'Qi' and len(parts) == 2: + raw_data['Qi_data']['x'].append(x) + raw_data['Qi_data']['y'].append(float(parts[1])) + + # Convert to numpy arrays + for profile_data in raw_data.values(): + profile_data['x'] = np.array(profile_data['x']) + profile_data['y'] = np.array(profile_data['y']) + + # Interpolate each profile to uniform grid + uniform_data = {'x_coord': x_coord} + + 
profile_map = { + 'Te': 'Te_data', 'ne': 'ne_data', 'Ti': 'Ti_data', + 'ni': 'ni_data', 'S': 'S_data', 'Qe': 'Qe_data', 'Qi': 'Qi_data' + } + + for profile_name, data_key in profile_map.items(): + x_data = raw_data[data_key]['x'] + y_data = raw_data[data_key]['y'] + + if len(x_data) > 0: + # Interpolate using actual coordinates from the data + f = interp1d(x_data, y_data, kind='linear', + bounds_error=False, fill_value=(y_data[0], y_data[-1])) + uniform_data[profile_name] = f(x_coord) + else: + uniform_data[profile_name] = None + + # Also store original data for plotting + uniform_data['raw_data'] = raw_data + + if debug: + plot_profiles(uniform_data) + embed() + + return uniform_data + + def plot_plasma_boundary(self, ax=None, color="b", phi_cuts=[]): + + # The output object contains the Fourier coefficients of the geometry in R and Z + # as a function of the poloidal (theta) and toroidal (phi) angle-like coordinates + # for a number of discrete radial locations. + + # number of flux surfaces, i.e., final radial resolution + ns = self.wout.ns + + # poloidal mode numbers: m + xm = self.wout.xm + + # toroidal mode numbers: n * nfp + xn = self.wout.xn + + # stellarator-symmetric Fourier coefficients of flux surface geometry R ~ cos(m * theta - n * nfp * phi) + rmnc = self.wout.rmnc + + # stellarator-symmetric Fourier coefficients of flux surface geometry Z ~ sin(m * theta - n * nfp * phi) + zmns = self.wout.zmns + + # plot the outermost (last) flux surface, which is the plasma boundary + j = ns - 1 + + # resolution over the flux surface + num_theta = 101 + num_phi = 181 + + min_phi = 0.0 + max_phi = 2.0 * np.pi + + # grid in theta and phi along the flux surface + grid_theta = np.linspace(0.0, 2.0 * np.pi, num_theta, endpoint=True) + grid_phi = np.linspace(min_phi, max_phi, num_phi, endpoint=True) + + # compute Cartesian coordinates of flux surface geometry + x = np.zeros([num_theta, num_phi]) + y = np.zeros([num_theta, num_phi]) + z = np.zeros([num_theta, num_phi]) + 
for idx_theta, theta in enumerate(grid_theta): + for idx_phi, phi in enumerate(grid_phi): + kernel = xm * theta - xn * phi + r = np.dot(rmnc[:, j], np.cos(kernel)) + x[idx_theta, idx_phi] = r * np.cos(phi) + y[idx_theta, idx_phi] = r * np.sin(phi) + z[idx_theta, idx_phi] = np.dot(zmns[:, j], np.sin(kernel)) + + # actually make the 3D plot + if ax is None: + fig = plt.figure() + ax = fig.add_subplot(projection="3d") + + # Plot the surface + ax.plot_surface(x, y, z, alpha=0.3 if len(phi_cuts)>0 else 0.7, color=color) + + # Add cutting planes at specific toroidal angles + for phi_cut in phi_cuts: + self._add_cutting_plane(ax, phi_cut, j, xm, xn, rmnc, zmns, color) + + # Set an equal aspect ratio + ax.set_aspect("equal") + + ax.set_title(f'3D plasma boundary') + + def plot_flux_surface(self, ax=None, phi_cut=0.0, rho=1.0, c='b', lw=1, ls='-', label = ''): + """ + Plot poloidal cross-section of the torus at a specified toroidal angle. + + Parameters: + ----------- + ax : matplotlib axes object, optional + Axes to plot on. If None, creates new figure and axes. + phi_cut : float, optional + Toroidal angle for the cross-section in radians. Default is 0.0. + rho : float, optional + Normalized flux surface coordinate (0 to 1). Default is 1.0 (boundary). 
+ """ + + ns = self.wout.ns + xm = self.wout.xm + xn = self.wout.xn + rmnc = self.wout.rmnc + zmns = self.wout.zmns + + # Find closest flux surface index for given rho + rho_grid = self.profiles["rho(-)"] + j = np.argmin(np.abs(rho_grid - rho)) + + num_theta = 201 + grid_theta = np.linspace(0.0, 2.0 * np.pi, num_theta, endpoint=True) + + R = np.zeros(num_theta) + Z = np.zeros(num_theta) + + for idx_theta, theta in enumerate(grid_theta): + kernel = xm * theta - xn * phi_cut + R[idx_theta] = np.dot(rmnc[:, j], np.cos(kernel)) + Z[idx_theta] = np.dot(zmns[:, j], np.sin(kernel)) + + if ax is None: + plt.ion() + fig, ax = plt.subplots() + + ax.plot(R, Z, ls=ls, color = c, linewidth=lw, label=label) + + def _add_cutting_plane(self, ax, phi_cut, j, xm, xn, rmnc, zmns, plane_color): + """ + Add a cutting plane at a specific toroidal angle to the 3D plot. + + Parameters: + ----------- + ax : matplotlib 3D axes + The 3D axes to plot on + phi_cut : float + Toroidal angle for the cutting plane in radians + j : int + Flux surface index (typically ns-1 for boundary) + xm, xn : array + Poloidal and toroidal mode numbers + rmnc, zmns : array + Fourier coefficients for R and Z + plane_color : str + Color for the cutting plane + """ + num_theta = 101 + grid_theta = np.linspace(0.0, 2.0 * np.pi, num_theta, endpoint=True) + + R = np.zeros(num_theta) + Z = np.zeros(num_theta) + X = np.zeros(num_theta) + Y = np.zeros(num_theta) + + for idx_theta, theta in enumerate(grid_theta): + kernel = xm * theta - xn * phi_cut + r = np.dot(rmnc[:, j], np.cos(kernel)) + R[idx_theta] = r + Z[idx_theta] = np.dot(zmns[:, j], np.sin(kernel)) + X[idx_theta] = r * np.cos(phi_cut) + Y[idx_theta] = r * np.sin(phi_cut) + + # Plot the cutting plane as a line in 3D + ax.plot(X, Y, Z, color=plane_color, linewidth=2, + label=f'φ = {phi_cut*180/np.pi:.0f}°') + +def plot_profiles(data): + """ + Create plots of the plasma profiles + """ + fig, axes = plt.subplots(2, 3, figsize=(15, 10)) + + raw_data = 
data['raw_data'] + + # Temperature profiles + if len(raw_data['Te_data']['x']) > 0: + axes[0,0].plot(raw_data['Te_data']['x'], raw_data['Te_data']['y'], 'ro-', label='Te (original)', markersize=3) + if data['Te'] is not None: + axes[0,0].plot(data['x_coord'], data['Te'], 'r-', label='Te (interpolated)', linewidth=2) + + if len(raw_data['Ti_data']['x']) > 0: + axes[0,0].plot(raw_data['Ti_data']['x'], raw_data['Ti_data']['y'], 'bo-', label='Ti (original)', markersize=3) + if data['Ti'] is not None: + axes[0,0].plot(data['x_coord'], data['Ti'], 'b-', label='Ti (interpolated)', linewidth=2) + + axes[0,0].set_xlabel('x/a') + axes[0,0].set_ylabel('Temperature [keV]') + axes[0,0].legend() + axes[0,0].grid(True) + axes[0,0].set_title('Temperature Profiles') + + # Density profiles + if len(raw_data['ne_data']['x']) > 0: + axes[0,1].plot(raw_data['ne_data']['x'], raw_data['ne_data']['y'], 'ro-', label='ne (original)', markersize=3) + if data['ne'] is not None: + axes[0,1].plot(data['x_coord'], data['ne'], 'r-', label='ne (interpolated)', linewidth=2) + + if len(raw_data['ni_data']['x']) > 0: + axes[0,1].plot(raw_data['ni_data']['x'], raw_data['ni_data']['y'], 'bo-', label='ni (original)', markersize=3) + if data['ni'] is not None: + axes[0,1].plot(data['x_coord'], data['ni'], 'b-', label='ni (interpolated)', linewidth=2) + + axes[0,1].set_xlabel('x/a') + axes[0,1].set_ylabel('Density [10¹⁹ m⁻³]') + axes[0,1].legend() + axes[0,1].grid(True) + axes[0,1].set_title('Density Profiles') + + # Particle source + if len(raw_data['S_data']['x']) > 0: + axes[0,2].plot(raw_data['S_data']['x'], raw_data['S_data']['y'], 'go-', label='S (original)', markersize=3) + if data['S'] is not None: + axes[0,2].plot(data['x_coord'], data['S'], 'g-', label='S (interpolated)', linewidth=2) + axes[0,2].set_xlabel('x/a') + axes[0,2].set_ylabel('Particle Source [m⁻³s⁻¹]') + axes[0,2].legend() + axes[0,2].grid(True) + axes[0,2].set_title('Particle Source') + + # Electron heating + if 
len(raw_data['Qe_data']['x']) > 0: + axes[1,0].plot(raw_data['Qe_data']['x'], raw_data['Qe_data']['y'], 'ro-', label='Qe (original)', markersize=3) + if data['Qe'] is not None: + axes[1,0].plot(data['x_coord'], data['Qe'], 'r-', label='Qe (interpolated)', linewidth=2) + axes[1,0].set_xlabel('x/a') + axes[1,0].set_ylabel('Qe [W/m³]') + axes[1,0].legend() + axes[1,0].grid(True) + axes[1,0].set_title('Electron Heating') + + # Ion heating + if len(raw_data['Qi_data']['x']) > 0: + axes[1,1].plot(raw_data['Qi_data']['x'], raw_data['Qi_data']['y'], 'bo-', label='Qi (original)', markersize=3) + if data['Qi'] is not None: + axes[1,1].plot(data['x_coord'], data['Qi'], 'b-', label='Qi (interpolated)', linewidth=2) + axes[1,1].set_xlabel('x/a') + axes[1,1].set_ylabel('Qi [W/m³]') + axes[1,1].legend() + axes[1,1].grid(True) + axes[1,1].set_title('Ion Heating (NBI)') + + # Remove empty subplot + axes[1,2].remove() + + plt.tight_layout() + plt.show() \ No newline at end of file diff --git a/src/mitim_tools/plasmastate_tools/utils/state_plotting.py b/src/mitim_tools/plasmastate_tools/utils/state_plotting.py new file mode 100644 index 00000000..9559e42f --- /dev/null +++ b/src/mitim_tools/plasmastate_tools/utils/state_plotting.py @@ -0,0 +1,1345 @@ +import numpy as np +import matplotlib.pyplot as plt +from mitim_tools.misc_tools import GRAPHICStools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from mitim_tools import __version__ +from IPython import embed + + +def add_figures(fn, fnlab='', fnlab_pre='', tab_color=None): + + fig1 = fn.add_figure(label= fnlab_pre + "Profiles" + fnlab, tab_color=tab_color) + fig2 = fn.add_figure(label= fnlab_pre + "Powers" + fnlab, tab_color=tab_color) + fig3 = fn.add_figure(label= fnlab_pre + "Geometry" + fnlab, tab_color=tab_color) + fig4 = fn.add_figure(label= fnlab_pre + "Gradients" + fnlab, tab_color=tab_color) + fig5 = fn.add_figure(label= fnlab_pre + "Flows" + fnlab, tab_color=tab_color) + fig6 = fn.add_figure(label= 
fnlab_pre + "Other" + fnlab, tab_color=tab_color) + fig7 = fn.add_figure(label= fnlab_pre + "Impurities" + fnlab, tab_color=tab_color) + figs = [fig1, fig2, fig3, fig4, fig5, fig6, fig7] + + return figs + + +def add_axes(figs): + + fig1, fig2, fig3, fig4, fig5, fig6, fig7 = figs + + grid = plt.GridSpec(3, 3, hspace=0.3, wspace=0.3) + axsProf_1 = [ + fig1.add_subplot(grid[0, 0]), + fig1.add_subplot(grid[1, 0]), + fig1.add_subplot(grid[2, 0]), + fig1.add_subplot(grid[0, 1]), + fig1.add_subplot(grid[1, 1]), + fig1.add_subplot(grid[2, 1]), + fig1.add_subplot(grid[0, 2]), + fig1.add_subplot(grid[1, 2]), + fig1.add_subplot(grid[2, 2]), + ] + + grid = plt.GridSpec(2, 4, hspace=0.3, wspace=0.3) + axsProf_2 = [ + fig2.add_subplot(grid[0, 0]), + fig2.add_subplot(grid[0, 1]), + fig2.add_subplot(grid[1, 0]), + fig2.add_subplot(grid[1, 1]), + fig2.add_subplot(grid[0, 2]), + fig2.add_subplot(grid[1, 2]), + fig2.add_subplot(grid[0, 3]), + fig2.add_subplot(grid[1, 3]), + ] + + # GEOMETRY + grid = plt.GridSpec(6, 5, hspace=0.8, wspace=0.4) + ax00c = fig3.add_subplot(grid[0:2, 0]) + axsProf_3 = [ + ax00c, + fig3.add_subplot(grid[2:4, 0], sharex=ax00c), + fig3.add_subplot(grid[4:, 0]), + fig3.add_subplot(grid[0:2, 1]), + fig3.add_subplot(grid[2:4, 1]), + fig3.add_subplot(grid[4:, 1]), + fig3.add_subplot(grid[0:2, 2]), + fig3.add_subplot(grid[2:4, 2]), + fig3.add_subplot(grid[4:, 2]), + fig3.add_subplot(grid[0:3, 3:], projection="3d"), + fig3.add_subplot(grid[3:, 3:]), + ] + # ---- + + grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) + axsProf_4 = [ + fig4.add_subplot(grid[0, 0]), + fig4.add_subplot(grid[1, 0]), + fig4.add_subplot(grid[0, 1]), + fig4.add_subplot(grid[1, 1]), + fig4.add_subplot(grid[0, 2]), + fig4.add_subplot(grid[1, 2]), + ] + + grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) + axsFlows = [ + fig5.add_subplot(grid[0, 0]), + fig5.add_subplot(grid[1, 0]), + fig5.add_subplot(grid[0, 1]), + fig5.add_subplot(grid[0, 2]), + fig5.add_subplot(grid[1, 1]), + 
fig5.add_subplot(grid[1, 2]), + ] + + grid = plt.GridSpec(2, 5, hspace=0.3, wspace=0.3) + axsProf_6 = [ + fig6.add_subplot(grid[0, 0]), + fig6.add_subplot(grid[0, 1]), + fig6.add_subplot(grid[0, 2]), + fig6.add_subplot(grid[1, 0]), + fig6.add_subplot(grid[1, 1]), + fig6.add_subplot(grid[1, 2]), + fig6.add_subplot(grid[0, 3]), + fig6.add_subplot(grid[1, 3]), + fig6.add_subplot(grid[0, 4]), + fig6.add_subplot(grid[1, 4]), + ] + grid = plt.GridSpec(2, 2, hspace=0.3, wspace=0.3) + axsImps = [ + fig7.add_subplot(grid[0, 0]), + fig7.add_subplot(grid[0, 1]), + fig7.add_subplot(grid[1, 0]), + fig7.add_subplot(grid[1, 1]), + ] + + return axsProf_1, axsProf_2, axsProf_3, axsProf_4, axsFlows, axsProf_6, axsImps + + +def plot_profiles(self, axs1, color="b", legYN=True, extralab="", lw=1, fs=6): + + [ax00, ax10, ax20, ax01, ax11, ax21, ax02, ax12, ax22] = axs1 + + rho = self.profiles["rho(-)"] + + lines = GRAPHICStools.listLS() + + ax=ax00 + var = self.profiles["te(keV)"] + varL = "$T_e$ , $T_i$ (keV)" + if legYN: + lab = extralab + "e" + else: + lab = "" + ax.plot(rho, var, lw=lw, ls="-", label=lab, c=color) + var = self.profiles["ti(keV)"][:, 0] + if legYN: + lab = extralab + "i" + else: + lab = "" + ax.plot(rho, var, lw=lw, ls="--", label=lab, c=color) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + GRAPHICStools.autoscale_y(ax, bottomy=0) + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + + + ax=ax01 + var = self.profiles["ne(10^19/m^3)"] * 1e-1 + varL = "$n_e$ ($10^{20}/m^3$)" + if legYN: + lab = extralab + "e" + else: + lab = "" + ax.plot(rho, var, lw=lw, ls="-", label=lab, c=color) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + GRAPHICStools.autoscale_y(ax, bottomy=0) + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + + ax = ax10 + cont = 0 + for i in range(len(self.Species)): + if self.Species[i]["S"] == "therm": + var = self.profiles["ti(keV)"][:, i] + ax.plot( + rho, + 
var, + lw=lw, + ls=lines[cont], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + cont += 1 + varL = "Thermal $T_i$ (keV)" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + # ax.set_ylim(bottom=0); + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = ax20 + cont = 0 + for i in range(len(self.Species)): + if self.Species[i]["S"] == "fast": + var = self.profiles["ti(keV)"][:, i] + ax.plot( + rho, + var, + lw=lw, + ls=lines[cont], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + cont += 1 + varL = "Fast $T_i$ (keV)" + ax.plot( + rho, + self.profiles["ti(keV)"][:, 0], + lw=0.5, + ls="-", + alpha=0.5, + c=color, + label=extralab + "$T_{i,1}$", + ) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + # ax.set_ylim(bottom=0); + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = ax11 + cont = 0 + for i in range(len(self.Species)): + if self.Species[i]["S"] == "therm": + var = self.profiles["ni(10^19/m^3)"][:, i] * 1e-1 + ax.plot( + rho, + var, + lw=lw, + ls=lines[cont], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + cont += 1 + varL = "Thermal $n_i$ ($10^{20}/m^3$)" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + # ax.set_ylim(bottom=0); + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = ax21 + cont = 0 + for i in range(len(self.Species)): + if self.Species[i]["S"] == "fast": + var = self.profiles["ni(10^19/m^3)"][:, i] * 1e-1 * 1e5 + ax.plot( + rho, + var, + lw=lw, + ls=lines[cont], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + cont += 1 + varL = "Fast $n_i$ ($10^{15}/m^3$)" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + # 
ax.set_ylim(bottom=0); + ax.set_ylabel(varL) + if legYN and cont>0: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = ax02 + var = self.profiles["w0(rad/s)"] + ax.plot(rho, var, lw=lw, ls="-", c=color) + varL = "$\\omega_{0}$ (rad/s)" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + ax = ax12 + var = self.profiles["ptot(Pa)"] * 1e-6 + ax.plot(rho, var, lw=lw, ls="-", c=color, label=extralab + "ptot") + if "ptot_manual" in self.derived: + ax.plot( + rho, + self.derived["ptot_manual"], + lw=lw, + ls="--", + c=color, + label=extralab + "check", + ) + ax.plot( + rho, + self.derived["pthr_manual"], + lw=lw, + ls="-.", + c=color, + label=extralab + "check, thrm", + ) + + varL = "$p$ (MPa)" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + # ax.set_ylim(bottom=0) + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = ax22 + var = self.profiles["q(-)"] + ax.plot(rho, var, lw=lw, ls="-", c=color) + varL = "$q$ profile" + ax.axhline(y=1.0, lw=0.5, ls="--", c="k") + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + +def plot_powers(self, axs2, legYN=True, extralab="", color="b", lw=1, fs=6): + + [ax00b, ax01b, ax10b, ax11b, ax20b, ax21b, ax30b, ax31b] = axs2 + + rho = self.profiles["rho(-)"] + + lines = GRAPHICStools.listLS() + + ax = ax00b + varL = "$MW/m^3$" + cont = 0 + var = -self.profiles["qei(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "i->e", c=color) + cont += 1 + if "qrfe(MW/m^3)" in self.profiles: + var = self.profiles["qrfe(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "rf", c=color) + cont += 1 + if "qfuse(MW/m^3)" in self.profiles: + var = 
self.profiles["qfuse(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "fus", c=color) + cont += 1 + if "qbeame(MW/m^3)" in self.profiles: + var = self.profiles["qbeame(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "beam", c=color) + cont += 1 + if "qione(MW/m^3)" in self.profiles: + var = self.profiles["qione(MW/m^3)"] + ax.plot( + rho, var, lw=lw / 2, ls=lines[cont], label=extralab + "extra", c=color + ) + cont += 1 + if "qohme(MW/m^3)" in self.profiles: + var = self.profiles["qohme(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "ohmic", c=color) + cont += 1 + + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + ax.set_title("Electron Power Density") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + ax = ax01b + + ax.plot(rho, self.profiles["qmom(N/m^2)"], lw=lw, ls="-", c=color) + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("$N/m^2$, $J/m^3$") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + ax.set_title("Momentum Source Density") + + ax = ax10b + varL = "$MW/m^3$" + cont = 0 + var = self.profiles["qei(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "e->i", c=color) + cont += 1 + if "qrfi(MW/m^3)" in self.profiles: + var = self.profiles["qrfi(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "rf", c=color) + cont += 1 + if "qfusi(MW/m^3)" in self.profiles: + var = self.profiles["qfusi(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "fus", c=color) + cont += 1 + if "qbeami(MW/m^3)" in self.profiles: + var = self.profiles["qbeami(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=lines[cont], label=extralab + "beam", c=color) + cont += 1 + if "qioni(MW/m^3)" in self.profiles: + var = self.profiles["qioni(MW/m^3)"] + ax.plot( + rho, var, 
lw=lw / 2, ls=lines[cont], label=extralab + "extra", c=color + ) + cont += 1 + + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + ax.set_title("Ion Power Density") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + ax = ax11b + cont = 0 + var = self.profiles["qpar_beam(1/m^3/s)"] * 1e-20 + ax.plot(rho, var, lw=lw, ls=lines[0], c=color, label=extralab + "beam") + var = self.profiles["qpar_wall(1/m^3/s)"] * 1e-20 + ax.plot(rho, var, lw=lw, ls=lines[1], c=color, label=extralab + "wall") + + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + # ax.set_ylim(bottom=0); + ax.axhline(y=0, lw=0.5, ls="--", c="k") + ax.set_ylabel("$10^{20}m^{-3}s^{-1}$") + ax.set_title("Particle Source Density") + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + + ax = ax20b + varL = "$Q_{rad}$ ($MW/m^3$)" + if "qbrem(MW/m^3)" in self.profiles: + var = self.profiles["qbrem(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls="-", label=extralab + "brem", c=color) + if "qline(MW/m^3)" in self.profiles: + var = self.profiles["qline(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls="--", label=extralab + "line", c=color) + if "qsync(MW/m^3)" in self.profiles: + var = self.profiles["qsync(MW/m^3)"] + ax.plot(rho, var, lw=lw, ls=":", label=extralab + "sync", c=color) + + var = self.derived["qrad"] + ax.plot(rho, var, lw=lw * 1.5, ls="-", label=extralab + "Total", c=color) + + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + # ax.set_ylim(bottom=0); + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + ax.set_title("Radiation Contributions") + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + + + ax = ax30b + ax.plot(rho, self.derived["qe_MWm2"], lw=lw, ls="-", label=extralab + "qe", c=color) + ax.plot(rho, self.derived["qi_MWm2"], lw=lw, ls="--", label=extralab + "qi", c=color) 
+ + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("Heat Flux ($MW/m^2$)") + if legYN: + ax.legend(loc="lower left", fontsize=fs) + ax.set_title("Heat flux per unit area (gacode: P/V')") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = ax31b + ax.plot( + rho, + self.derived["ge_10E20m2"], + lw=lw, + ls="-.", + label=extralab + "$\\Gamma_e$", + c=color, + ) + + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("Particle Flux ($10^{20}/m^2/s$)") + if legYN: + ax.legend(loc="lower left", fontsize=fs) + ax.set_title("Particle Flux per unit area (gacode: P/V')") + ax.axhline(y=0, lw=0.5, ls="--", c="k") + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + +def plot_gradients( + self, + axs4, + color="b", + fast_color=None, + lw=1.0, + label="", + ls="-o", + lastRho=0.89, + ms=2, + alpha=1.0, + useRoa=False, + predicted_rhoPlot=None, + plotImpurity=None, + plotRotation=False, + autoscale=True, + ): + + if predicted_rhoPlot is None: predicted_rhoPlot=[] + + if axs4 is None: + plt.ion() + fig, axs = plt.subplots( + ncols=3 + int(plotImpurity is not None) + int(plotRotation), + nrows=2, + figsize=(12, 5), + ) + + axs4 = [] + for i in range(axs.shape[-1]): + axs4.append(axs[0, i]) + axs4.append(axs[1, i]) + + ix = np.argmin(np.abs(self.profiles["rho(-)"] - lastRho)) + 1 + + xcoord = self.profiles["rho(-)"] if (not useRoa) else self.derived["roa"] + labelx = "$\\rho$" if (not useRoa) else "$r/a$" + + ax = axs4[0] + ax.plot( + xcoord, + self.profiles["te(keV)"], + ls, + c=color, + lw=lw, + label=label, + markersize=ms, + alpha=alpha, + ) + ax = axs4[2] + ax.plot( + xcoord, + self.profiles["ti(keV)"][:, 0], + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + ax = axs4[4] + ax.plot( + xcoord, + self.profiles["ne(10^19/m^3)"] * 1e-1, + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + + if "derived" in self.__dict__: + ax 
= axs4[1] + ax.plot( + xcoord[:ix], + self.derived["aLTe"][:ix], + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + ax = axs4[3] + ax.plot( + xcoord[:ix], + self.derived["aLTi"][:ix, 0], + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + if fast_color is not None: + for i in range(len(self.Species)): + if self.Species[i]["S"] != "therm": + ax.plot( + xcoord[:ix], + self.derived["aLTi"][:ix, i], + ls, + c=fast_color, + lw=lw, + markersize=ms, + alpha=alpha, + label=self.Species[i]["N"], + ) + ax.legend(loc="best", fontsize=7) + ax = axs4[5] + ax.plot( + xcoord[:ix], + self.derived["aLne"][:ix], + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + + for ax in axs4: + ax.set_xlim([0, 1]) + + ax = axs4[0] + ax.set_ylabel("$T_e$ (keV)") + ax.set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + ax.legend(loc="best", fontsize=7) + ax = axs4[2] + ax.set_ylabel("$T_i$ (keV)") + ax.set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + ax = axs4[4] + ax.set_ylabel("$n_e$ ($10^{20}m^{-3}$)") + ax.set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axs4[1] + ax.set_ylabel("$a/L_{Te}$") + ax.set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + ax = axs4[3] + ax.set_ylabel("$a/L_{Ti}$") + ax.set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + ax = axs4[5] + ax.set_ylabel("$a/L_{ne}$") + ax.axhline(y=0, ls="--", lw=0.5, c="k") + ax.set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + + cont = 0 + if plotImpurity is not None: + axs4[6 + cont].plot( + xcoord, + self.profiles["ni(10^19/m^3)"][:, plotImpurity] * 1e-1, + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + axs4[6 + cont].set_ylabel("$n_Z$ ($10^{20}m^{-3}$)") + axs4[6].set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + if "derived" in self.__dict__: + axs4[7 + cont].plot( + 
xcoord[:ix], + self.derived["aLni"][:ix, plotImpurity], + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + axs4[7 + cont].set_ylabel("$a/L_{nZ}$") + axs4[7 + cont].axhline(y=0, ls="--", lw=0.5, c="k") + axs4[7 + cont].set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + cont += 2 + + if plotRotation: + axs4[6 + cont].plot( + xcoord, + self.profiles["w0(rad/s)"] * 1e-3, + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + axs4[6 + cont].set_ylabel("$w_0$ (krad/s)") + axs4[6 + cont].set_xlabel(labelx) + if "derived" in self.__dict__: + axs4[7 + cont].plot( + xcoord[:ix], + self.derived["dw0dr"][:ix] * 1e-5, + ls, + c=color, + lw=lw, + markersize=ms, + alpha=alpha, + ) + axs4[7 + cont].set_ylabel("-$d\\omega_0/dr$ (krad/s/cm)") + axs4[7 + cont].axhline(y=0, ls="--", lw=0.5, c="k") + axs4[7 + cont].set_xlabel(labelx) + if autoscale: + GRAPHICStools.autoscale_y(ax, bottomy=0) + cont += 2 + + for x0 in predicted_rhoPlot: + ix = np.argmin(np.abs(self.profiles["rho(-)"] - x0)) + for ax in axs4: + ax.axvline(x=xcoord[ix], ls="--", lw=0.5, c=color) + + for i in range(len(axs4)): + ax = axs4[i] + GRAPHICStools.addDenseAxis(ax) + +def plot_other(self, axs6, color="b", lw=1.0, extralab="", fs=6): + + rho = self.profiles["rho(-)"] + lines = GRAPHICStools.listLS() + + # Others + ax = axs6[0] + ax.plot(self.profiles["rho(-)"], self.derived["dw0dr"] * 1e-5, c=color, lw=lw) + ax.set_ylabel("$-d\\omega_0/dr$ (krad/s/cm)") + ax.set_xlabel("$\\rho$") + ax.set_xlim([0, 1]) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + ax.axhline(y=0, lw=1.0, c="k", ls="--") + + ax = axs6[2] + ax.plot(self.profiles["rho(-)"], self.derived["q_fus"], c=color, lw=lw) + ax.set_ylabel("$q_{fus}$ ($MW/m^3$)") + ax.set_xlabel("$\\rho$") + ax.set_xlim([0, 1]) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axs6[3] + ax.plot(self.profiles["rho(-)"], self.derived["q_fus_MW"], c=color, lw=lw) + 
ax.set_ylabel("$P_{fus}$ ($MW$)") + ax.set_xlim([0, 1]) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axs6[4] + ax.plot(self.profiles["rho(-)"], self.derived["tite"], c=color, lw=lw) + ax.set_ylabel("$T_i/T_e$") + ax.set_xlabel("$\\rho$") + ax.set_xlim([0, 1]) + ax.axhline(y=1, ls="--", lw=1.0, c="k") + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + ax = axs6[5] + if "MachNum" in self.derived: + ax.plot(self.profiles["rho(-)"], self.derived["MachNum"], c=color, lw=lw) + ax.set_ylabel("Mach Number") + ax.set_xlabel("$\\rho$") + ax.set_xlim([0, 1]) + ax.axhline(y=0, ls="--", c="k", lw=0.5) + ax.axhline(y=1, ls="--", c="k", lw=0.5) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + ax = axs6[6] + safe_division = np.divide( + self.derived["qi_MWm2"], + self.derived["qe_MWm2"], + where=self.derived["qe_MWm2"] != 0, + out=np.full_like(self.derived["qi_MWm2"], np.nan), + ) + ax.plot( + self.profiles["rho(-)"], + safe_division, + c=color, + lw=lw, + label=extralab + "$Q_i/Q_e$", + ) + safe_division = np.divide( + self.derived["qi_aux_MW"], + self.derived["qe_aux_MW"], + where=self.derived["qe_aux_MW"] != 0, + out=np.full_like(self.derived["qi_aux_MW"], np.nan), + ) + ax.plot( + self.profiles["rho(-)"], + safe_division, + c=color, + lw=lw, + ls="--", + label=extralab + "$P_{aux,i}/P_{aux,e}$", + ) + safe_division = np.divide( + self.derived["qi_aux_MW"]+self.derived['qi_fus_MW'], + self.derived["qe_aux_MW"]+self.derived['qe_fus_MW'], + where=(self.derived["qe_aux_MW"]+self.derived['qe_fus_MW']) != 0, + out=np.full_like(self.derived["qi_aux_MW"], np.nan), + ) + ax.plot( + self.profiles["rho(-)"], + safe_division, + c=color, + lw=lw, + ls="-.", + label=extralab + "$(P_{aux,i}+P_{fus,i})/(P_{aux,e}+P_{fus,e})$", + ) + ax.set_ylabel("Power ratios") + ax.set_xlabel("$\\rho$") + ax.set_xlim([0, 1]) + ax.axhline(y=1.0, ls="--", c="k", lw=1.0) + GRAPHICStools.addDenseAxis(ax) + 
GRAPHICStools.autoscale_y(ax,bottomy=0)#ax.set_ylim(bottom=0) + ax.legend(loc="best", fontsize=fs) + + # Currents + + ax = axs6[1] + + var = self.profiles["johm(MA/m^2)"] + ax.plot(rho, var, "-", lw=lw, c=color, label=extralab + "$J_{OH}$") + var = self.profiles["jbs(MA/m^2)"] + ax.plot(rho, var, "--", lw=lw, c=color, label=extralab + "$J_{BS,par}$") + var = self.profiles["jbstor(MA/m^2)"] + ax.plot(rho, var, "-.", lw=lw, c=color, label=extralab + "$J_{BS,tor}$") + + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + GRAPHICStools.autoscale_y(ax,bottomy=0)#ax.set_ylim(bottom=0) + ax.set_ylabel("J ($MA/m^2$)") + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axs6[7] + cont = 0 + if "vtor(m/s)" in self.profiles: + for i in range(len(self.Species)): + try: # REMOVE FOR FUTURE + var = self.profiles["vtor(m/s)"][:, i] * 1e-3 + ax.plot( + rho, + var, + lw=lw, + ls=lines[cont], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + cont += 1 + except: + break + varL = "$V_{tor}$ (km/s)" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axs6[8] + + ax.plot(rho, self.derived["B_unit"], "-", lw=lw, c=color, label=extralab + "$B_{unit}$") + ax.plot(rho, self.derived["B_ref"], "--", lw=lw, c=color, label=extralab + "$B_{ref}$") + ax.axhline(y=self.profiles["bcentr(T)"][0], lw=lw, ls=":", c=color, label=extralab + "$B_{centr}$") + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel("$B$ (T)") + + GRAPHICStools.addDenseAxis(ax) + + ax.legend(loc="best", fontsize=fs) + + + + +def plot_flows(self, axs=None, limits=None, ls="-", leg=True, showtexts=True): + if axs is None: + fig1 = plt.figure() + grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.3) + + axs = [ + fig1.add_subplot(grid[0, 0]), + fig1.add_subplot(grid[1, 0]), + fig1.add_subplot(grid[0, 1]), + fig1.add_subplot(grid[0, 2]), + 
fig1.add_subplot(grid[1, 1]), + fig1.add_subplot(grid[1, 2]), + ] + + # Profiles + + ax = axs[0] + axT = axs[1] + roa = self.derived['roa'] + Te = self.profiles["te(keV)"] + ne = self.profiles["ne(10^19/m^3)"] * 1e-1 + ni = self.profiles["ni(10^19/m^3)"] * 1e-1 + niT = np.sum(ni, axis=1) + Ti = self.profiles["ti(keV)"][:, 0] + ax.plot(roa, Te, lw=2, c="r", label="$T_e$" if leg else "", ls=ls) + ax.plot(roa, Ti, lw=2, c="b", label="$T_i$" if leg else "", ls=ls) + axT.plot(roa, ne, lw=2, c="m", label="$n_e$" if leg else "", ls=ls) + axT.plot(roa, niT, lw=2, c="c", label="$\\sum n_i$" if leg else "", ls=ls) + if limits is not None: + [roa_first, roa_last] = limits + ax.plot(roa_last, np.interp(roa_last, roa, Te), "s", c="r", markersize=3) + ax.plot(roa_first, np.interp(roa_first, roa, Te), "s", c="r", markersize=3) + ax.plot(roa_last, np.interp(roa_last, roa, Ti), "s", c="b", markersize=3) + ax.plot(roa_first, np.interp(roa_first, roa, Ti), "s", c="b", markersize=3) + axT.plot(roa_last, np.interp(roa_last, roa, ne), "s", c="m", markersize=3) + axT.plot(roa_first, np.interp(roa_first, roa, ne), "s", c="m", markersize=3) + + ax.set_xlabel("r/a") + ax.set_xlim([0, 1]) + axT.set_xlabel("r/a") + axT.set_xlim([0, 1]) + ax.set_ylabel("$T$ (keV)") + ax.set_ylim(bottom=0) + axT.set_ylabel("$n$ ($10^{20}m^{-3}$)") + axT.set_ylim(bottom=0) + # axT.set_ylim([0,np.max(ne)*1.5]) + ax.legend() + axT.legend() + ax.set_title("Final Temperature profiles") + axT.set_title("Final Density profiles") + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + GRAPHICStools.addDenseAxis(axT) + GRAPHICStools.autoscale_y(axT, bottomy=0) + + if showtexts: + if self.derived["Q"] > 0.005: + ax.text( + 0.05, + 0.05, + f"Pfus = {self.derived['Pfus']:.1f}MW, Q = {self.derived['Q']:.2f}", + color="k", + fontsize=10, + fontweight="normal", + horizontalalignment="left", + verticalalignment="bottom", + rotation=0, + transform=ax.transAxes, + ) + + axT.text( + 0.05, + 0.4, + "ne_20 
= {0:.1f} (fG = {1:.2f}), Zeff = {2:.1f}".format( + self.derived["ne_vol20"], + self.derived["fG"], + self.derived["Zeff_vol"], + ), + color="k", + fontsize=10, + fontweight="normal", + horizontalalignment="left", + verticalalignment="bottom", + rotation=0, + transform=axT.transAxes, + ) + + # F + ax = axs[2] + P = ( + self.derived["qe_fus_MW"] + + self.derived["qe_aux_MW"] + + -self.derived["qe_rad_MW"] + + -self.derived["qe_exc_MW"] + ) + + ax.plot( + roa, + -self.derived["qe_MW"], + c="g", + lw=2, + label="$P_{e}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qe_fus_MW"], + c="r", + lw=2, + label="$P_{fus,e}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qe_aux_MW"], + c="b", + lw=2, + label="$P_{aux,e}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + -self.derived["qe_exc_MW"], + c="m", + lw=2, + label="$P_{exc,e}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + -self.derived["qe_rad_MW"], + c="c", + lw=2, + label="$P_{rad,e}$" if leg else "", + ls=ls, + ) + ax.plot(roa, -P, lw=1, c="y", label="sum" if leg else "", ls=ls) + + # Pe = self.profiles['te(keV)']*1E3*e_J*self.profiles['ne(10^19/m^3)']*1E-1*1E20 *1E-6 + # ax.plot(roa,Pe,ls='-',lw=3,alpha=0.1,c='k',label='$W_e$ (MJ/m^3)') + + ax.plot( + roa, + -self.derived["ce_MW"], + c="k", + lw=1, + label="($P_{conv,e}$)" if leg else "", + ) + + ax.set_xlabel("r/a") + ax.set_xlim([0, 1]) + ax.set_ylabel("$P$ (MW)") + # ax.set_ylim(bottom=0) + ax.set_title("Electron Thermal Flows") + ax.axhline(y=0.0, lw=0.5, ls="--", c="k") + GRAPHICStools.addLegendApart( + ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" + ) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axs[3] + P = ( + self.derived["qi_fus_MW"] + + self.derived["qi_aux_MW"] + + self.derived["qe_exc_MW"] + ) + + ax.plot( + roa, + -self.derived["qi_MW"], + c="g", + lw=2, + label="$P_{i}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qi_fus_MW"], + 
c="r", + lw=2, + label="$P_{fus,i}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qi_aux_MW"], + c="b", + lw=2, + label="$P_{aux,i}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qe_exc_MW"], + c="m", + lw=2, + label="$P_{exc,i}$" if leg else "", + ls=ls, + ) + ax.plot(roa, -P, lw=1, c="y", label="sum" if leg else "", ls=ls) + + # Pi = self.profiles['ti(keV)'][:,0]*1E3*e_J*self.profiles['ni(10^19/m^3)'][:,0]*1E-1*1E20 *1E-6 + # ax.plot(roa,Pi,ls='-',lw=3,alpha=0.1,c='k',label='$W_$ (MJ/m^3)') + + ax.set_xlabel("r/a") + ax.set_xlim([0, 1]) + ax.set_ylabel("$P$ (MW)") + # ax.set_ylim(bottom=0) + ax.set_title("Ion Thermal Flows") + ax.axhline(y=0.0, lw=0.5, ls="--", c="k") + GRAPHICStools.addLegendApart( + ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" + ) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + # F + ax = axs[4] + + ax.plot( + roa, + self.derived["ge_10E20"], + c="g", + lw=2, + label="$\\Gamma_{e}$" if leg else "", + ls=ls, + ) + # ax.plot(roa,self.profiles['ne(10^19/m^3)']*1E-1,lw=3,alpha=0.1,c='k',label='$n_e$ ($10^{20}/m^3$)' if leg else '',ls=ls) + + ax.set_xlabel("r/a") + ax.set_xlim([0, 1]) + ax.set_ylabel("$\\Gamma$ ($10^{20}/s$)") + ax.set_title("Particle Flows") + ax.axhline(y=0.0, lw=0.5, ls="--", c="k") + GRAPHICStools.addLegendApart( + ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" + ) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + # TOTAL + ax = axs[5] + P = ( + self.derived["qOhm_MW"] + + self.derived["qRF_MW"] + + self.derived["qFus_MW"] + + -self.derived["qe_rad_MW"] + + self.derived["qz_MW"] + + self.derived["qBEAM_MW"] + ) + + ax.plot( + roa, + -self.derived["q_MW"], + c="g", + lw=2, + label="$P$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qOhm_MW"], + c="k", + lw=2, + label="$P_{Oh}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qRF_MW"], + c="b", + lw=2, + 
label="$P_{RF}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qBEAM_MW"], + c="pink", + lw=2, + label="$P_{NBI}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qFus_MW"], + c="r", + lw=2, + label="$P_{fus}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + -self.derived["qe_rad_MW"], + c="c", + lw=2, + label="$P_{rad}$" if leg else "", + ls=ls, + ) + ax.plot( + roa, + self.derived["qz_MW"], + c="orange", + lw=1, + label="$P_{ionz.}$" if leg else "", + ls=ls, + ) + + # P = Pe+Pi + # ax.plot(roa,P,ls='-',lw=3,alpha=0.1,c='k',label='$W$ (MJ)') + + ax.plot(roa, -P, lw=1, c="y", label="sum" if leg else "", ls=ls) + ax.set_xlabel("r/a") + ax.set_xlim([0, 1]) + ax.set_ylabel("$P$ (MW)") + # ax.set_ylim(bottom=0) + ax.set_title("Total Thermal Flows") + + GRAPHICStools.addLegendApart( + ax, ratio=0.9, withleg=True, extraPad=0, size=None, loc="upper left" + ) + + ax.axhline(y=0.0, lw=0.5, ls="--", c="k") + # GRAPHICStools.drawLineWithTxt(ax,0.0,label='',orientation='vertical',color='k',lw=1,ls='--',alpha=1.0,fontsize=10,fromtop=0.85,fontweight='normal', + # verticalalignment='bottom',horizontalalignment='left',separation=0) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + + + +def plot_ions(self, axsImps, legYN=True, extralab="", color="b", lw=1, fs=6): + + rho = self.profiles["rho(-)"] + lines = GRAPHICStools.listLS() + + # Impurities + ax = axsImps[0] + for i in range(len(self.Species)): + var = ( + self.profiles["ni(10^19/m^3)"][:, i] + / self.profiles["ni(10^19/m^3)"][0, i] + ) + ax.plot( + rho, + var, + lw=lw, + ls=lines[i], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + varL = "$n_i/n_{i,0}$" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axsImps[1] + for i in range(len(self.Species)): + var = self.derived["fi"][:, 
i] + ax.plot( + rho, + var, + lw=lw, + ls=lines[i], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + varL = "$f_i$" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + ax.set_ylim([0, 1]) + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axsImps[2] + + lastRho = 0.9 + + ix = np.argmin(np.abs(self.profiles["rho(-)"] - lastRho)) + 1 + ax.plot( + rho[:ix], self.derived["aLne"][:ix], lw=lw * 3, ls="-", c=color, label="e" + ) + for i in range(len(self.Species)): + var = self.derived["aLni"][:, i] + ax.plot( + rho[:ix], + var[:ix], + lw=lw, + ls=lines[i], + c=color, + label=extralab + f"{i + 1} = {self.profiles['name'][i]}", + ) + varL = "$a/L_{ni}$" + ax.set_xlim([0, 1]) + ax.set_xlabel("$\\rho$") + ax.set_ylabel(varL) + if legYN: + ax.legend(loc="best", fontsize=fs) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + ax = axsImps[3] + ax.plot(self.profiles["rho(-)"], self.derived["Zeff"], c=color, lw=lw) + ax.set_ylabel("$Z_{eff}$") + ax.set_xlabel("$\\rho$") + ax.set_xlim([0, 1]) + + GRAPHICStools.addDenseAxis(ax) + GRAPHICStools.autoscale_y(ax, bottomy=0) + + +def plotAll(profiles_list, figs=None, extralabs=None, lastRhoGradients=0.89): + if figs is not None: + fn = None + else: + from mitim_tools.misc_tools.GUItools import FigureNotebook + fn = FigureNotebook("Profiles", geometry="1800x900") + figs = add_figures(fn) + + axsProf_1, axsProf_2, axsProf_3, axsProf_4, axsFlows, axsProf_6, axsImps = add_axes(figs) + + ls = GRAPHICStools.listLS() + colors = GRAPHICStools.listColors() + for i, profiles in enumerate(profiles_list): + if extralabs is None: + extralab = f"#{i}, " + else: + extralab = f"{extralabs[i]}, " + + profiles.plot( + axs1=axsProf_1,axs2=axsProf_2,axs3=axsProf_3,axs4=axsProf_4,axsFlows=axsFlows,axs6=axsProf_6,axsImps=axsImps, 
+ color=colors[i],legYN=True,extralab=extralab,lsFlows=ls[i],legFlows=i == 0,showtexts=False,lastRhoGradients=lastRhoGradients, + ) + + return fn diff --git a/src/mitim_tools/popcon_tools/FunctionalForms.py b/src/mitim_tools/popcon_tools/FunctionalForms.py index 45b60748..b0d8352c 100644 --- a/src/mitim_tools/popcon_tools/FunctionalForms.py +++ b/src/mitim_tools/popcon_tools/FunctionalForms.py @@ -1,10 +1,11 @@ +from matplotlib import markers import torch import numpy as np import matplotlib.pyplot as plt from mitim_tools.popcon_tools.utils import PRFfunctionals, FUNCTIONALScalc from mitim_tools.misc_tools import MATHtools, GRAPHICStools from mitim_tools.misc_tools.LOGtools import printMsg as print -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from IPython import embed @@ -65,11 +66,28 @@ def PRFfunctionals_Lmode(T_avol, n_avol, nu_n, rho=None, debug=False): print(f">> Gradient aLT outside of search range ({g1})", typeMsg="w") if debug: - fig, ax = plt.subplots() + fig, axs = plt.subplots(nrows=3, figsize=(6, 10)) + ax = axs[0] ax.plot(gs, Tvol, "-s") ax.axhline(y=T_avol) ax.axvline(x=g1) + ax = axs[1] + ax.plot(x[0], T, "-s", markersize=1) + ax.set_xlabel(r"$\rho$") + ax.set_ylabel(r"$T$") + GRAPHICStools.addDenseAxis(ax) + ax = axs[2] + aLT_reconstructed = CALCtools.derivation_into_Lx( + torch.from_numpy(x[0]), + torch.from_numpy(T), + ).cpu().numpy() + ax.plot(x[0], aLT_reconstructed, "-s", markersize=1) + ax.set_xlabel(r"$\rho$") + ax.set_ylabel(r"$1/L_T$") + GRAPHICStools.addDenseAxis(ax) + plt.tight_layout() plt.show() + embed() # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ~~~ Density @@ -79,19 +97,6 @@ def PRFfunctionals_Lmode(T_avol, n_avol, nu_n, rho=None, debug=False): _, n = parabolic(Tbar=n_avol, nu=nu_n, rho=x[0], Tedge=n_roa1) - # g2_n = 1 - # g1Range_n = [0.1,10] - # xtransition_n = 0.8 - - # gs,nvol = np.linspace(g1Range_n[0],g1Range_n[1],points_search), [] - - # n = 
FUNCTIONALScalc.doubleLinear_aLT(x,gs,g2_n,xtransition_n,n_roa1) - # nvol = FUNCTIONALScalc.calculate_simplified_volavg(x,n) - # g1 = MATHtools.extrapolateCubicSpline(n_avol,nvol,gs) - # n = FUNCTIONALScalc.doubleLinear_aLT(np.atleast_2d(x[0]),g1,g2_n,xtransition_n,n_roa1)[0] - - # if g1g1Range_n[1]: print(f'>> Gradient aLn outside of search range ({g1})',typeMsg='w') - return x[0], T, n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -156,7 +161,7 @@ def MITIMfunctional_aLyTanh( aLy_profile[~linear_region] = aLy # Create core profile - Ycore = CALCtools.integrateGradient(torch.from_numpy(xcore).unsqueeze(0), + Ycore = CALCtools.integration_Lx(torch.from_numpy(xcore).unsqueeze(0), torch.from_numpy(aLy_profile).unsqueeze(0), y_top ).cpu().numpy()[0] @@ -182,7 +187,7 @@ def MITIMfunctional_aLyTanh( GRAPHICStools.addDenseAxis(ax) ax = axs[1] - aLy_reconstructed = CALCtools.produceGradient(torch.from_numpy(x), torch.from_numpy(y)).cpu().numpy() + aLy_reconstructed = CALCtools.derivation_into_Lx(torch.from_numpy(x), torch.from_numpy(y)).cpu().numpy() ax.plot(x, aLy_reconstructed, '-o', c='b', markersize=3, lw=1.0) ax.plot(xcore, aLy_profile, '-*', c='r', markersize=1, lw=1.0) diff --git a/src/mitim_tools/popcon_tools/POPCONtools.py b/src/mitim_tools/popcon_tools/POPCONtools.py index f5dde84c..e4226f94 100644 --- a/src/mitim_tools/popcon_tools/POPCONtools.py +++ b/src/mitim_tools/popcon_tools/POPCONtools.py @@ -21,42 +21,42 @@ def evaluate(self): self.results = self.algorithm.update_dataset(self.dataset) def update_from_gacode(self, - profiles_gacode: PROFILEStools.PROFILES_GACODE, + gacode_state: PROFILEStools.gacode_state, confinement_type="H98" ): - self.dataset['major_radius'].data = profiles_gacode.profiles['rcentr(m)'][-1] * ureg.meter + self.dataset['major_radius'].data = gacode_state.profiles['rcentr(m)'][-1] * ureg.meter - self.dataset["magnetic_field_on_axis"].data = np.abs(profiles_gacode.derived["B0"]) * ureg.tesla + 
self.dataset["magnetic_field_on_axis"].data = np.abs(gacode_state.derived["B0"]) * ureg.tesla - rmin = profiles_gacode.profiles["rmin(m)"][-1] - rmaj = profiles_gacode.profiles["rmaj(m)"][-1] + rmin = gacode_state.profiles["rmin(m)"][-1] + rmaj = gacode_state.profiles["rmaj(m)"][-1] self.dataset["inverse_aspect_ratio"].data = (rmin / rmaj) * ureg.dimensionless - kappa_a = 1.5 #profiles_gacode.derived["kappa_a"] - kappa_sep = profiles_gacode.profiles["kappa(-)"][-1] + kappa_a = 1.5 #gacode_state.derived["kappa_a"] + kappa_sep = gacode_state.profiles["kappa(-)"][-1] self.dataset["areal_elongation"].data = kappa_a * ureg.dimensionless self.dataset["elongation_ratio_sep_to_areal"].data = (kappa_sep / kappa_a) * ureg.dimensionless - delta_95 = np.interp(0.95, profiles_gacode.derived["psi_pol_n"], profiles_gacode.profiles["delta(-)"]) - delta_sep = profiles_gacode.profiles["delta(-)"][-1] + delta_95 = np.interp(0.95, gacode_state.derived["psi_pol_n"], gacode_state.profiles["delta(-)"]) + delta_sep = gacode_state.profiles["delta(-)"][-1] self.dataset["triangularity_psi95"].data = delta_95 * ureg.dimensionless self.dataset["triangularity_ratio_sep_to_psi95"].data = (delta_sep / delta_95) * ureg.dimensionless - ip = np.abs(profiles_gacode.profiles["current(MA)"][-1]) * 1e6 + ip = np.abs(gacode_state.profiles["current(MA)"][-1]) * 1e6 self.dataset["plasma_current"].data = ip * ureg.ampere - ne_vol_19 = profiles_gacode.derived["ne_vol20"] * 10 + ne_vol_19 = gacode_state.derived["ne_vol20"] * 10 self.dataset = self.dataset.assign_coords(dim_average_electron_density=np.array([ne_vol_19])) self.dataset["average_electron_density"].data = np.array([ne_vol_19]) * ureg._1e19_per_cubic_metre - #self.dataset["nesep_over_nebar"].data = profiles_gacode.profiles["ne(10^19/m^3)"][-1] / ne_vol_19 + #self.dataset["nesep_over_nebar"].data = gacode_state.profiles["ne(10^19/m^3)"][-1] / ne_vol_19 - te_vol_keV = profiles_gacode.derived["Te_vol"] + te_vol_keV = gacode_state.derived["Te_vol"] 
self.dataset = self.dataset.assign_coords(dim_average_electron_temp=np.array([te_vol_keV])) self.dataset["average_electron_temp"].data = np.array([te_vol_keV]) * ureg.kiloelectron_volt - impurity_zs = profiles_gacode.profiles["z"][np.where(profiles_gacode.profiles["z"] > 1)] - imputity_fs = profiles_gacode.derived["fi_vol"][np.where(profiles_gacode.profiles["z"] > 1)] + impurity_zs = gacode_state.profiles["z"][np.where(gacode_state.profiles["z"] > 1)] + imputity_fs = gacode_state.derived["fi_vol"][np.where(gacode_state.profiles["z"] > 1)] impurities = [] concentrations = [] named_options_array = [1,2,3,4,6,7,8,10,18,36,54,74] # atomicspecies built into cfspopcon @@ -79,33 +79,33 @@ def update_from_gacode(self, #from .formulas.impurities.impurity_array_helpers import make_impurity_concentration_array self.dataset['impurities'] = cfspopcon.formulas.impurities.impurity_array_helpers.make_impurity_concentration_array(impurities, concentrations) - arg_min_rho = np.argmin(np.abs(profiles_gacode.profiles["rho(-)"] - 0.4)) - arg_max_rho = np.argmin(np.abs(profiles_gacode.profiles["rho(-)"] - 0.8)) + arg_min_rho = np.argmin(np.abs(gacode_state.profiles["rho(-)"] - 0.4)) + arg_max_rho = np.argmin(np.abs(gacode_state.profiles["rho(-)"] - 0.8)) # calculate the predicted density peaking using the Angioni 2007 scaling - beta_percent = (profiles_gacode.derived["BetaN"] - *profiles_gacode.profiles["current(MA)"][-1] - / profiles_gacode.profiles["rmin(m)"][-1] - / profiles_gacode.profiles['bcentr(T)'][-1]) + beta_percent = (gacode_state.derived["BetaN"] + *gacode_state.profiles["current(MA)"][-1] + / gacode_state.profiles["rmin(m)"][-1] + / gacode_state.profiles['bcentr(T)'][-1]) - nu_n_scaling = cfspopcon.formulas.plasma_profiles.calc_density_peaking(profiles_gacode.derived["nu_eff"], + nu_n_scaling = cfspopcon.formulas.plasma_profiles.calc_density_peaking(gacode_state.derived["nu_eff"], beta_percent*1e-2, nu_noffset=0.0) - aLTe = 
profiles_gacode.derived["aLTe"][arg_min_rho:arg_max_rho].mean() + aLTe = gacode_state.derived["aLTe"][arg_min_rho:arg_max_rho].mean() self.dataset["normalized_inverse_temp_scale_length"].data = aLTe * ureg.dimensionless - nu_ne_offset = (nu_n_scaling - profiles_gacode.derived["ne_peaking"]) + nu_ne_offset = (nu_n_scaling - gacode_state.derived["ne_peaking"]) self.dataset["electron_density_peaking_offset"].data = nu_ne_offset * ureg.dimensionless self.dataset["ion_density_peaking_offset"].data = nu_ne_offset * ureg.dimensionless - nu_te = profiles_gacode.derived["Te_peaking"] + nu_te = gacode_state.derived["Te_peaking"] self.dataset["temperature_peaking"].data = nu_te * ureg.dimensionless - confinement_scalar = profiles_gacode.derived[confinement_type] + confinement_scalar = gacode_state.derived[confinement_type] self.dataset["confinement_time_scalar"].data = confinement_scalar * ureg.dimensionless - ti_over_te = profiles_gacode.derived["tite_vol"] + ti_over_te = gacode_state.derived["tite_vol"] self.dataset["ion_to_electron_temp_ratio"].data = ti_over_te * ureg.dimensionless def update_transport(self, @@ -124,7 +124,7 @@ def update_transport(self, self.dataset["ion_to_electron_temp_ratio"].data = ti_over_te * ureg.dimensionless def match_to_gacode(self, - profiles_gacode: PROFILEStools.PROFILES_GACODE, + gacode_state: PROFILEStools.gacode_state, confinement_type="H98", print_progress=False, plot_convergence=True, @@ -139,7 +139,7 @@ def match_to_gacode(self, print("Starting optimization...") print("... 
Initializing POPCON with GACODE parameters") - self.update_from_gacode(profiles_gacode=profiles_gacode, + self.update_from_gacode(gacode_state=gacode_state, confinement_type=confinement_type ) @@ -163,7 +163,7 @@ def match_to_gacode(self, res = minimize(self.match_pfus, x0, - args=(profiles_gacode, print_progress), + args=(gacode_state, print_progress), method='Nelder-Mead', bounds=bounds, options={'disp': True}, @@ -179,7 +179,7 @@ def match_to_gacode(self, def match_pfus(self, x, - profiles_gacode, + gacode_state, print_progress=False ): @@ -194,29 +194,29 @@ def match_pfus(self, point = self.results.isel(dim_average_electron_temp=0, dim_average_electron_density=0) - Pfus_residual = (point['P_fusion'].data.magnitude - profiles_gacode.derived['Pfus']) / profiles_gacode.derived['Pfus'] + Pfus_residual = (point['P_fusion'].data.magnitude - gacode_state.derived['Pfus']) / gacode_state.derived['Pfus'] #Psol_residual = ((point['P_LH_thresh'].data.magnitude #* point['ratio_of_P_SOL_to_P_LH'].data.magnitude) - #- profiles_gacode.derived['Psol']) / profiles_gacode.derived['Psol'] + #- gacode_state.derived['Psol']) / gacode_state.derived['Psol'] - Prad_residual = (point['P_radiation'].data.magnitude - profiles_gacode.derived['Prad']) / profiles_gacode.derived['Prad'] + Prad_residual = (point['P_radiation'].data.magnitude - gacode_state.derived['Prad']) / gacode_state.derived['Prad'] - Pin_derived = (profiles_gacode.derived['qi_aux_MWmiller'][-1] - +profiles_gacode.derived['qe_aux_MWmiller'][-1] + Pin_derived = (gacode_state.derived['qi_aux_MW'][-1] + +gacode_state.derived['qe_aux_MW'][-1] ) Pin_residual = (point['P_auxillary_launched'].data.magnitude - Pin_derived) / Pin_derived - te_profiles = np.interp(point["dim_rho"], point["dim_rho"].size*profiles_gacode.profiles["rho(-)"] ,profiles_gacode.profiles["te(keV)"]) + te_profiles = np.interp(point["dim_rho"], point["dim_rho"].size*gacode_state.profiles["rho(-)"] ,gacode_state.profiles["te(keV)"]) te_popcon = 
point["electron_temp_profile"].data.magnitude te_L2 = np.sum(((te_profiles-te_popcon)/te_profiles)**2) - ti_profiles = np.interp(point["dim_rho"], point["dim_rho"].size*profiles_gacode.profiles["rho(-)"] ,profiles_gacode.profiles["ti(keV)"][:, 0]) + ti_profiles = np.interp(point["dim_rho"], point["dim_rho"].size*gacode_state.profiles["rho(-)"] ,gacode_state.profiles["ti(keV)"][:, 0]) ti_popcon = point["ion_temp_profile"].data.magnitude ti_L2 = np.sum(((ti_profiles-ti_popcon)/ti_profiles)**2) - ne19_profiles = np.interp(point["dim_rho"], point["dim_rho"].size*profiles_gacode.profiles["rho(-)"] ,profiles_gacode.profiles["ne(10^19/m^3)"]) + ne19_profiles = np.interp(point["dim_rho"], point["dim_rho"].size*gacode_state.profiles["rho(-)"] ,gacode_state.profiles["ne(10^19/m^3)"]) ne19_popcon = point["electron_density_profile"].data.magnitude ne19_L2 = np.sum(((ne19_profiles-ne19_popcon)/ne19_profiles)**2) @@ -239,7 +239,7 @@ def match_pfus(self, def match_radiation(self, x, - profiles_gacode + gacode_state ): radiated_power_scalar = x[0] @@ -252,7 +252,7 @@ def match_radiation(self, point = self.results.isel(dim_average_electron_temp=0, dim_average_electron_density=0) - Prad_residual = (point['P_radiation'].data.magnitude - profiles_gacode.derived['Prad']) / profiles_gacode.derived['Prad'] + Prad_residual = (point['P_radiation'].data.magnitude - gacode_state.derived['Prad']) / gacode_state.derived['Prad'] return Prad_residual**2 @@ -292,7 +292,7 @@ def plot_convergence(self): plt.tight_layout() plt.show() - def plot_profile_comparison(self, profiles_gacode: PROFILEStools.PROFILES_GACODE): + def plot_profile_comparison(self, gacode_state: PROFILEStools.gacode_state): point = self.results.isel(dim_average_electron_temp=0, dim_average_electron_density=0) @@ -302,18 +302,18 @@ def plot_profile_comparison(self, profiles_gacode: PROFILEStools.PROFILES_GACODE x = np.linspace(0,1, point["dim_rho"].size) # normalized toroidal flux ax.plot(x, - np.interp(point["dim_rho"], 
point["dim_rho"].size*profiles_gacode.profiles["rho(-)"] ,profiles_gacode.profiles["te(keV)"]), + np.interp(point["dim_rho"], point["dim_rho"].size*gacode_state.profiles["rho(-)"] ,gacode_state.profiles["te(keV)"]), label="Te (GACODE)",color='tab:red') ax.plot(x,point["electron_temp_profile"], label="Te (POPCON)",ls="--",color='tab:red') ax.plot(x, - np.interp(point["dim_rho"], point["dim_rho"].size*profiles_gacode.profiles["rho(-)"] ,profiles_gacode.profiles["ti(keV)"][:, 0]), + np.interp(point["dim_rho"], point["dim_rho"].size*gacode_state.profiles["rho(-)"] ,gacode_state.profiles["ti(keV)"][:, 0]), label="Ti (GACODE)",color='tab:purple') ax.plot(x,point["ion_temp_profile"], label="Ti (POPCON)",ls="--",color='tab:purple') ax2.plot(x, - np.interp(point["dim_rho"], point["dim_rho"].size*profiles_gacode.profiles["rho(-)"] ,profiles_gacode.profiles["ne(10^19/m^3)"]), + np.interp(point["dim_rho"], point["dim_rho"].size*gacode_state.profiles["rho(-)"] ,gacode_state.profiles["ne(10^19/m^3)"]), label="ne (GACODE)",color='tab:blue') ax2.plot(x,point["electron_density_profile"], label="ne (POPCON)",ls="--",color='tab:blue') @@ -377,7 +377,7 @@ def plot(self, # Read template plot options if plot_template is None: - plot_template = __mitimroot__ / "templates" / "plot_popcon.yaml" + plot_template = __mitimroot__ / "templates" / "namelist.plot_popcon.yaml" plot_style = cfspopcon.read_plot_style(plot_template) # Update plot options @@ -405,7 +405,7 @@ def plot(self, def print_data(self, compare_to_gacode=False, - profiles_gacode=None + gacode_state=None ): try: @@ -418,26 +418,26 @@ def print_data(self, print(f"Operational point: ={point['average_electron_density'].data.magnitude}, ={point['average_electron_temp'].data.magnitude}") if compare_to_gacode: - if profiles_gacode is None: + if gacode_state is None: raise ValueError("No GACODE profiles passed to compare to.") - print(f"Pfus: ", f"POPCON: {point['P_fusion'].data.magnitude:.2f}", f"GACODE: 
{profiles_gacode.derived['Pfus']:.2f}", "(MW)") - print(f"Q: ", f"POPCON: {point['Q'].data.magnitude:.2f}", f"GACODE: {profiles_gacode.derived['Q']:.2f}") - print(f"TauE: ", f"POPCON: {point['energy_confinement_time'].data.magnitude:.2f}", f"GACODE: {profiles_gacode.derived['tauE']:.2f}", "(s)") - print(f"Beta_N:", f"POPCON: {point['normalized_beta'].data.magnitude:.2f}", f"GACODE: {profiles_gacode.derived['BetaN']:.2f}") + print(f"Pfus: ", f"POPCON: {point['P_fusion'].data.magnitude:.2f}", f"GACODE: {gacode_state.derived['Pfus']:.2f}", "(MW)") + print(f"Q: ", f"POPCON: {point['Q'].data.magnitude:.2f}", f"GACODE: {gacode_state.derived['Q']:.2f}") + print(f"TauE: ", f"POPCON: {point['energy_confinement_time'].data.magnitude:.2f}", f"GACODE: {gacode_state.derived['tauE']:.2f}", "(s)") + print(f"Beta_N:", f"POPCON: {point['normalized_beta'].data.magnitude:.2f}", f"GACODE: {gacode_state.derived['BetaN']:.2f}") print(f"P_sol: ", f"POPCON: {(point['P_LH_thresh'].data.magnitude *point['ratio_of_P_SOL_to_P_LH'].data.magnitude):.2f}", - f"GACODE: {profiles_gacode.derived['Psol']:.2f}","(MW)", f"(~%{point['ratio_of_P_SOL_to_P_LH'].data.magnitude*1e2:.2f} of LH threshold)") + f"GACODE: {gacode_state.derived['Psol']:.2f}","(MW)", f"(~%{point['ratio_of_P_SOL_to_P_LH'].data.magnitude*1e2:.2f} of LH threshold)") print(f"P_aux: ", f"POPCON: {point['P_auxillary_launched'].data.magnitude:.2f}", - f"GACODE: {(profiles_gacode.derived['qi_aux_MWmiller'][-1]+profiles_gacode.derived['qe_aux_MWmiller'][-1]):.2f}", + f"GACODE: {(gacode_state.derived['qi_aux_MW'][-1]+gacode_state.derived['qe_aux_MW'][-1]):.2f}", "(MW)") - print(f"P_rad: ", f"POPCON: {point['P_radiation'].data.magnitude:.2f}",f"GACODE: {profiles_gacode.derived['Prad']:.2f}","(MW)") + print(f"P_rad: ", f"POPCON: {point['P_radiation'].data.magnitude:.2f}",f"GACODE: {gacode_state.derived['Prad']:.2f}","(MW)") print(f"P_ext: ", f"POPCON: {point['P_external'].data.magnitude:.2f}","(MW)") - print(f"P_ohm: ", f"POPCON: 
{point['P_ohmic'].data.magnitude:.2f}", f"GACODE: {profiles_gacode.derived['qOhm_MWmiller'][-1]:.2f}","(MW)") + print(f"P_ohm: ", f"POPCON: {point['P_ohmic'].data.magnitude:.2f}", f"GACODE: {gacode_state.derived['qOhm_MW'][-1]:.2f}","(MW)") print(f"P_in: ", f"POPCON: {point['P_in'].data.magnitude:.2f}", - f"GACODE: {(profiles_gacode.derived['qOhm_MWmiller'][-1]+profiles_gacode.derived['qi_aux_MWmiller'][-1]+profiles_gacode.derived['qe_aux_MWmiller'][-1]+profiles_gacode.derived['Pfus']*0.2):.2f}", + f"GACODE: {(gacode_state.derived['qOhm_MW'][-1]+gacode_state.derived['qi_aux_MW'][-1]+gacode_state.derived['qe_aux_MW'][-1]+gacode_state.derived['Pfus']*0.2):.2f}", "(MW)") - print(f"q95: ", f"POPCON: {point['q_star'].data.magnitude:.2f}", f"GACODE: {profiles_gacode.derived['q95']:.2f}") - print(f"Wtot: ", f"POPCON: {point['plasma_stored_energy'].data.magnitude:.2f}", f"GACODE: {profiles_gacode.derived['Wthr']:.2f}", "(MJ)") + print(f"q95: ", f"POPCON: {point['q_star'].data.magnitude:.2f}", f"GACODE: {gacode_state.derived['q95']:.2f}") + print(f"Wtot: ", f"POPCON: {point['plasma_stored_energy'].data.magnitude:.2f}", f"GACODE: {gacode_state.derived['Wthr']:.2f}", "(MJ)") print(" ") print("Transport Parameters:") diff --git a/src/mitim_tools/popcon_tools/RAPIDStools.py b/src/mitim_tools/popcon_tools/RAPIDStools.py index 3d6ad27e..fae4690f 100644 --- a/src/mitim_tools/popcon_tools/RAPIDStools.py +++ b/src/mitim_tools/popcon_tools/RAPIDStools.py @@ -94,7 +94,7 @@ def rapids_evaluator(nn, aLT, aLn, TiTe, p_base, R=None, a=None, Bt=None, Ip=Non p.profiles['ne(10^19/m^3)'] = np.interp(p.derived['roa'], roa, ne) p.profiles['ni(10^19/m^3)'] = p_base.profiles['ni(10^19/m^3)'] * np.transpose(np.atleast_2d((p.profiles['ne(10^19/m^3)']/p_base.profiles['ne(10^19/m^3)']))) - p.deriveQuantities() + p.derive_quantities() # Change Zeff p.changeZeff(Zeff, ion_pos=3) @@ -116,7 +116,7 @@ def pedestal(p): p = eped_profiler(p, rhotop_assume, rhotop, Tetop_keV, Titop_keV, netop_20) # Derive 
quantities - p.deriveQuantities(rederiveGeometry=False) + p.derive_quantities(rederiveGeometry=False) BetaN_used = p.derived['BetaN_engineering'] * BetaN_multiplier @@ -135,10 +135,10 @@ def pedestal(p): raise Exception('BetaN error too high') # Power - power = STATEtools.powerstate(p,EvolutionOptions={"rhoPredicted": np.linspace(0.0, 0.9, 50)[1:]}) + power = STATEtools.powerstate(p,evolution_options={"rhoPredicted": np.linspace(0.0, 0.9, 50)[1:]}) power.calculate(None, folder='~/scratch/power/') - profiles_new = power.to_gacode(insert_highres_powers=True) + profiles_new = power.from_powerstate(insert_highres_powers=True) return ptop_kPa,profiles_new, eped_evaluation @@ -229,6 +229,13 @@ def scan_parameter(nn,p_base, xparam, x, nominal_parameters, core, xparamlab='', if vertical_at_nominal: axs[0,0].axvline(x=nominal_parameters[xparam],ls='-.',lw=1.0,c=c) axs[1,0].axvline(x=nominal_parameters[xparam],ls='-.',lw=1.0,c=c) + + fG_nominal = results1['fG'][np.argmin(np.abs(results1['x']- (nominal_parameters[xparam] if not relative else nominal_parameters[xparam])))] + axs[0,1].axvline(x=fG_nominal,ls='-.',lw=1.0,c=c) + axs[1,1].axvline(x=fG_nominal,ls='-.',lw=1.0,c=c) + + + axs[0,1].axvspan(1.0, 1.5, facecolor="k", alpha=0.1, edgecolor="none") axs[1,1].axvspan(1.0, 1.5, facecolor="k", alpha=0.1, edgecolor="none") @@ -316,7 +323,7 @@ def scan_density_additional(nn, p_base, nominal_parameters, core, r, param, para resultsS, ['r','b','g'], ): - results['profs'][0].plotGeometry(ax=ax, surfaces_rho=[1.0], color=c) + results['profs'][0].plot_state_flux_surfaces(ax=ax, surfaces_rho=[1.0], color=c) GRAPHICStools.addDenseAxis(ax) ax.set_xlabel("R (m)") diff --git a/src/mitim_tools/popcon_tools/scripts/test_functionals.py b/src/mitim_tools/popcon_tools/scripts/test_functionals.py index 949af0b4..7d6a1993 100644 --- a/src/mitim_tools/popcon_tools/scripts/test_functionals.py +++ b/src/mitim_tools/popcon_tools/scripts/test_functionals.py @@ -1,6 +1,6 @@ import torch, datetime import 
matplotlib.pyplot as plt -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from mitim_tools.misc_tools import GRAPHICStools, IOtools, PLASMAtools from mitim_tools.popcon_tools.FunctionalForms import ( @@ -22,13 +22,13 @@ x, n = parabolic(Tbar=n_avol, nu=nu_n) axs[0, 0].plot(x, T, "-", c="b", label="Parabolic") -aLT = CALCtools.produceGradient(torch.from_numpy(x), torch.from_numpy(T)).cpu().numpy() +aLT = CALCtools.derivation_into_Lx(torch.from_numpy(x), torch.from_numpy(T)).cpu().numpy() axs[1, 0].plot(x, aLT, "-", c="b") print(f" Parabolic = {FUNCTIONALScalc.calculate_simplified_volavg(x,T)[0]:.3f}keV") # axs[0,1].plot(x,n,'-',c='b') -# aLn = CALCtools.produceGradient(torch.from_numpy(x),torch.from_numpy(n)).cpu().numpy() +# aLn = CALCtools.derivation_into_Lx(torch.from_numpy(x),torch.from_numpy(n)).cpu().numpy() # axs[1,1].plot(x,aLn,'-',c='b') # print(f' Parabolic = {FUNCTIONALScalc.calculate_simplified_volavg(x,n)[0]:.3f}') @@ -36,13 +36,13 @@ # x, T, n = PRFfunctionals_Hmode( T_avol, n_avol, nu_T, nu_n, aLT = 2.0 ) # axs[0,0].plot(x,T,'-',c='r',label='PRFfunctionals (H-mode)') -# aLT = CALCtools.produceGradient(torch.from_numpy(x),torch.from_numpy(T)).cpu().numpy() +# aLT = CALCtools.derivation_into_Lx(torch.from_numpy(x),torch.from_numpy(T)).cpu().numpy() # axs[1,0].plot(x,aLT,'-',c='r') # print(f' PRF H = {FUNCTIONALScalc.calculate_simplified_volavg(x,T)[0]:.3f}keV') # axs[0,1].plot(x,n,'-',c='r') -# aLn = CALCtools.produceGradient(torch.from_numpy(x),torch.from_numpy(n)).cpu().numpy() +# aLn = CALCtools.derivation_into_Lx(torch.from_numpy(x),torch.from_numpy(n)).cpu().numpy() # axs[1,1].plot(x,aLn,'-',c='r') # print(f' PRF H = {FUNCTIONALScalc.calculate_simplified_volavg(x,n)[0]:.3f}') @@ -52,13 +52,13 @@ print("\t* Took: " + IOtools.getTimeDifference(timeBeginning)) axs[0, 0].plot(x, T, "-", c="g", label="Piecewise linear gradient") -aLT = CALCtools.produceGradient(torch.from_numpy(x), 
torch.from_numpy(T)).cpu().numpy() +aLT = CALCtools.derivation_into_Lx(torch.from_numpy(x), torch.from_numpy(T)).cpu().numpy() axs[1, 0].plot(x, aLT, "-", c="g") print(f" PRF L = {FUNCTIONALScalc.calculate_simplified_volavg(x,T)[0]:.3f}keV") axs[0, 1].plot(x, n, "-", c="g") -aLn = CALCtools.produceGradient(torch.from_numpy(x), torch.from_numpy(n)).cpu().numpy() +aLn = CALCtools.derivation_into_Lx(torch.from_numpy(x), torch.from_numpy(n)).cpu().numpy() axs[1, 1].plot(x, aLn, "-", c="g") print(f" PRF L = {FUNCTIONALScalc.calculate_simplified_volavg(x,n)[0]:.3f}") diff --git a/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py b/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py index 4758f47a..b2a8e6e6 100644 --- a/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py +++ b/src/mitim_tools/popcon_tools/utils/FUNCTIONALScalc.py @@ -1,7 +1,7 @@ import torch import numpy as np from IPython import embed -from mitim_modules.powertorch.physics import CALCtools +from mitim_modules.powertorch.utils import CALCtools from mitim_tools.misc_tools.LOGtools import printMsg as print @@ -35,13 +35,10 @@ def doubleLinear_aLT(x, g1, g2, t, T1): def calculate_simplified_volavg(x, T): x = np.atleast_2d(x) dVdr = 2 * x - vol = CALCtools.integrateQuadPoly(torch.from_numpy(x), torch.ones(x.shape) * dVdr) + vol = CALCtools.volume_integration(torch.ones(x.shape), torch.from_numpy(x), dVdr) Tvol = ( - CALCtools.integrateQuadPoly(torch.from_numpy(x), torch.from_numpy(T) * dVdr)[ - :, -1 - ] - / vol[:, -1] + CALCtools.volume_integration(torch.from_numpy(T), torch.from_numpy(x), dVdr)[:, -1] / vol[:, -1] ).cpu().numpy() return Tvol diff --git a/src/mitim_tools/simulation_tools/SIMtools.py b/src/mitim_tools/simulation_tools/SIMtools.py new file mode 100644 index 00000000..4bc6518f --- /dev/null +++ b/src/mitim_tools/simulation_tools/SIMtools.py @@ -0,0 +1,1343 @@ +import shutil +import datetime +import time +import os +import copy +import numpy as np +from pathlib import Path +from 
mitim_tools import __version__ as mitim_version +from mitim_tools.gacode_tools import PROFILEStools +from mitim_tools.gacode_tools.utils import GACODEdefaults, NORMtools +from mitim_tools.misc_tools import FARMINGtools, IOtools +from mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +from mitim_tools.misc_tools.PLASMAtools import md_u + +class mitim_simulation: + ''' + Main class for running GACODE simulations. + ''' + def __init__( + self, + rhos=[None], # rho locations of interest, e.g. [0.4,0.6,0.8] + ): + self.rhos = np.array(rhos) if rhos is not None else None + + self.ResultsFiles = [] + self.ResultsFiles_minimal = [] + + self.nameRunid = "0" + + self.results, self.scans = {}, {} + + self.run_specifications = None + + def prep( + self, + mitim_state, # A MITIM state class + FolderGACODE, # Main folder where all caculations happen (runs will be in subfolders) + cold_start=False, # If True, do not use what it potentially inside the folder, run again + forceIfcold_start=False, # Extra flag + ): + ''' + This method prepares the GACODE run from a MITIM state class by setting up the necessary input files and directories. + ''' + + print("> Preparation run from MITIM state class (direct conversion)") + + if self.run_specifications is None: + raise Exception("[MITIM] Simulation child class did not define run specifications") + + state_converter = self.run_specifications['state_converter'] # e.g. to_tglf + input_class = self.run_specifications['input_class'] # e.g. TGLFinput + input_file = self.run_specifications['input_file'] # e.g. 
input.tglf + + self.FolderGACODE = IOtools.expandPath(FolderGACODE) + + if cold_start or not self.FolderGACODE.exists(): + IOtools.askNewFolder(self.FolderGACODE, force=forceIfcold_start) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Prepare state + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + if isinstance(mitim_state, str) or isinstance(mitim_state, Path): + # If a string, assume it's a path to input.gacode + self.profiles = PROFILEStools.gacode_state(mitim_state) + else: + self.profiles = mitim_state + + # Keep a copy of the file + self.profiles.write_state(file=self.FolderGACODE / "input.gacode_torun") + + self.profiles.derive_quantities(mi_ref=md_u) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Initialize from state + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + # Call the method dynamically based on state_converter + conversion_method = getattr(self.profiles, state_converter) + self.inputs_files = conversion_method(r=self.rhos, r_is_rho=True) + + for rho in self.inputs_files: + + # Initialize class + self.inputs_files[rho] = input_class.initialize_in_memory(self.inputs_files[rho]) + + # Write input.tglf file + self.inputs_files[rho].file = self.FolderGACODE / f'{input_file}_{rho:.4f}' + self.inputs_files[rho].write_state() + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Definining normalizations + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + print("> Setting up normalizations") + self.NormalizationSets, cdf = NORMtools.normalizations(self.profiles) + + return cdf + + def run( + self, + subfolder, # 'neo1/', + code_settings=None, + extraOptions={}, + multipliers={}, + minimum_delta_abs={}, + ApplyCorrections=True, # Removing ions with too low density and that are fast species + Quasineutral=False, # Ensures quasineutrality. 
By default is False because I may want to run the file directly + launchSlurm=True, + cold_start=False, + forceIfcold_start=False, + extra_name="exe", + slurm_setup=None, # Cores per call (so, when running nR radii -> nR*4) + attempts_execution=1, + only_minimal_files=False, + run_type = 'normal', # 'normal': submit and wait; 'submit': submit and do not wait; 'prep': do not submit + additional_files_to_send = None, # Dict (rho keys) of files to send along with the run (e.g. for restart) + helper_lostconnection=False, # If True, it means that the connection to the remote machine was lost, but the files are there, so I just want to retrieve them not execute the commands + ): + + if slurm_setup is None: + slurm_setup = {"cores": self.run_specifications['default_cores'], "minutes": 10} + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Prepare inputs + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + code_executor, code_executor_full = self._run_prepare( + # + subfolder, + code_executor={}, + code_executor_full={}, + # + code_settings=code_settings, + extraOptions=extraOptions, + multipliers=multipliers, + # + cold_start=cold_start, + forceIfcold_start=forceIfcold_start, + only_minimal_files=only_minimal_files, + # + launchSlurm=launchSlurm, + slurm_setup=slurm_setup, + # + additional_files_to_send=additional_files_to_send, + # + ApplyCorrections=ApplyCorrections, + minimum_delta_abs=minimum_delta_abs, + Quasineutral=Quasineutral, + ) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Run NEO + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + self._run( + code_executor, + code_executor_full=code_executor_full, + code_settings=code_settings, + ApplyCorrections=ApplyCorrections, + Quasineutral=Quasineutral, + launchSlurm=launchSlurm, + cold_start=cold_start, + forceIfcold_start=forceIfcold_start, + extra_name=extra_name, + slurm_setup=slurm_setup, + only_minimal_files=only_minimal_files, + attempts_execution=attempts_execution, + run_type=run_type, + helper_lostconnection=helper_lostconnection, + ) + + 
return code_executor_full + + def _run_prepare( + self, + # ******************************** + # Required options + # ******************************** + subfolder_simulation, + code_executor=None, + code_executor_full=None, + # ******************************** + # Run settings + # ******************************** + code_settings=None, + extraOptions={}, + multipliers={}, + # ******************************** + # IO settings + # ******************************** + cold_start=False, + forceIfcold_start=False, + only_minimal_files=False, + # ******************************** + # Slurm settings (for warnings) + # ******************************** + launchSlurm=True, + slurm_setup=None, + # ******************************** + # Additional files to send (e.g. restarts). Must be a dictionary with rho keys + # ******************************** + additional_files_to_send = None, + # ******************************** + # Additional settings to correct/modify inputs + # ******************************** + **kwargs_control + ): + + if slurm_setup is None: + slurm_setup = {"cores": self.run_specifications['default_cores'], "minutes": 5} + + if self.run_specifications is None: + raise Exception("[MITIM] Simulation child class did not define run specifications") + + # Because of historical relevance, I allow both TGLFsettings and code_settings #TODO #TOREMOVE + if "TGLFsettings" in kwargs_control: + if code_settings is not None: + raise Exception('[MITIM] Cannot use both TGLFsettings and code_settings') + else: + code_settings = kwargs_control["TGLFsettings"] + del kwargs_control["TGLFsettings"] + # ------------------------------------------------------------------------------------ + + if code_executor is None: + code_executor = {} + if code_executor_full is None: + code_executor_full = {} + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Prepare for run + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + rhos = self.rhos + + inputs = copy.deepcopy(self.inputs_files) + Folder_sim = self.FolderGACODE / 
subfolder_simulation + + ResultsFiles_new = [] + for i in self.ResultsFiles: + if "mitim.out" not in i: + ResultsFiles_new.append(i) + self.ResultsFiles = ResultsFiles_new + + if only_minimal_files: + filesToRetrieve = self.ResultsFiles_minimal + else: + filesToRetrieve = self.ResultsFiles + + # Do I need to run all radii? + rhosEvaluate = cold_start_checker( + rhos, + filesToRetrieve, + Folder_sim, + cold_start=cold_start, + ) + + if len(rhosEvaluate) == len(rhos): + # All radii need to be evaluated + IOtools.askNewFolder(Folder_sim, force=forceIfcold_start) + + # Once created, expand here + Folder_sim = IOtools.expandPath(Folder_sim) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Change this specific run + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + latest_inputsFile, latest_inputsFileDict = change_and_write_code( + rhos, + inputs, + Folder_sim, + code_settings=code_settings, + extraOptions=extraOptions, + multipliers=multipliers, + addControlFunction=self.run_specifications['control_function'], + controls_file=self.run_specifications['controls_file'], + **kwargs_control + ) + + code_executor_full[subfolder_simulation] = {} + code_executor[subfolder_simulation] = {} + for irho in self.rhos: + code_executor_full[subfolder_simulation][irho] = { + "folder": Folder_sim, + "dictionary": latest_inputsFileDict[irho], + "inputs": latest_inputsFile[irho], + "extraOptions": extraOptions, + "multipliers": multipliers, + "additional_files_to_send": additional_files_to_send[irho] if additional_files_to_send is not None else None + } + if irho in rhosEvaluate: + code_executor[subfolder_simulation][irho] = code_executor_full[subfolder_simulation][irho] + + # Check input file problems + for irho in latest_inputsFileDict: + latest_inputsFileDict[irho].anticipate_problems() + + # Check cores problem + # if launchSlurm: + # self._check_cores(rhosEvaluate, slurm_setup) + + self.FolderSimLast = Folder_sim + + return code_executor, code_executor_full + + def _check_cores(self, rhosEvaluate, 
slurm_setup, warning = 32 * 2): + expected_allocated_cores = int(len(rhosEvaluate) * slurm_setup["cores"]) + + print(f'\t- Slurm job will be submitted with {expected_allocated_cores} cores ({len(rhosEvaluate)} radii x {slurm_setup["cores"]} cores/radius)', + typeMsg="" if expected_allocated_cores < warning else "q",) + + def _run( + self, + code_executor, + run_type = 'normal', # 'normal': submit and wait; 'submit': submit and do not wait; 'prep': do not submit + **kwargs_run + ): + """ + extraOptions and multipliers are not being grabbed from kwargs_NEOrun, but from code_executor for WF + """ + + if kwargs_run.get("only_minimal_files", False): + filesToRetrieve = self.ResultsFiles_minimal + else: + filesToRetrieve = self.ResultsFiles + + c = 0 + for subfolder_simulation in code_executor: + c += len(code_executor[subfolder_simulation]) + + if c == 0: + + print(f"\t- {self.run_specifications['code'].upper()} not run because all results files found (please ensure consistency!)",typeMsg="i") + + self.simulation_job = None + + else: + + # ---------------------------------------------------------------------------------------------------------------- + # Run simulation + # ---------------------------------------------------------------------------------------------------------------- + """ + launchSlurm = True -> Launch as a batch job in the machine chosen, if partition specified + launchSlurm = False -> Launch locally as a bash script + """ + + # Get code info + code = self.run_specifications.get('code', 'tglf') + input_file = self.run_specifications.get('input_file', 'input.tglf') + code_call = self.run_specifications.get('code_call', None) + code_slurm_settings = self.run_specifications.get('code_slurm_settings', None) + + # Get execution info + minutes = kwargs_run.get("slurm_setup", {}).get("minutes", 5) + cores_per_code_call = kwargs_run.get("slurm_setup", {}).get("cores", self.run_specifications['default_cores']) + launchSlurm = kwargs_run.get("launchSlurm", 
True) + + extraFlag = kwargs_run.get('extra_name', '') + name = f"{self.run_specifications['code']}_{self.nameRunid}{extraFlag}" + + attempts_execution = kwargs_run.get("attempts_execution", 1) + + tmpFolder = self.FolderGACODE / f"tmp_{code}" + IOtools.askNewFolder(tmpFolder, force=True) + + kkeys = [str(keys).replace('/','') for keys in code_executor.keys()] + log_simulation_file=self.FolderGACODE / f"mitim_simulation_{kkeys[0]}.log" # Refer with the first folder + self.simulation_job = FARMINGtools.mitim_job(tmpFolder, log_simulation_file=log_simulation_file) + + self.simulation_job.define_machine_quick(code,f"mitim_{name}") + + folders, folders_red = [], [] + for subfolder_sim in code_executor: + + rhos = list(code_executor[subfolder_sim].keys()) + + # --------------------------------------------- + # Prepare files and folders + # --------------------------------------------- + + for i, rho in enumerate(rhos): + print(f"\t- Preparing {code.upper()} execution ({subfolder_sim}) at rho={rho:.4f}") + + folder_sim_this = tmpFolder / subfolder_sim / f"rho_{rho:.4f}" + folders.append(folder_sim_this) + + folder_sim_this_rel = folder_sim_this.relative_to(tmpFolder) + folders_red.append(folder_sim_this_rel.as_posix() if self.simulation_job.machineSettings['machine'] != 'local' else str(folder_sim_this_rel)) + + folder_sim_this.mkdir(parents=True, exist_ok=True) + + input_file_sim = folder_sim_this / input_file + with open(input_file_sim, "w") as f: + f.write(code_executor[subfolder_sim][rho]["inputs"]) + + # Copy potential additional files to send + if code_executor[subfolder_sim][rho]["additional_files_to_send"] is not None: + for file in code_executor[subfolder_sim][rho]["additional_files_to_send"]: + shutil.copy(file, folder_sim_this / Path(file).name) + + # --------------------------------------------- + # Prepare command + # --------------------------------------------- + + # Grab machine local limits ------------------------------------------------- + 
machineSettings = FARMINGtools.mitim_job.grab_machine_settings(code) + max_cores_per_node = machineSettings["cores_per_node"] + + # If the run is local and not slurm, let's check the number of cores + if (machineSettings["machine"] == "local") and \ + not (launchSlurm and ("partition" in self.simulation_job.machineSettings["slurm"])): + + cores_in_machine = int(os.cpu_count()) + cores_allocated = int(os.environ.get('SLURM_CPUS_PER_TASK')) if os.environ.get('SLURM_CPUS_PER_TASK') is not None else None + + if cores_allocated is not None: + if max_cores_per_node is None or (cores_allocated < max_cores_per_node): + print(f"\t - Detected {cores_allocated} cores allocated by SLURM, using this value as maximum for local execution (vs {max_cores_per_node} specified)",typeMsg="i") + max_cores_per_node = cores_allocated + elif cores_in_machine is not None: + if max_cores_per_node is None or (cores_in_machine < max_cores_per_node): + print(f"\t - Detected {cores_in_machine} cores in machine, using this value as maximum for local execution (vs {max_cores_per_node} specified)",typeMsg="i") + max_cores_per_node = cores_in_machine + else: + # Default to just 16 just in case + if max_cores_per_node is None: + max_cores_per_node = 16 + else: + # For remote execution, default to just 16 just in case + if max_cores_per_node is None: + max_cores_per_node = 16 + # --------------------------------------------------------------------------- + + # Grab the total number of cores of this job -------------------------------- + total_simulation_executions = len(rhos) * len(code_executor) + total_cores_required = int(cores_per_code_call) * total_simulation_executions + # --------------------------------------------------------------------------- + + # If it's GPUS enable machine, do the comparison based on it + if machineSettings['gpus_per_node'] == 0: + max_cores_per_node_compare = max_cores_per_node + else: + print(f"\t - Detected {machineSettings['gpus_per_node']} GPUs in machine, using 
this value as maximum for non-array execution (vs {max_cores_per_node} specified)",typeMsg="i") + max_cores_per_node_compare = machineSettings['gpus_per_node'] + + if not (launchSlurm and ("partition" in self.simulation_job.machineSettings["slurm"])): + type_of_submission = "bash" + elif total_cores_required < max_cores_per_node_compare: + type_of_submission = "slurm_standard" + elif total_cores_required >= max_cores_per_node_compare: + type_of_submission = "slurm_array" + + shellPreCommands, shellPostCommands = None, None + + # Simply bash, no slurm + if type_of_submission == "bash": + + if cores_per_code_call > max_cores_per_node: + print(f"\t- Detected {cores_per_code_call} cores required, using this value as maximum for local execution (vs {max_cores_per_node} specified)",typeMsg="i") + max_cores_per_node = cores_per_code_call + + max_parallel_execution = max_cores_per_node // cores_per_code_call # Make sure we don't overload the machine when running locally (assuming no farming trans-node) + + print(f"\t- {code.upper()} will be executed as bash script (total cores: {total_cores_required}, cores per simulation: {cores_per_code_call}). 
MITIM will launch {total_simulation_executions // max_parallel_execution+1} sequential executions",typeMsg="i") + + # Build the bash script with job control enabled and a loop to limit parallel jobs + GACODEcommand = "#!/usr/bin/env bash\n" + GACODEcommand += "set -m\n" # Enable job control even in non-interactive mode + GACODEcommand += f"max_parallel_execution={max_parallel_execution}\n\n" # Set the maximum number of parallel processes + + # Create a bash array of folders + GACODEcommand += "folders=(\n" + for folder in folders_red: + GACODEcommand += f' "{folder}"\n' + GACODEcommand += ")\n\n" + + # Loop over each folder and launch code, waiting if we've reached max_parallel_execution + GACODEcommand += "for folder in \"${folders[@]}\"; do\n" + folder_str = '"$folder"' # literal double quotes around $folder + GACODEcommand += f' {code_call(folder=folder_str, n=cores_per_code_call, p=self.simulation_job.folderExecution)} &\n' + GACODEcommand += " while (( $(jobs -r | wc -l) >= max_parallel_execution )); do sleep 1; done\n" + GACODEcommand += "done\n\n" + GACODEcommand += "wait\n" + + # Standard job + elif type_of_submission == "slurm_standard": + + print(f"\t- {code.upper()} will be executed in SLURM as standard job (cpus: {total_cores_required})",typeMsg="i") + + # Code launches + GACODEcommand = "" + for folder in folders_red: + GACODEcommand += f' {code_call(folder = folder, n = cores_per_code_call, p = self.simulation_job.folderExecution)} &\n' + GACODEcommand += "\nwait" # This is needed so that the script doesn't end before each job + + # Job array + elif type_of_submission == "slurm_array": + + print(f"\t- {code.upper()} will be executed in SLURM as job array due to its size (cpus: {total_cores_required})",typeMsg="i") + + # As a pre-command, organize all folders in a simpler way + shellPreCommands = [] + shellPostCommands = [] + array_list = [] + for i, folder in enumerate(folders_red): + array_list.append(f"{i}") + folder_temp_array = f"run{i}" + 
folder_actual = folder + shellPreCommands.append(f"mkdir {self.simulation_job.folderExecution}/{folder_temp_array}; cp {self.simulation_job.folderExecution}/{folder_actual}/* {self.simulation_job.folderExecution}/{folder_temp_array}/.") + shellPostCommands.append(f"cp {self.simulation_job.folderExecution}/{folder_temp_array}/* {self.simulation_job.folderExecution}/{folder_actual}/.; rm -r {self.simulation_job.folderExecution}/{folder_temp_array}") + + # Code launches + indexed_folder = 'run"$SLURM_ARRAY_TASK_ID"' + GACODEcommand = code_call( + folder = indexed_folder, + n = cores_per_code_call, + p = self.simulation_job.folderExecution, + additional_command = f'1> {self.simulation_job.folderExecution}/{indexed_folder}/slurm_output.dat 2> {self.simulation_job.folderExecution}/{indexed_folder}/slurm_error.dat\n') + + # --------------------------------------------- + # Execute + # --------------------------------------------- + + slurm_settings = code_slurm_settings( + name=code, + minutes=minutes, + total_cores_required=total_cores_required, + cores_per_code_call=cores_per_code_call, + type_of_submission=type_of_submission, + array_list=array_list if type_of_submission == "slurm_array" else None, + raise_warning= run_type == 'normal' + ) + + self.simulation_job.define_machine( + code, + f"mitim_{name}", + launchSlurm=launchSlurm, + slurm_settings=slurm_settings, + ) + + # I would like the mitim_job to check if the retrieved folders were complete + check_files_in_folder = {} + for folder in folders_red: + check_files_in_folder[folder] = filesToRetrieve + # --------------------------------------------- + + self.simulation_job.prep( + GACODEcommand, + input_folders=folders, + output_folders=folders_red, + check_files_in_folder=check_files_in_folder, + shellPreCommands=shellPreCommands, + shellPostCommands=shellPostCommands, + ) + + # Submit run and wait + if run_type == 'normal': + + self.simulation_job.run( + removeScratchFolders=True, + 
attempts_execution=attempts_execution, + helper_lostconnection=kwargs_run.get("helper_lostconnection", False) + ) + + self._organize_results(code_executor, tmpFolder, filesToRetrieve) + + # Submit run but do not wait; the user should do checks and fetch results + elif run_type == 'submit': + + if type_of_submission == "slurm_array": + raise Exception("[MITIM] run_type='submit' with slurm_array not implemented yet because of issues about folders being moved around, TBD") + + self.simulation_job.run( + waitYN=False, + check_if_files_received=False, + removeScratchFolders=False, + removeScratchFolders_goingIn=kwargs_run.get("cold_start", False), + ) + + self.kwargs_organize = { + "code_executor": code_executor, + "tmpFolder": tmpFolder, + "filesToRetrieve": filesToRetrieve + } + + self.slurm_output = "slurm_output.dat" + + # Prepare how to search for the job without waiting for it + self.simulation_job.launchSlurm = True + self.simulation_job.slurm_settings['name'] = Path(self.simulation_job.folderExecution).name + + def check(self, every_n_minutes=None): + + if self.simulation_job is None: + print("- Not checking status because simulation job is not defined (not run)", typeMsg="i") + return + + if self.simulation_job.launchSlurm: + print("- Checker job status") + + while True: + self.simulation_job.check(file_output = self.slurm_output) + print(f'\t- Current status (as of {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}): {self.simulation_job.status} ({self.simulation_job.infoSLURM["STATE"]})') + if self.simulation_job.status == 2: + print("\n\t* Job considered finished (please do .fetch() to retrieve results)",typeMsg="i") + break + elif every_n_minutes is None: + print("\n\t* Job not finished yet") + break + else: + print(f"\n\t* Waiting {every_n_minutes} minutes") + time.sleep(every_n_minutes * 60) + else: + print("- Not checking status because this was run command line (not slurm)") + + def fetch(self): + """ + For a job that has been submitted but not 
waited for, once it is done, get the results + """ + + if self.simulation_job is None: + print("- Not fetching because simulation job is not defined (not run)", typeMsg="i") + return + + print("\n\n\t- Fetching results") + + if self.simulation_job.launchSlurm: + self.simulation_job.connect() + self.simulation_job.retrieve() + self.simulation_job.close() + + self._organize_results(**self.kwargs_organize) + + else: + print("- Not retrieving results because this was run command line (not slurm)") + + def delete(self): + + print("\n\n\t- Deleting job") + + self.simulation_job.launchSlurm = False + + self.simulation_job.prep( + f"scancel -n {self.simulation_job.slurm_settings['name']}", + label_log_files="_finish", + ) + + self.simulation_job.run() + + def _organize_results(self, code_executor, tmpFolder, filesToRetrieve): + + # --------------------------------------------- + # Organize + # --------------------------------------------- + + print("\t- Retrieving files and changing names for storing") + fineall = True + for subfolder_sim in code_executor: + + for rho in code_executor[subfolder_sim].keys(): + for file in filesToRetrieve: + original_file = f"{file}_{rho:.4f}" + final_destination = code_executor[subfolder_sim][rho]['folder'] / f"{original_file}" + + final_destination.unlink(missing_ok=True) + + temp_file = tmpFolder / subfolder_sim / f"rho_{rho:.4f}" / f"{file}" + temp_file.replace(final_destination) + + fineall = fineall and final_destination.exists() + + if not final_destination.exists(): + print(f"\t!! 
file {file} ({original_file}) could not be retrived",typeMsg="w",) + + if fineall: + print("\t\t- All files were successfully retrieved") + + # Remove temporary folder + IOtools.shutil_rmtree(tmpFolder) + + else: + print("\t\t- Some files were not retrieved", typeMsg="w") + + def run_scan( + self, + subfolder, # 'scan1', + multipliers={}, + minimum_delta_abs={}, + variable="RLTS_1", + varUpDown=[0.5, 1.0, 1.5], + variables_scanTogether=[], + relativeChanges=True, + **kwargs_run, + ): + + # ------------------------------------- + # Add baseline + # ------------------------------------- + if (1.0 not in varUpDown) and relativeChanges: + print("\n* Since variations vector did not include base case, I am adding it",typeMsg="i",) + varUpDown_new = [] + added = False + for i in varUpDown: + if i > 1.0 and not added: + varUpDown_new.append(1.0) + added = True + varUpDown_new.append(i) + else: + varUpDown_new = varUpDown + + + code_executor, code_executor_full, folders, varUpDown_new = self._prepare_scan( + subfolder, + multipliers=multipliers, + minimum_delta_abs=minimum_delta_abs, + variable=variable, + varUpDown=varUpDown_new, + variables_scanTogether=variables_scanTogether, + relativeChanges=relativeChanges, + **kwargs_run, + ) + + # Run them all + self._run( + code_executor, + code_executor_full=code_executor_full, + **kwargs_run, + ) + + # Read results + for cont_mult, mult in enumerate(varUpDown_new): + name = f"{variable}_{mult}" + self.read( + label=f"{self.subfolder_scan}_{name}", + folder=folders[cont_mult], + cold_startWF = False, + require_all_files=not kwargs_run.get("only_minimal_files",False), + ) + + return code_executor_full + + def _prepare_scan( + self, + subfolder, # 'scan1', + multipliers={}, + minimum_delta_abs={}, + variable="RLTS_1", + varUpDown=[0.5, 1.0, 1.5], + variables_scanTogether=[], + relativeChanges=True, + **kwargs_run, + ): + """ + Multipliers will be modified by adding the scaning variables, but I don't want to modify the original + 
multipliers, as they may be passed to the next scan + + Set relativeChanges=False if varUpDown contains the exact values to change, not multipleiers + """ + + completeVariation = self.run_specifications['complete_variation'] + + multipliers_mod = copy.deepcopy(multipliers) + + self.subfolder_scan = subfolder + + if relativeChanges: + for i in range(len(varUpDown)): + varUpDown[i] = round(varUpDown[i], 6) + + print(f"\n- Proceeding to scan {variable}{' together with '+', '.join(variables_scanTogether) if len(variables_scanTogether)>0 else ''}:") + + code_executor = {} + code_executor_full = {} + folders = [] + for cont_mult, mult in enumerate(varUpDown): + mult = round(mult, 6) + + if relativeChanges: + print(f"\n + Multiplier: {mult} -----------------------------------------------------------------------------------------------------------") + else: + print(f"\n + Value: {mult} ----------------------------------------------------------------------------------------------------------------") + + multipliers_mod[variable] = mult + + for variable_scanTogether in variables_scanTogether: + multipliers_mod[variable_scanTogether] = mult + + name = f"{variable}_{mult}" + + species = self.inputs_files[self.rhos[0]] # Any rho will do + + if completeVariation is not None: + multipliers_mod = completeVariation(multipliers_mod, species) + + if not relativeChanges: + for ikey in multipliers_mod: + kwargs_run["extraOptions"][ikey] = multipliers_mod[ikey] + multipliers_mod = {} + + # Force ensure quasineutrality if the + if variable in ["AS_3", "AS_4", "AS_5", "AS_6"]: + kwargs_run["Quasineutral"] = True + + # Only ask the cold_start in the first round + kwargs_run["forceIfcold_start"] = cont_mult > 0 or ("forceIfcold_start" in kwargs_run and kwargs_run["forceIfcold_start"]) + + code_executor, code_executor_full = self._run_prepare( + f"{self.subfolder_scan}_{name}", + code_executor=code_executor, + code_executor_full=code_executor_full, + multipliers=multipliers_mod, + 
minimum_delta_abs=minimum_delta_abs, + **kwargs_run, + ) + + folders.append(copy.deepcopy(self.FolderSimLast)) + + return code_executor, code_executor_full, folders, varUpDown + + def read( + self, + label="run1", + folder=None, # If None, search in the previously run folder + suffix=None, # If None, search with my standard _0.55 suffixes corresponding to rho of this TGLF class + **kwargs_to_class_output + ): + print("> Reading simulation results") + + class_output = self.run_specifications['output_class'] + + # If no specified folder, check the last one + if folder is None: + folder = self.FolderSimLast + + self.results[label] = { + 'output':[], + 'parsed': [], + "x": np.array(self.rhos), + } + + for rho in self.rhos: + + SIMout = class_output( + folder, + suffix=(f"_{rho:.4f}" if rho is not None else "") if suffix is None else suffix, + **kwargs_to_class_output + ) + + # Unnormalize + if 'NormalizationSets' in self.__dict__: + SIMout.unnormalize( + self.NormalizationSets["SELECTED"], + rho=rho, + ) + else: + print("No normalization sets found.") + + self.results[label]['output'].append(SIMout) + + self.results[label]['parsed'].append(buildDictFromInput(SIMout.inputFile) if SIMout.inputFile else None) + + def read_scan( + self, + label="scan1", + subfolder=None, + variable="RLTS_1", + positionIon=2, + variable_mapping=None, + variable_mapping_unn=None + ): + ''' + positionIon is the index in the input.tglf file... 
so if you want for ion RLNS_5, positionIon=5 + ''' + + if subfolder is None: + subfolder = self.subfolder_scan + + if variable_mapping is None: + variable_mapping = {} + if variable_mapping_unn is None: + variable_mapping_unn = {} + + self.scans[label] = {} + self.scans[label]["variable"] = variable + self.scans[label]["positionBase"] = None + self.scans[label]["unnormalization_successful"] = True + self.scans[label]["results_tags"] = [] + + self.positionIon_scan = positionIon + + # ---- + + scan = {} + for ikey in variable_mapping | variable_mapping_unn: + scan[ikey] = [] + + cont = 0 + for ikey in self.results: + isThisTheRightReadResults = (subfolder in ikey) and (variable== "_".join(ikey.split("_")[:-1]).split(subfolder + "_")[-1]) + + if isThisTheRightReadResults: + + self.scans[label]["results_tags"].append(ikey) + + # Initialize lists + scan0 = {} + for ikey2 in variable_mapping | variable_mapping_unn: + scan0[ikey2] = [] + + # Loop over radii + for irho_cont in range(len(self.rhos)): + irho = np.where(self.results[ikey]["x"] == self.rhos[irho_cont])[0][0] + + for ikey2 in variable_mapping: + + obj = self.results[ikey][variable_mapping[ikey2][0]][irho] + if not hasattr(obj, '__dict__'): + obj_dict = obj + else: + obj_dict = obj.__dict__ + var0 = obj_dict[variable_mapping[ikey2][1]] + scan0[ikey2].append(var0 if variable_mapping[ikey2][2] is None else var0[variable_mapping[ikey2][2]]) + + # Unnormalized + self.scans[label]["unnormalization_successful"] = True + for ikey2 in variable_mapping_unn: + obj = self.results[ikey][variable_mapping_unn[ikey2][0]][irho] + if not hasattr(obj, '__dict__'): + obj_dict = obj + else: + obj_dict = obj.__dict__ + + if variable_mapping_unn[ikey2][1] not in obj_dict: + self.scans[label]["unnormalization_successful"] = False + break + var0 = obj_dict[variable_mapping_unn[ikey2][1]] + scan0[ikey2].append(var0 if variable_mapping_unn[ikey2][2] is None else var0[variable_mapping_unn[ikey2][2]]) + + for ikey2 in variable_mapping | 
variable_mapping_unn: + scan[ikey2].append(scan0[ikey2]) + + if float(ikey.split('_')[-1]) == 1.0: + self.scans[label]["positionBase"] = cont + cont += 1 + + self.scans[label]["x"] = np.array(self.rhos) + + for ikey2 in variable_mapping | variable_mapping_unn: + self.scans[label][ikey2] = np.atleast_2d(np.transpose(scan[ikey2])) + +def change_and_write_code( + rhos, + inputs0, + Folder_sim, + code_settings=None, + extraOptions={}, + multipliers={}, + minimum_delta_abs={}, + ApplyCorrections=True, + Quasineutral=False, + addControlFunction=None, + controls_file='input.tglf.controls', + **kwargs +): + """ + Received inputs classes and gives text. + ApplyCorrections refer to removing ions with too low density and that are fast species + """ + + inputs = copy.deepcopy(inputs0) + + mod_input_file = {} + ns_max = [] + for i, rho in enumerate(rhos): + print(f"\t- Changing input file for rho={rho:.4f}") + input_sim_rho = modifyInputs( + inputs[rho], + code_settings=code_settings, + extraOptions=extraOptions, + multipliers=multipliers, + minimum_delta_abs=minimum_delta_abs, + position_change=i, + addControlFunction=addControlFunction, + controls_file=controls_file, + NS=inputs[rho].num_recorded, + ) + + input_file = input_sim_rho.file.name.split('_')[0] + + newfile = Folder_sim / f"{input_file}_{rho:.4f}" + + if code_settings is not None: + # Apply corrections + if ApplyCorrections: + print("\t- Applying corrections") + input_sim_rho.removeLowDensitySpecie() + input_sim_rho.remove_fast() + + # Ensure that plasma to run is quasineutral + if Quasineutral: + input_sim_rho.ensureQuasineutrality() + else: + print('\t- Not applying corrections because settings is None') + + input_sim_rho.write_state(file=newfile) + + mod_input_file[rho] = input_sim_rho + + ns_max.append(inputs[rho].num_recorded) + + # Convert back to a string because that's how the run operates + inputFile = inputToVariable(Folder_sim, rhos, file=input_file) + + if (np.diff(ns_max) > 0).any(): + print("> Each 
radial location has its own number of species... probably because of removal of fast or low density...",typeMsg="w") + print("\t * Reading of simulation results will fail... consider doing something before launching run",typeMsg="q") + + return inputFile, mod_input_file + +def inputToVariable(folder, rhos, file='input.tglf'): + """ + Entire text file to variable + """ + + inputFilesTGLF = {} + for rho in rhos: + fileN = folder / f"{file}_{rho:.4f}" + + with open(fileN, "r") as f: + lines = f.readlines() + inputFilesTGLF[rho] = "".join(lines) + + return inputFilesTGLF + +def cold_start_checker( + rhos, + ResultsFiles, + Folder_sim, + cold_start=False, + print_each_time=False, +): + """ + This function checks if the TGLF inputs are already in the folder. If they are, it returns True + """ + cont_each = 0 + if cold_start: + rhosEvaluate = rhos + else: + rhosEvaluate = [] + for ir in rhos: + existsRho = True + for j in ResultsFiles: + ffi = Folder_sim / f"{j}_{ir:.4f}" + existsThis = ffi.exists() + existsRho = existsRho and existsThis + if not existsThis: + if print_each_time: + print(f"\t* {ffi} does not exist") + else: + cont_each += 1 + if not existsRho: + rhosEvaluate.append(ir) + + if not print_each_time and cont_each > 0: + print(f'\t* {cont_each} files from expected set are missing') + + if len(rhosEvaluate) < len(rhos) and len(rhosEvaluate) > 0: + print("~ Not all radii are found, but not removing folder and running only those that are needed",typeMsg="i",) + + return rhosEvaluate + +def modifyInputs( + input_class, + code_settings=None, + extraOptions=None, + multipliers=None, + minimum_delta_abs=None, + position_change=0, + addControlFunction=None, + controls_file = 'input.tglf.controls', + **kwargs_to_function, +): + + if extraOptions is None: + extraOptions = {} + if multipliers is None: + multipliers = {} + if minimum_delta_abs is None: + minimum_delta_abs = {} + + # Check that those are valid flags + GACODEdefaults.review_controls(extraOptions, control = 
controls_file) + GACODEdefaults.review_controls(multipliers, control = controls_file) + # ------------------------------------------- + + if code_settings is not None: + CodeOptions = addControlFunction(code_settings, **kwargs_to_function) + + # ~~~~~~~~~~ Change with presets + print(f" \t- Using presets code_settings = {code_settings}", typeMsg="i") + input_class.controls = CodeOptions + + else: + print("\t- Input file was not modified by code_settings, using what was there before",typeMsg="i") + + # Make all upper case + #extraOptions = {ikey.upper(): value for ikey, value in extraOptions.items()} + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Change with external options -> Input directly, not as multiplier + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if len(extraOptions) > 0: + print("\t- External options:") + for ikey in extraOptions: + if isinstance(extraOptions[ikey], (list, np.ndarray)): + value_to_change_to = extraOptions[ikey][position_change] + else: + value_to_change_to = extraOptions[ikey] + + try: + isspecie = ikey.split("_")[0] in input_class.species[1] + except: + isspecie = False + + # is a species parameter? + if isspecie: + specie = int(ikey.split("_")[-1]) + varK = "_".join(ikey.split("_")[:-1]) + var_orig = input_class.species[specie][varK] + var_new = value_to_change_to + input_class.species[specie][varK] = var_new + # is a another parameter? 
+ else: + if ikey in input_class.controls: + var_orig = input_class.controls[ikey] + var_new = value_to_change_to + input_class.controls[ikey] = var_new + elif ikey in input_class.plasma: + var_orig = input_class.plasma[ikey] + var_new = value_to_change_to + input_class.plasma[ikey] = var_new + else: + # If the variable in extraOptions wasn't in there, consider it a control param + print(f"\t\t- Variable {ikey} to change did not exist previously, creating now",typeMsg="i") + var_orig = None + var_new = value_to_change_to + input_class.controls[ikey] = var_new + + print(f"\t\t- Changing {ikey} from {var_orig} to {var_new}",typeMsg="i",) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Change with multipliers -> Input directly, not as multiplier + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + if len(multipliers) > 0: + print("\t\t- Variables change:") + for ikey in multipliers: + + if isinstance(multipliers[ikey], (list, np.ndarray)): + value_to_change_to = multipliers[ikey][position_change] + else: + value_to_change_to = multipliers[ikey] + + # is a specie one? 
+ if "species" in input_class.__dict__.keys() and ikey.split("_")[0] in input_class.species[1]: + specie = int(ikey.split("_")[-1]) + varK = "_".join(ikey.split("_")[:-1]) + var_orig = input_class.species[specie][varK] + var_new = multiplier_input(var_orig, value_to_change_to, minimum_delta_abs = minimum_delta_abs.get(ikey,None)) + input_class.species[specie][varK] = var_new + else: + if ikey in input_class.controls: + var_orig = input_class.controls[ikey] + var_new = multiplier_input(var_orig, value_to_change_to, minimum_delta_abs = minimum_delta_abs.get(ikey,None)) + input_class.controls[ikey] = var_new + + elif ikey in input_class.plasma: + var_orig = input_class.plasma[ikey] + var_new = multiplier_input(var_orig, value_to_change_to, minimum_delta_abs = minimum_delta_abs.get(ikey,None)) + input_class.plasma[ikey] = var_new + + else: + print("\t- Variable to scan did not exist in original file, add it as extraOptions first",typeMsg="w",) + + print(f"\t\t\t- Changing {ikey} from {var_orig} to {var_new} (x{value_to_change_to})") + + return input_class + +def multiplier_input(var_orig, multiplier, minimum_delta_abs = None): + + delta = var_orig * (multiplier - 1.0) + + if minimum_delta_abs is not None: + if (multiplier != 1.0) and abs(delta) < minimum_delta_abs: + print(f"\t\t\t- delta = {delta} is smaller than minimum_delta_abs = {minimum_delta_abs}, enforcing",typeMsg="i") + delta = np.sign(delta) * minimum_delta_abs + + return var_orig + delta + +def buildDictFromInput(inputFile): + parsed = {} + + lines = inputFile.split("\n") + for line in lines: + if "=" in line: + splits = [i.split()[0] for i in line.split("=")] + if ("." 
in splits[1]) and (splits[1][0].split()[0] != "."): + try: + parsed[splits[0].split()[0]] = float(splits[1].split()[0]) + continue + except: + pass + + try: + parsed[splits[0].split()[0]] = int(splits[1].split()[0]) + except: + parsed[splits[0].split()[0]] = splits[1].split()[0] + + for i in parsed: + if isinstance(parsed[i], str): + if ( + parsed[i].lower() == "t" + or parsed[i].lower() == "true" + or parsed[i].lower() == ".true." + ): + parsed[i] = True + elif ( + parsed[i].lower() == "f" + or parsed[i].lower() == "false" + or parsed[i].lower() == ".false." + ): + parsed[i] = False + + return parsed + +class GACODEoutput: + def __init__(self, *args, **kwargs): + self.inputFile = None + + def unnormalize(self, *args, **kwargs): + print("No unnormalization implemented.") + +class GACODEinput: + def __init__(self, file=None, controls_file=None, code='', n_species=None): + self.file = IOtools.expandPath(file) if isinstance(file, (str, Path)) else None + + self.controls_file = controls_file + self.code = code + self.n_species = n_species + + self.num_recorded = 100 + + if self.file is not None and self.file.exists(): + with open(self.file, "r") as f: + lines = f.readlines() + file_txt = "".join(lines) + else: + file_txt = "" + input_dict = buildDictFromInput(file_txt) + + self.process(input_dict) + + @classmethod + def initialize_in_memory(cls, input_dict): + instance = cls() + instance.process(input_dict) + return instance + + def process(self, input_dict): + + if self.controls_file is not None: + options_check = [key for key in IOtools.generateMITIMNamelist(self.controls_file, caseInsensitive=False).keys()] + else: + options_check = [] + + self.controls, self.plasma = {}, {} + for key in input_dict.keys(): + if key in options_check: + self.controls[key] = input_dict[key] + else: + self.plasma[key] = input_dict[key] + + # Get number of recorded species + if self.n_species is not None and self.n_species in input_dict: + self.num_recorded = 
int(input_dict[self.n_species]) + + def write_state(self, file=None): + + if file is None: + file = self.file + + # Local formatter: floats -> 6 significant figures in exponential (uppercase), + # ints stay as ints, bools as 0/1, sequences space-separated with same rule. + def _fmt_num(x): + import numpy as _np + if isinstance(x, (bool, _np.bool_)): + return "True" if x else "False" + if isinstance(x, (_np.floating, float)): + # 6 significant figures in exponential => 5 digits after decimal + return f"{float(x):.5E}" + if isinstance(x, (_np.integer, int)): + return f"{int(x)}" + return str(x) + + def _fmt_value(val): + import numpy as _np + if isinstance(val, (list, tuple, _np.ndarray)): + # Flatten numpy arrays but keep ordering; join with spaces + if isinstance(val, _np.ndarray): + flat = val.flatten().tolist() + else: + flat = list(val) + return " ".join(_fmt_num(v) for v in flat) + return _fmt_num(val) + + with open(file, "w") as f: + f.write("#-------------------------------------------------------------------------\n") + f.write(f"# {self.code} input file modified by MITIM {mitim_version}\n") + f.write("#-------------------------------------------------------------------------\n") + + f.write("\n\n# Control parameters\n") + f.write("# ------------------\n\n") + for ikey in self.controls: + var = self.controls[ikey] + f.write(f"{ikey.ljust(23)} = {_fmt_value(var)}\n") + + f.write("\n\n# Plasma/Geometry parameters\n") + f.write("# ------------------\n\n") + for ikey in self.plasma: + var = self.plasma[ikey] + f.write(f"{ikey.ljust(23)} = {_fmt_value(var)}\n") + + def anticipate_problems(self): + pass + + def remove_fast(self): + pass + + def removeLowDensitySpecie(self, *args): + pass + \ No newline at end of file diff --git a/src/mitim_tools/simulation_tools/physics/GXtools.py b/src/mitim_tools/simulation_tools/physics/GXtools.py new file mode 100644 index 00000000..8739b5c0 --- /dev/null +++ b/src/mitim_tools/simulation_tools/physics/GXtools.py @@ -0,0 +1,464 
@@ +import netCDF4 +import matplotlib.pyplot as plt +from mitim_tools.misc_tools import GRAPHICStools, IOtools, GUItools, CONFIGread +from mitim_tools.gacode_tools.utils import GACODEdefaults, CGYROutils +from mitim_tools.simulation_tools import SIMtools +from mitim_tools.simulation_tools.utils import SIMplot +from mitim_tools.misc_tools.LOGtools import printMsg as print +from mitim_tools import __mitimroot__ +from mitim_tools import __version__ as mitim_version +from IPython import embed + +class GX(SIMtools.mitim_simulation, SIMplot.GKplotting): + def __init__( + self, + rhos=[0.4, 0.6], # rho locations of interest + ): + + super().__init__(rhos=rhos) + + def code_call(folder, n, p, additional_command="", **kwargs): + return f"gx -n {n} {folder}/gxplasma.in > {folder}/gxplasma.mitim.log" + + def code_slurm_settings(name, minutes, total_cores_required, cores_per_code_call, type_of_submission, raise_warning=True,array_list=None): + + slurm_settings = { + "name": name, + "minutes": minutes, + } + + # Gather if this is a GPU enabled machine + machineSettings = CONFIGread.machineSettings(code='gx') + + if machineSettings['gpus_per_node'] == 0: + if raise_warning: + raise Exception("[MITIM] GX needs GPUs to run, but the selected machine does not have any GPU configured. Please select another machine in the config file with gpus_per_node>0.") + else: + print("[MITIM] Warning: GX needs GPUs to run, but the selected machine does not have any GPU configured. 
Running without GPUs, but this will likely fail.", typeMsg="w") + + if type_of_submission == "slurm_standard": + + slurm_settings['ntasks'] = total_cores_required + slurm_settings['gpuspertask'] = 1 # Because of MPI, each task needs a GPU, and I'm passing cores_per_code_call per task + slurm_settings['job_array'] = None + + elif type_of_submission == "slurm_array": + + slurm_settings['ntasks'] = cores_per_code_call + slurm_settings['gpuspertask'] = 1 + slurm_settings['job_array'] = ",".join(array_list) + + return slurm_settings + + self.run_specifications = { + 'code': 'gx', + 'input_file': 'gxplasma.in', + 'code_call': code_call, + 'code_slurm_settings': code_slurm_settings, + 'control_function': GACODEdefaults.addGXcontrol, + 'controls_file': 'input.gx.controls', + 'state_converter': 'to_gx', + 'input_class': GXinput, + 'complete_variation': None, + 'default_cores': 4, # Default gpus to use in the simulation + 'output_class': GXoutput, + } + + print("\n-----------------------------------------------------------------------------------------") + print("\t\t\t GX class module") + print("-----------------------------------------------------------------------------------------\n") + + self.ResultsFiles_minimal = [ + 'gxplasma.out.nc' + ] + + self.ResultsFiles = self.ResultsFiles_minimal + [ + 'gxplasma.eik.out', + 'gxplasma.eiknc.nc', + 'gxplasma.gx_geo.log', + 'gxplasma.big.nc', + 'gxplasma.mitim.log', + 'gxplasma.restart.nc', + ] + + ''' + Redefined here so that I handle restart properly and + I can choose numerical setup based on plasma + ''' + def run( + self, + subfolder, + numerics_based_on_plasma = None, # A dictionary with the parameters to match + **kwargs_sim_run + ): + + # ------------------------------------ + # Check about restarts + # ------------------------------------ + + # Assume every template writes a restart file named "gxplasma.restart.nc" + # If extraOptions indicate not to write a restart, remove the file + if not 
kwargs_sim_run.get('extraOptions', {}).get('save_for_restart', True): + self.ResultsFiles.remove("gxplasma.restart.nc") + print("\t- Not saving restart file") + + # If the name has changed, update the results files list + if kwargs_sim_run.get('extraOptions', {}).get('restart_to_file', None) is not None: + restart_name = kwargs_sim_run['extraOptions']['restart_to_file'] + self.ResultsFiles.remove("gxplasma.restart.nc") + self.ResultsFiles.append(restart_name) + print(f"\t- Saving restart file as {restart_name}") + + # ------------------------------------ + # Add numerical setup based on plasma + # ------------------------------------ + if numerics_based_on_plasma is not None: + pass + #TODO + + # ------------------------------------ + # Run the super run + # ------------------------------------ + + super().run(subfolder, **kwargs_sim_run) + + def plot( + self, + fn=None, + labels=["gx1"], + extratitle="", + fn_color=None, + colors=None, + ): + + if fn is None: + self.fn = GUItools.FigureNotebook("GX MITIM Notebook", geometry="1700x900", vertical=True) + else: + self.fn = fn + + if colors is None: + colors = GRAPHICStools.listColors() + + # Fluxes + fig = self.fn.add_figure(label=f"{extratitle}Transport Fluxes", tab_color=fn_color) + + grid = plt.GridSpec(1, 3, hspace=0.7, wspace=0.2) + + ax1 = fig.add_subplot(grid[0, 0]) + ax2 = fig.add_subplot(grid[0, 1]) + ax3 = fig.add_subplot(grid[0, 2]) + + i = 0 + for label in labels: + for irho in range(len(self.rhos)): + c = self.results[label]['output'][irho] + + typeLs = '-' if c.t.shape[0]>20 else '-s' + + self._plot_trace(ax1,self.results[label]['output'][irho],"Qe",c=colors[i],lw=1.0,ls='-',label_plot=f"{label}, Total") + self._plot_trace(ax2,self.results[label]['output'][irho],"Qi",c=colors[i],lw=1.0,ls='-',label_plot=f"{label}, Total") + self._plot_trace(ax3,self.results[label]['output'][irho],"Ge",c=colors[i],lw=1.0,ls='-',label_plot=f"{label}, Total") + + i += 1 + + for ax in [ax1, ax2, ax3]: + ax.set_xlabel("Time 
($L_{ref}/c_s$)") + ax.set_xlim(left=0) + GRAPHICStools.addDenseAxis(ax) + + ax1.set_title('Electron heat flux') + ax1.set_ylabel("Electron heat flux ($Q_e/Q_{GB}$)") + ax1.legend(loc='best', prop={'size': 12}) + + ax2.set_title('Ion heat flux') + ax2.set_ylabel("Ion heat flux ($Q_i/Q_{GB}$)") + ax2.legend(loc='best', prop={'size': 12}) + + ax3.set_title('Electron particle flux') + ax3.set_ylabel("Electron particle flux ($\\Gamma_e/\\Gamma_{GB}$)") + ax3.legend(loc='best', prop={'size': 12}) + + plt.tight_layout() + + + # Linear stability + fig = self.fn.add_figure(label=f"{extratitle}Linear Stability", tab_color=fn_color) + + grid = plt.GridSpec(2, 2, hspace=0.7, wspace=0.2) + + + ax1 = fig.add_subplot(grid[0, 0]) + ax2 = fig.add_subplot(grid[1, 0]) + + i = 0 + for label in labels: + for irho in range(len(self.rhos)): + c = self.results[label]['output'][irho] + + typeLs = '-' if c.t.shape[0]>20 else '-s' + + for iky in range(len(c.ky)): + ax1.plot(c.t, c.w[:, iky], typeLs, label=f"{label} rho={self.rhos[irho]} ky={c.ky[iky]}", color=colors[i]) + ax2.plot(c.t, c.g[:, iky], typeLs, label=f"{label} rho={self.rhos[irho]} ky={c.ky[iky]}", color=colors[i]) + i += 1 + + for ax in [ax1, ax2]: + ax.set_xlabel("Time ($L_{ref}/c_s$)") + ax.set_xlim(left=0) + GRAPHICStools.addDenseAxis(ax) + ax1.set_ylabel("Real frequency") + ax1.legend(loc='best', prop={'size': 4}) + ax2.set_ylabel("Growth rate") + + ax3 = fig.add_subplot(grid[0, 1]) + ax4 = fig.add_subplot(grid[1, 1]) + + i = 0 + for label in labels: + for irho in range(len(self.rhos)): + c = self.results[label]['output'][irho] + ax3.plot(c.ky, c.w[-1, :], '-s', markersize = 5, label=f"{label} rho={self.rhos[irho]}", color=colors[i]) + ax4.plot(c.ky, c.g[-1, :], '-s', markersize = 5, label=f"{label} rho={self.rhos[irho]}", color=colors[i]) + i += 1 + + for ax in [ax3, ax4]: + ax.set_xlabel("$k_\\theta\\rho_s$") + ax.set_xlim(left=0) + GRAPHICStools.addDenseAxis(ax) + + ax3.set_ylabel("Real frequency") + 
ax3.legend(loc='best', prop={'size': 12}) + ax3.axhline(y=0, color='k', linestyle='--', linewidth=1) + ax4.set_ylabel("Growth rate") + ax4.set_ylim(bottom=0) + + plt.tight_layout() + + +class GXinput(SIMtools.GACODEinput): + def __init__(self, file=None): + super().__init__( + file=file, + controls_file= __mitimroot__ / "templates" / "input.gx.controls", + code='GX', + n_species='nspecies' + ) + + # GX has a very particular way to write its state + def write_state(self, file=None): + + if file is None: + file = self.file + + + with open(file, "w") as f: + f.write("#-------------------------------------------------------------------------\n") + f.write(f"# {self.code} input file modified by MITIM {mitim_version}\n") + f.write("#-------------------------------------------------------------------------\n") + + # title: [controls], [plasma] + blocks = { + '': + [ ['debug'], [] ], + '[Dimensions]': + [ ['ntheta', 'nperiod', 'ny', 'nx', 'nhermite', 'nlaguerre'], ['nspecies'] ], + '[Domain]': + [ ['y0', 'boundary'], [] ], + '[Physics]': + [ ['nonlinear_mode', 'ei_colls'], ['beta'] ], + '[Time]': + [ ['t_max', 'scheme', 'dt', 'nstep'], [] ], + '[Initialization]': + [ ['ikpar_init', 'init_field', 'init_amp', 'gaussian_init'], [] ], + '[Geometry]': + [ + ['geo_option'], + ['rhoc', 'Rmaj', 'R_geo', 'shift', 'qinp', 'shat', 'akappa', 'akappri', 'tri', 'tripri', 'betaprim'] + ], + '[Dissipation]': + [ ['closure_model', 'hypercollisions', 'nu_hyper_m', 'p_hyper_m', 'nu_hyper_l', 'p_hyper_l', 'hyper', 'D_hyper', 'p_hyper', 'D_H', 'w_osc', 'p_HB', 'HB_hyper'], [] ], + '[Restart]': + [ ['save_for_restart', 'nsave','restart_to_file', 'restart', 'restart_from_file'], [] ], + '[Diagnostics]': + [ ['nwrite', 'omega', 'fluxes', 'fields', 'moments'], [] ] + } + + param_written = [] + for block_name, params in blocks.items(): + param_written = self._write_block(f, f"{block_name}", params, param_written) + + param_written = self._write_block_species(f, param_written) + + # Check that 
parameters were all considerd in the blocks + for param in self.controls | self.plasma: + if param not in param_written: + print(f"Warning: {param} not written to file", typeMsg="q") + + def _write_block(self,f,name, param, param_written): + + # Local formatter: floats -> 6 significant figures in exponential (uppercase), + # ints stay as ints, bools as 0/1, sequences space-separated with same rule. + def _fmt_num(x): + import numpy as _np + if isinstance(x, (bool, _np.bool_)): + return "true" if x else "false" + if isinstance(x, (_np.floating, float)): + # 6 significant figures in exponential => 5 digits after decimal + return f"{float(x):.5E}" + if isinstance(x, (_np.integer, int)): + return f"{int(x)}" + return str(x) + + def _fmt_value(val): + import numpy as _np + if isinstance(val, (list, tuple, _np.ndarray)): + # Flatten numpy arrays but keep ordering; join with spaces + if isinstance(val, _np.ndarray): + flat = val.flatten().tolist() + else: + flat = list(val) + return " ".join(_fmt_num(v) for v in flat) + return _fmt_num(val) + + f.write(f'{name}\n') + for p in param[0]: + if p in self.controls: + if self.controls[p] is not None: + f.write(f" {p.ljust(23)} = {_fmt_value(self.controls[p])}\n") + param_written.append(p) + for p in param[1]: + if p in self.plasma: + if self.plasma[p] is not None: + f.write(f" {p.ljust(23)} = {_fmt_value(self.plasma[p])}\n") + param_written.append(p) + f.write(f'\n') + + return param_written + + def _write_block_species(self, f, param_written): + + # Local formatter: floats -> 6 significant figures in exponential (uppercase), + # ints stay as ints, bools as 0/1, sequences space-separated with same rule. 
+ def _fmt_num(x): + import numpy as _np + if isinstance(x, (bool, _np.bool_)): + return "true" if x else "false" + if isinstance(x, (_np.floating, float)): + # 6 significant figures in exponential => 5 digits after decimal + return f"{float(x):.5E}" + if isinstance(x, (_np.integer, int)): + return f"{int(x)}" + return str(x) + + def _fmt_value(val): + import numpy as _np + if isinstance(val, (list, tuple, _np.ndarray)): + # Flatten numpy arrays but keep ordering; join with spaces + if isinstance(val, _np.ndarray): + flat = val.flatten().tolist() + else: + flat = list(val) + return " ".join(_fmt_num(v) for v in flat) + return _fmt_num(val) + + self.num_recorded = 0 + for i in range(1000): + if f"z_{i+1}" in self.plasma: + self.num_recorded += 1 + else: + break + + z, dens, temp, mass, fprim, tprim, vnewk, typeS = '[ ', '[ ', '[ ', '[ ', '[ ', '[ ', '[ ', '[ ' + for i in range(self.num_recorded): + typeS += f'"{_fmt_value(self.plasma[f"type_{i+1}"])}", ' + z += f'{_fmt_value(self.plasma[f"z_{i+1}"])}, ' + mass += f'{_fmt_value(self.plasma[f"mass_{i+1}"])}, ' + dens += f'{_fmt_value(self.plasma[f"dens_{i+1}"])}, ' + temp += f'{_fmt_value(self.plasma[f"temp_{i+1}"])}, ' + fprim += f'{_fmt_value(self.plasma[f"fprim_{i+1}"])}, ' + tprim += f'{_fmt_value(self.plasma[f"tprim_{i+1}"])}, ' + vnewk += f'{_fmt_value(self.plasma[f"vnewk_{i+1}"])}, ' + + param_written.append(f"type_{i+1}") + param_written.append(f"z_{i+1}") + param_written.append(f"mass_{i+1}") + param_written.append(f"dens_{i+1}") + param_written.append(f"temp_{i+1}") + param_written.append(f"fprim_{i+1}") + param_written.append(f"tprim_{i+1}") + param_written.append(f"vnewk_{i+1}") + + f.write("[species]\n") + f.write(f" type = {typeS[:-4]} ]\n") + f.write(f" z = {z[:-4]} ]\n") + f.write(f" mass = {mass[:-4]} ]\n") + f.write(f" dens = {dens[:-4]} ]\n") + f.write(f" temp = {temp[:-4]} ]\n") + f.write(f" fprim = {fprim[:-4]} ]\n") + f.write(f" tprim = {tprim[:-4]} ]\n") + f.write(f" vnewk = {vnewk[:-4]} ]\n") + 
+ f.write("\n") + + return param_written + +class GXoutput(SIMtools.GACODEoutput): + def __init__(self, FolderGACODE, suffix="", tmin = 0.0, **kwargs): + super().__init__() + + self.FolderGACODE, self.suffix = FolderGACODE, suffix + + self.tmin = tmin + + if suffix == "": + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} without suffix") + else: + print(f"\t- Reading results from folder {IOtools.clipstr(FolderGACODE)} with suffix {suffix}") + + self.inputclass = GXinput(file=self.FolderGACODE / f"gxplasma.in{self.suffix}") + + self.read() + + def read(self): + + data = netCDF4.Dataset(self.FolderGACODE / f"gxplasma.out.nc{self.suffix}") + + self.t = data.groups['Grids'].variables['time'][:] # (time) + + # Growth rates + ikx = 0 + self.ky = data.groups['Grids'].variables['ky'][1:] # (ky) + self.w = data.groups['Diagnostics'].variables['omega_kxkyt'][:,1:,ikx,0] # (time, ky) + self.g = data.groups['Diagnostics'].variables['omega_kxkyt'][:,1:,ikx,1] # (time, ky) + + # Fluxes + Q = data.groups['Diagnostics'].variables['HeatFlux_st'] # (time, species) + G = data.groups['Diagnostics'].variables['ParticleFlux_st'] # (time, species) + + # Assume electrons are always last + self.Qe = Q[:,-1] + self.QiAll = Q[:,:-1] + self.Qi = self.QiAll.sum(axis=1) + self.Ge = G[:,-1] + self.GiAll = G[:,:-1] + self.Gi = self.GiAll.sum(axis=1) + + self._signal_analysis() + + def _signal_analysis(self): + + flags = [ + 'Qe', + 'Qi', + 'Ge', + ] + + for iflag in flags: + self.__dict__[iflag+'_mean'], self.__dict__[iflag+'_std'] = CGYROutils.apply_ac( + self.t, + self.__dict__[iflag], + tmin=self.tmin, + label_print=iflag, + print_msg=True, + ) + diff --git a/src/mitim_tools/simulation_tools/utils/SIMplot.py b/src/mitim_tools/simulation_tools/utils/SIMplot.py new file mode 100644 index 00000000..b8830a36 --- /dev/null +++ b/src/mitim_tools/simulation_tools/utils/SIMplot.py @@ -0,0 +1,53 @@ +from mitim_tools.misc_tools import GRAPHICStools +from 
mitim_tools.misc_tools.LOGtools import printMsg as print +from IPython import embed + +class GKplotting: + def _plot_trace(self, ax, object_or_label, variable, c="b", lw=1, ls="-", label_plot='', meanstd=True, var_meanstd= None): + + if isinstance(object_or_label, str): + object_grab = self.results[object_or_label] + else: + object_grab = object_or_label + + t = object_grab.t + + if not isinstance(variable, str): + z = variable + if var_meanstd is not None: + z_mean = var_meanstd[0] + z_std = var_meanstd[1] + + else: + z = object_grab.__dict__[variable] + if meanstd and (f'{variable}_mean' in object_grab.__dict__): + z_mean = object_grab.__dict__[variable + '_mean'] + z_std = object_grab.__dict__[variable + '_std'] + else: + z_mean = None + z_std = None + + ax.plot( + t, + z, + ls=ls, + lw=lw, + c=c, + label=label_plot, + ) + + if meanstd and z_std>0.0: + GRAPHICStools.fillGraph( + ax, + t[t>object_grab.tmin], + z_mean, + y_down=z_mean + - z_std, + y_up=z_mean + + z_std, + alpha=0.1, + color=c, + lw=0.5, + islwOnlyMean=True, + label=label_plot + f" $\\mathbf{{{z_mean:.3f} \\pm {z_std:.3f}}}$ (1$\\sigma$)", + ) \ No newline at end of file diff --git a/src/mitim_tools/transp_tools/CDFtools.py b/src/mitim_tools/transp_tools/CDFtools.py index fcb1745d..27e69d5d 100644 --- a/src/mitim_tools/transp_tools/CDFtools.py +++ b/src/mitim_tools/transp_tools/CDFtools.py @@ -130,7 +130,7 @@ def __init__( # Capability to provide folder and just find the CDF in there if self.LocationCDF.is_dir(): - self.LocationCDF = IOtools.findFileByExtension(self.LocationCDF, ".CDF", agnostic_to_case=True) + self.LocationCDF = IOtools.findFileByExtension(self.LocationCDF, ".CDF", agnostic_to_case=True,do_not_consider_files=['PH.CDF']) if self.LocationCDF is None: raise ValueError(f"[MITIM] Could not find a CDF file in {self.LocationCDF}") # ---------------------------- @@ -513,7 +513,7 @@ def evaluateReactorMetrics(self, ReactorTextFile=None, EvaluateExtraAnalysis=Non """ # ****** Settings 
********* - TGLFsettings = 5 + code_settings = 5 d_perp_cm = {0.7: 0.757 / np.sqrt(2) / (np.cos(11 * (np.pi / 180)))} # ************************* @@ -540,7 +540,7 @@ def evaluateReactorMetrics(self, ReactorTextFile=None, EvaluateExtraAnalysis=Non quantity=quantity, rho=location, time=self.t[index], - TGLFsettings=TGLFsettings, + code_settings=code_settings, d_perp_cm=d_perp_cm, ) success = success and True @@ -7682,12 +7682,14 @@ def plotICRF_t(self, fig=None): if fig is None: fig = plt.figure() - grid = plt.GridSpec(2, 2, hspace=0.3, wspace=0.2) + grid = plt.GridSpec(2, 3, hspace=0.3, wspace=0.4) ax1 = fig.add_subplot(grid[0, 0]) ax2 = fig.add_subplot(grid[0, 1], sharex=ax1) ax3 = fig.add_subplot(grid[1, 0], sharex=ax1) ax4 = fig.add_subplot(grid[1, 1], sharex=ax1) + ax5 = fig.add_subplot(grid[0, 2], sharex=ax1) + ax6 = fig.add_subplot(grid[1, 2], sharex=ax1) # ELECTRONS ax = ax1 @@ -7774,10 +7776,10 @@ def plotICRF_t(self, fig=None): # ax.plot(self.x_lw,tot,lw=3,label='$P_{ICH}$') ax.plot(self.t, self.PichT, c="r", lw=3, label="$P_{ICH}$") - ax.plot(self.t, self.PichT_min, lw=2, label="$P_{ICH->min}$") ax.plot(self.t, self.PiichT_dir, lw=2, label="$P_{ICH->i}$") ax.plot(self.t, self.PeichT_dir, lw=2, label="$P_{ICH->e}$") ax.plot(self.t, self.PfichT_dir, lw=2, label="$P_{ICH->fast}$") + ax.plot(self.t, self.PichT_min, lw=2, label="$P_{ICH->min}$") ax.plot(self.t, self.PichT_check, lw=2, ls="--", c="y", label="check (sum)") ax.set_title("Total Balance") @@ -7788,6 +7790,26 @@ def plotICRF_t(self, fig=None): GRAPHICStools.addLegendApart(ax, ratio=0.85, size=self.mainLegendSize) GRAPHICStools.addDenseAxis(ax) + # TOTAL + ax = ax6 + + ax.plot(self.t, self.PichT, c="r", lw=3, label="$P_{ICH}$") + ax.plot(self.t, self.PiichT, lw=2, label="$P_{ICH,i}$") + ax.plot(self.t, self.PeichT, lw=2, label="$P_{ICH,e}$") + ax.plot(self.t, self.PfichT_dir, lw=2, label="$P_{ICH,fast}$") + ax.plot(self.t, self.GainminT, lw=2, label="$dW_{min}/dt$") + P = self.PeichT + self.PiichT + 
self.PfichT_dir + self.GainminT + ax.plot(self.t, P, lw=2, ls="--", c="y", label="check (sum)") + + ax.set_title("Total Balance (after thermalization)") + ax.set_ylabel("Power (MW)") + ax.set_xlabel("Time (s)") + ax.set_ylim(bottom=0) + + GRAPHICStools.addLegendApart(ax, ratio=0.85, size=self.mainLegendSize) + GRAPHICStools.addDenseAxis(ax) + + def plotRelevantResonances(self, ax, Fich, time=None, legendYN=False, lw=3): if time is None: i1 = self.ind_saw @@ -8195,13 +8217,11 @@ def plotSeparateSystems(self, fig=None): if np.sum(self.PichT) > 1.0e-5: for i in range(len(self.PichT_ant)): ax.plot(self.t, self.PichT_ant[i], lw=2, label=f"{i + 1}") - ax.plot( - self.t, self.PeichT + self.PiichT, c="y", ls="--", label="to species" - ) + ax.plot(self.t, self.PeichT + self.PiichT + self.PfichT_dir + self.GainminT, c="y", ls="--", label="to species (e+i+f+dWmin/dt)") timeb = 0.25 it1 = np.argmin(np.abs(self.t - (self.t[-1] - timeb))) - mean = np.mean(self.PeichT[it1:] + self.PiichT[it1:]) + mean = np.mean(self.PeichT[it1:] + self.PiichT[it1:] + self.PfichT_dir[it1:] + self.GainminT[it1:]) ax.axhline( y=mean, alpha=0.5, @@ -8213,7 +8233,7 @@ def plotSeparateSystems(self, fig=None): ax.plot( self.t, - self.PeichT + self.PiichT + self.PichTOrbLoss, + self.PeichT + self.PiichT + self.PfichT_dir + self.GainminT + self.PichTOrbLoss, c="c", ls="--", label="+ orb losses", @@ -8462,7 +8482,7 @@ def plotHeating(self, fig=None): if fig is None: fig = plt.figure() - grid = plt.GridSpec(nrows=2, ncols=4, hspace=0.3, wspace=0.2) + grid = plt.GridSpec(nrows=2, ncols=4, hspace=0.3, wspace=0.4) ax1 = fig.add_subplot(grid[0, 0]) ax2 = fig.add_subplot(grid[0, 1]) @@ -8478,9 +8498,7 @@ def plotHeating(self, fig=None): ax1.plot(self.t, self.PichT, "r", ls="-", lw=2, label="$P_{ICH}$") ax1.plot(self.t, self.PeichT, "b", ls="-", lw=1, label="$P_{ICH,e}$") ax1.plot(self.t, self.PiichT, "g", ls="-", lw=1, label="$P_{ICH,i}$") - ax1.plot( - self.t, self.PeichT + self.PiichT, "y", ls="--", lw=1, 
label="$P_{ICH,e+i}$" - ) + ax1.plot(self.t, self.PeichT + self.PiichT, "y", ls="--", lw=1, label="$P_{ICH,e+i}$") ax1.plot(self.t, self.PichT_min, "r", ls="--", lw=1, label="$P_{min}$") ax1.plot(self.t, self.PeichT_dir, "r", ls="-.", lw=1, label="$P_{dir,e}$") @@ -14023,13 +14041,13 @@ def runTGLFstandalone( cold_startPreparation=False, plotCompare=True, extraflag="", - onlyThermal_TGYRO=False, + remove_fast=False, forceIfcold_start=True, **kwargs_TGLFrun, ): """ Note: If this plasma had fast paricles but not at the time I'm running TGLF, then it will fail if I - set onlyThermal_TGYRO=False because at that time the particles are zero + set remove_fast=False because at that time the particles are zero """ if time is None: @@ -14060,7 +14078,7 @@ def runTGLFstandalone( cdf = self.TGLFstd[nameF].prep( folderGACODE, cold_start=cold_startPreparation, - onlyThermal_TGYRO=onlyThermal_TGYRO, + remove_fast=remove_fast, cdf_open=self, forceIfcold_start=forceIfcold_start, ) @@ -14068,7 +14086,7 @@ def runTGLFstandalone( labelTGLF = kwargs_TGLFrun.get("label", "tglf1") self.TGLFstd[nameF].run( - subFolderTGLF=labelTGLF, + subfolder=labelTGLF, forceIfcold_start=forceIfcold_start, **kwargs_TGLFrun, ) @@ -14104,7 +14122,7 @@ def transportAnalysis( rho=0.50, time=None, avTime=0.0, - TGLFsettings=1, + code_settings=1, d_perp_cm=None, ): if time is None: @@ -14126,10 +14144,10 @@ def transportAnalysis( if typeAnalysis == "CHIPERT": self.TGLFstd[int(time * 1000)].runAnalysis( - subFolderTGLF="chi_per", + subfolder="chi_per", label="chi_pert", analysisType="e", - TGLFsettings=TGLFsettings, + code_settings=code_settings, ) value = self.TGLFstd[int(time * 1000)].scans["chi_pert"]["chi_inc"][0] @@ -14139,10 +14157,10 @@ def transportAnalysis( addTrace = [40, 173] self.TGLFstd[int(time * 1000)].runAnalysis( - subFolderTGLF="impurity", + subfolder="impurity", label="impurity", analysisType="Z", - TGLFsettings=TGLFsettings, + code_settings=code_settings, trace=addTrace, ) @@ -14155,8 
+14173,8 @@ def transportAnalysis( if "FLUC" in typeAnalysis: self.TGLFstd[int(time * 1000)].run( - subFolderTGLF="fluctuations", - TGLFsettings=TGLFsettings, + subfolder="fluctuations", + code_settings=code_settings, forceIfcold_start=True, ) @@ -14204,13 +14222,13 @@ def plotStdTRANSP(self, tglfRun="tglf1", fig=None, label="", time=None, leg=True TGLFstd_x = self.TGLFstd[int(time * 1000)].results[tglfRun]["x"] TGLFstd_Qe, TGLFstd_Qi = [], [] for i in range( - len(self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"]) + len(self.TGLFstd[int(time * 1000)].results[tglfRun]["output"]) ): TGLFstd_Qe.append( - self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"][i].Qe_unn + self.TGLFstd[int(time * 1000)].results[tglfRun]["output"][i].Qe_unn ) TGLFstd_Qi.append( - self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"][i].Qi_unn + self.TGLFstd[int(time * 1000)].results[tglfRun]["output"][i].Qi_unn ) TGLFstd_Qe, TGLFstd_Qi = np.array(TGLFstd_Qe), np.array(TGLFstd_Qi) @@ -14343,16 +14361,16 @@ def plotGRTRANSP(self, tglfRun="tglf1", fig=None, label="", time=None): else: TGLFstd_ky, TGLFstd_gamma, TGLFstd_freq = [], [], [] for i in range( - len(self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"]) + len(self.TGLFstd[int(time * 1000)].results[tglfRun]["output"]) ): TGLFstd_ky.append( - self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"][i].ky + self.TGLFstd[int(time * 1000)].results[tglfRun]["output"][i].ky ) TGLFstd_gamma.append( - self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"][i].g[0] + self.TGLFstd[int(time * 1000)].results[tglfRun]["output"][i].g[0] ) TGLFstd_freq.append( - self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"][i].f[0] + self.TGLFstd[int(time * 1000)].results[tglfRun]["output"][i].f[0] ) TGLFstd_ky, TGLFstd_gamma, TGLFstd_freq = ( @@ -14500,19 +14518,19 @@ def plotFLTRANSP(self, tglfRun="tglf1", fig=None, label="", time=None): else: TGLFstd_ky, TGLFstd_te, TGLFstd_ne = [], [], [] for i in range( - 
len(self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"]) + len(self.TGLFstd[int(time * 1000)].results[tglfRun]["output"]) ): TGLFstd_ky.append( - self.TGLFstd[int(time * 1000)].results[tglfRun]["TGLFout"][i].ky + self.TGLFstd[int(time * 1000)].results[tglfRun]["output"][i].ky ) TGLFstd_te.append( self.TGLFstd[int(time * 1000)] - .results[tglfRun]["TGLFout"][i] + .results[tglfRun]["output"][i] .AmplitudeSpectrum_Te ) TGLFstd_ne.append( self.TGLFstd[int(time * 1000)] - .results[tglfRun]["TGLFout"][i] + .results[tglfRun]["output"][i] .AmplitudeSpectrum_ne ) @@ -14658,7 +14676,7 @@ def compareChiPert( time=None, rhoRange=[0.4, 0.8], timeRange=0.5, - TGLFsettings=1, + code_settings=1, cold_start=False, plotYN=True, ): @@ -14679,10 +14697,10 @@ def compareChiPert( self.ChiPert_tglf = TGLFtools.TGLF(cdf=self.LocationCDF, time=time, rhos=rhos) self.ChiPert_tglf.prep(self.FolderCDF / "chi_per_calc", cold_start=cold_start) self.ChiPert_tglf.runAnalysis( - subFolderTGLF="chi_per", + subfolder="chi_per", label="chi_pert", analysisType="e", - TGLFsettings=TGLFsettings, + code_settings=code_settings, cold_start=cold_start, cdf_open=self, ) @@ -15425,7 +15443,7 @@ def grid_interpolation_method_to_zero(x,y): for key in ['ne(10^19/m^3)', 'ni(10^19/m^3)', 'te(keV)', 'ti(keV)', 'rmin(m)']: profiles[key] = profiles[key].clip(min=minimum) - p = PROFILEStools.PROFILES_GACODE.scratch(profiles) + p = PROFILEStools.gacode_state.scratch(profiles) return p diff --git a/src/mitim_tools/transp_tools/NMLtools.py b/src/mitim_tools/transp_tools/NMLtools.py index 0b8663d1..94cb966b 100644 --- a/src/mitim_tools/transp_tools/NMLtools.py +++ b/src/mitim_tools/transp_tools/NMLtools.py @@ -180,7 +180,7 @@ def _default_params(self,**transp_params): self.grTGLF = transp_params.get("grTGLF",False) self.Te_edge = transp_params.get("Te_edge",80.0) self.Ti_edge = transp_params.get("Ti_edge",80.0) - self.TGLFsettings = transp_params.get("TGLFsettings",5) + self.code_settings = 
transp_params.get("code_settings",5) def populate(self, **transp_params): @@ -1335,10 +1335,8 @@ def addGLF23(self): self.contents_ptr_glf23 = "\n".join(lines) + "\n" def addTGLF(self): - TGLFoptions, label = GACODEdefaults.TGLFinTRANSP(self.TGLFsettings) - print( - f"\t- Adding TGLF control parameters with TGLFsettings = {self.TGLFsettings} ({label})" - ) + TGLFoptions = GACODEdefaults.TGLFinTRANSP(self.code_settings) + print(f"\t- Adding TGLF control parameters with code_settings = {self.code_settings}") lines = [ "!------ TGLF namelist", diff --git a/src/mitim_tools/transp_tools/src/TRANSPsingularity.py b/src/mitim_tools/transp_tools/src/TRANSPsingularity.py index 7a182af3..44906133 100644 --- a/src/mitim_tools/transp_tools/src/TRANSPsingularity.py +++ b/src/mitim_tools/transp_tools/src/TRANSPsingularity.py @@ -445,6 +445,9 @@ def runSINGULARITY( shellPreCommands=shellPreCommands, ) + if 'exclusive' not in transp_job.machineSettings["slurm"] or not transp_job.machineSettings["slurm"]["exclusive"]: + print("\tTRANSP typically requires exclusive node allocation, but that has not been requested, prone to failure", typeMsg="w") + transp_job.run(waitYN=False) IOtools.shutil_rmtree(folderWork / 'tmp_inputs') @@ -472,7 +475,7 @@ def interpretRun(infoSLURM, log_file): Case is not running (finished or failed) """ - if "TERMINATE THE RUN (NORMAL EXIT)" in "\n".join(log_file): + if "TERMINATE THE RUN (NORMAL EXIT)" in "\n".join(log_file) or "Finished TRANSP run app." 
in "\n".join(log_file): status = 1 info["info"]["status"] = "finished" elif ("Error termination" in "\n".join(log_file)) or ( @@ -489,10 +492,7 @@ def interpretRun(infoSLURM, log_file): status = -1 info["info"]["status"] = "stopped" else: - print( - "\t- No error nor termination found, assuming it is still running", - typeMsg="w", - ) + print("\t- No error nor termination found, assuming it is still running",typeMsg="w",) pringLogTail(log_file, typeMsg="i") status = 0 info["info"]["status"] = "running" @@ -517,6 +517,7 @@ def pringLogTail(log_file, howmanylines=100, typeMsg="w"): print(txt, typeMsg=typeMsg) def runSINGULARITY_finish(folderWork, runid, tok, job_name): + transp_job = FARMINGtools.mitim_job(folderWork) transp_job.define_machine( @@ -568,7 +569,7 @@ def runSINGULARITY_finish(folderWork, runid, tok, job_name): if item.is_file(): shutil.copy2(item, folderWork) elif item.is_dir(): - shutil.copytree(item, folderWork / item.name) + shutil.copytree(item, folderWork / item.name, dirs_exist_ok=True) def runSINGULARITY_look(folderWork, folderTRANSP, runid, job_name, times_retry_look = 3): diff --git a/src/mitim_tools/transp_tools/utils/PLASMASTATEtools.py b/src/mitim_tools/transp_tools/utils/NTCCtools.py similarity index 96% rename from src/mitim_tools/transp_tools/utils/PLASMASTATEtools.py rename to src/mitim_tools/transp_tools/utils/NTCCtools.py index af29a8ca..2c9da34a 100644 --- a/src/mitim_tools/transp_tools/utils/PLASMASTATEtools.py +++ b/src/mitim_tools/transp_tools/utils/NTCCtools.py @@ -1,15 +1,9 @@ import copy import numpy as np +import matplotlib.pyplot as plt from collections import OrderedDict - -try: - import xarray as xr -except: - pass -try: - from IPython import embed -except: - pass +import xarray as xr +from IPython import embed """ This set of routines is used to create a Plasmastate class by reading a standard @@ -54,18 +48,14 @@ def modify_default( print(f"\t- Modifying {self.CDFfile} Plasmastate file...") - 
self.lumpChargeStates(self.CDFfile_new + "_1") + self.lumpChargeStates( self.CDFfile_new.with_name(self.CDFfile_new.name + '_1')) + try: - self.removeExtraFusionIons( - self.CDFfile_new + "_2", speciesNames=RemoveFusionIons - ) + self.removeExtraFusionIons( self.CDFfile_new.with_name(self.CDFfile_new.name + '_2'), speciesNames=RemoveFusionIons) except: - print( - " --> I could not remove extra fusion ions. Probably because TRANSP was run without fusion reactions", - ) - self.removeExtraTHERMALIons( - self.CDFfile_new + "_3", speciesNames=RemoveTHERMALIons - ) + print(" --> I could not remove extra fusion ions. Probably because TRANSP was run without fusion reactions",) + + self.removeExtraTHERMALIons( self.CDFfile_new.with_name(self.CDFfile_new.name + '_3'), speciesNames=RemoveTHERMALIons) self.addShotNumber(self.CDFfile_new, shotNumber) @@ -129,9 +119,7 @@ def lumpChargeStates(self, fileNew): self.plasma = xr.Dataset(NewData) else: - print( - f"Charge States for impurity {impName} could not be found...", - ) + print(f"Charge States for impurity {impName} could not be found...",) # ------------------------------------------------------------------ # Writting New PLASMASTATE diff --git a/src/mitim_tools/transp_tools/utils/TRANSPhelpers.py b/src/mitim_tools/transp_tools/utils/TRANSPhelpers.py index 0b78795e..a0f46799 100644 --- a/src/mitim_tools/transp_tools/utils/TRANSPhelpers.py +++ b/src/mitim_tools/transp_tools/utils/TRANSPhelpers.py @@ -1,10 +1,8 @@ -import os import shutil import numpy as np import matplotlib.pyplot as plt from mitim_tools.transp_tools import TRANSPtools, CDFtools, UFILEStools, NMLtools from mitim_tools.gs_tools import GEQtools -from mitim_tools.gacode_tools import PROFILEStools from mitim_tools.misc_tools import IOtools, MATHtools, PLASMAtools, GRAPHICStools, FARMINGtools from mitim_tools.misc_tools.LOGtools import printMsg as print from IPython import embed @@ -762,7 +760,8 @@ def from_profiles(self, time, profiles_file, Vsurf = 0.0): 
self.time = time if isinstance(profiles_file, str): - self.p = PROFILEStools.PROFILES_GACODE(profiles_file) + from mitim_tools.gacode_tools import PROFILEStools + self.p = PROFILEStools.gacode_state(profiles_file) else: self.p = profiles_file @@ -809,7 +808,7 @@ def _produce_quantity_profiles(self, var = 'Te', Vsurf = None): z = self.p.derived['Zeff'] elif var == 'PichT': x = [1] - z = self.p.derived['qRF_MWmiller'][-1]*1E6 + z = self.p.derived['qRF_MW'][-1]*1E6 return x,z @@ -821,7 +820,7 @@ def _produce_geometry_profiles(self): # Separatrix # -------------------------------------------------------------- - self.geometry['R_sep'], self.geometry['Z_sep'] = self.p.derived["R_surface"][-1], self.p.derived["Z_surface"][-1] + self.geometry['R_sep'], self.geometry['Z_sep'] = self.p.derived["R_surface"][0,-1,:], self.p.derived["Z_surface"][0,-1,:] # -------------------------------------------------------------- # VV @@ -1356,7 +1355,7 @@ def default_nml( "Fuel": 2.5, }, pservers=[1, 1, 0], - TGLFsettings=5, + code_settings=5, useMMX = False, isolver = False, grTGLF = False # Disable by default because it takes disk space and time... 
enable for 2nd preditive outside of this routine @@ -1415,7 +1414,7 @@ def default_nml( AddHe4ifDT=AddHe4ifDT, isolver=isolver, PTsolver=True, - TGLFsettings=TGLFsettings, + code_settings=code_settings, grTGLF=grTGLF, **transp_params ) diff --git a/templates/config_user_example.json b/templates/config_user_example.json index 0aadcac0..6dd6f325 100644 --- a/templates/config_user_example.json +++ b/templates/config_user_example.json @@ -6,21 +6,37 @@ "profiles_gen": "engaging", "tgyro": "engaging", "tglf": "engaging", + "neo": "engaging", "cgyro": "engaging", + "gx": "engaging", "astra": "engaging", "eq": "mfews", "scruncher": "mfews", "ntcc": "mfews", "get_fbm": "mfews", "transp": "globus", - "idl": "mfews" + "idl": "mfews", + "eped": "engaging" }, "local": { "machine": "local", "username": "exampleusername", "scratch": "/Users/exampleusername/scratch/", "modules": "", - "cores_per_node": 8 + "cores_per_node": 8, + "gpus_per_node": 0 + }, + "engaging": { + "machine": "orcd-login001.mit.edu", + "username": "exampleusername", + "slurm": { + "partition": "sched_mit_psfc", + "exclusive": false + }, + "scratch": "/orcd/pool/003/exampleusername/scratch/", + "modules": "", + "cores_per_node": 64, + "gpus_per_node": 0 }, "perlmutter": { "machine": "perlmutter.nersc.gov", @@ -35,18 +51,8 @@ "exclusive": false, "email": "optional@email" }, - "cores_per_node": 32 - }, - "engaging": { - "machine": "eofe7.mit.edu", - "username": "exampleusername", - "slurm": { - "partition": "sched_mit_psfc", - "exclusive": true - }, - "scratch": "/nobackup1/exampleusername/", - "modules": "", - "cores_per_node": 32 + "cores_per_node": 32, + "gpus_per_node": 0 }, "mfews": { "machine": "mfews02.psfc.mit.edu", @@ -54,7 +60,8 @@ "port": 9224, "scratch": "/home/exampleusername/scratch/", "modules": "", - "cores_per_node": 8 + "cores_per_node": 8, + "gpus_per_node": 0 }, "globus": { "username": "exampleusername", @@ -75,6 +82,7 @@ "email": "optional@email", "exclude": "node584" }, - "cores_per_node": 
8 + "cores_per_node": 8, + "gpus_per_node": 0 } } \ No newline at end of file diff --git a/templates/input.cgyro.controls b/templates/input.cgyro.controls index 220481f8..47306e1f 100644 --- a/templates/input.cgyro.controls +++ b/templates/input.cgyro.controls @@ -1,15 +1,4 @@ -#============================================================= -#CGYRO ion-scale input file -#============================================================= -# -#Simulation notes: ITER Baseline Scenario ~134 x 118 rho_s_D box ; -#0.053 < k_theta rhos_D <1.2. Focused on r/a=0.55 -#ExB shear on and new profiles set at 430 acs -#Startup phase starts with ExB shear on. Profiles adopted from -#Holland JPP 2023 and Mantica PPCF 2020 -#------------------------------------------------------------- - -#============================ +#======================== #Basic Simulation Parameters #============================ @@ -17,10 +6,9 @@ NONLINEAR_FLAG=1 #Radius used for simulation -RMIN=0.55 -#Use Experimental or Specified Inputs -PROFILE_MODEL=2 +#Data source +PROFILE_MODEL=1 # 1: Use inputs in this file; 2: Useinput.gacode (indicate RMIN=) #Geometry type (1=s-alpha,2=MXH) EQUILIBRIUM_MODEL=2 @@ -31,6 +19,9 @@ N_FIELD=3 #Data field output flag MOMENT_PRINT_FLAG=1 +#Make EM fields available as output +FIELD_PRINT_FLAG=1 + #Velocity Order #VELOCITY_ORDER=2 @@ -99,6 +90,9 @@ PRINT_STEP=100 #Max simulation time (units a/c_s) MAX_TIME=100 #900 +#Frequency Tolerance +FREQ_TOL=0.001 + #Number of data outputs before saving a cold_start (DELTA_T*PRINT_STEP is one data output) RESTART_STEP=10 @@ -113,22 +107,20 @@ COLLISION_MODEL=4 #Rotation Scaling of Exp. 
Values ROTATION_MODEL=2 SHEAR_METHOD=2 -GAMMA_E_SCALE=0.0 -GAMMA_P_SCALE=0.0 -MACH_SCALE=0.0 +GAMMA_E_SCALE=1.0 +GAMMA_P_SCALE=1.0 +MACH_SCALE=1.0 #Scaling of Electron beta and Lambda Debye BETAE_UNIT_SCALE=1.0 BETA_STAR_SCALE=1.0 LAMBDA_STAR_SCALE=1.0 - -#============================== -#Species Specification -#============================== - -#Number of gyrokinetic species -N_SPECIES=2 - EXCH_FLAG=1 +# Gradient scalings +DLNTDR_SCALE_1 = 1.0 +DLNTDR_SCALE_2 = 1.0 +DLNTDR_SCALE_3 = 1.0 +DLNTDR_SCALE_4 = 1.0 +DLNTDR_SCALE_5 = 1.0 diff --git a/templates/input.cgyro.models.json b/templates/input.cgyro.models.json deleted file mode 100644 index 4b45ea68..00000000 --- a/templates/input.cgyro.models.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "0": { - "label": "Linear", - "controls": { - "NONLINEAR_FLAG": 0 - } - } -} \ No newline at end of file diff --git a/templates/input.cgyro.models.yaml b/templates/input.cgyro.models.yaml new file mode 100644 index 00000000..a7a5e1fb --- /dev/null +++ b/templates/input.cgyro.models.yaml @@ -0,0 +1,7 @@ +"Linear": + controls: + NONLINEAR_FLAG: 0 + +"Nonlinear": + controls: + NONLINEAR_FLAG: 1 \ No newline at end of file diff --git a/templates/input.gx.controls b/templates/input.gx.controls new file mode 100644 index 00000000..d6e06340 --- /dev/null +++ b/templates/input.gx.controls @@ -0,0 +1,67 @@ +#============================================================= +# GX example +#============================================================= + + debug = false + +[Dimensions] + ntheta = 24 # number of points along field line (theta) per 2pi segment + nperiod = 1 # number of 2pi segments along field line is 2*nperiod-1 + ny = 64 # number of real-space grid-points in y, nky = 1 + (ny-1)/3 + nx = 192 # number of real-space grid-points in x, nkx = 1 + 2*(nx-1)/3 + + nhermite = 48 # number of hermite moments (v_parallel resolution) + nlaguerre = 16 # number of laguerre moments (mu B resolution) + +[Domain] + y0 = 20.0 # controls box length in y 
(in units of rho_ref) and minimum ky, so that ky_min*rho_ref = 1/y0 --> ky_min = 0.0714, ky_max = 2.57, L_y = 88 + boundary = "linked" # use twist-shift boundary conditions along field line + +[Physics] + nonlinear_mode = true # this is a linear calculation + ei_colls = false # turn off electron-ion collisions + +[Time] + t_max = 500.0 # end time (in units of L_ref/vt_ref) + scheme = "rk3" # use RK3 timestepping scheme (with adaptive timestepping) + +[Initialization] + gaussian_init = true # initial perturbation is a gaussian in theta + ikpar_init = 0 # parallel wavenumber of initial perturbation + init_field = "density" # initial condition set in density + init_amp = 1.0e-3 # amplitude of initial condition + +[Geometry] + geo_option = "miller" # use Miller geometry (values provided below) + +[Dissipation] + closure_model = "none" # no closure assumptions (just truncation) + hypercollisions = true # use hypercollision model + hyper = true # use hyperdiffusion + HB_hyper = false # use Hammett-Belli hyperdiffusivity model + + nu_hyper_m = 0.5 # coefficient of hermite hypercollisions + nu_hyper_l = 0.5 # coefficient of laguerre hypercollisions + D_hyper = 0.5 # coefficient of hyperdiffusion + p_hyper_m = 6 # exponent of hermite hypercollisions + p_hyper_l = 6 # exponent of laguerre hypercollisions + p_hyper = 2 # exponent of hyperdiffusion is 2*p_hyper = 4 + D_H = 0.5 # coefficient of H-B hyperdiffusion + w_osc = 0.0 # frequency parameter in the H-B model + p_HB = 2 # exponent for the H-B model + +[Restart] + save_for_restart = true + nsave = 100 # save restart file every nsave timesteps +#restart_to_file = "" # Specify restart file name, otherwise “[input_stem].restart.nc” + +restart = false +restart_from_file = "none" # Specify restart file name, otherwise “[input_stem].restart.nc” + + +[Diagnostics] + nwrite = 100 # write diagnostics every nwrite timesteps (this is NOT a/c_s) (=1 doesn't work) + omega = true # compute and write linear growth rate and frequency + 
fluxes = true # compute and write heat and particle fluxes + fields = true # compute and write electric and magnetic fields + moments = true # write moments on the grid diff --git a/templates/input.gx.models.yaml b/templates/input.gx.models.yaml new file mode 100644 index 00000000..b7d24a10 --- /dev/null +++ b/templates/input.gx.models.yaml @@ -0,0 +1,18 @@ +"Linear": + # Linear with 10 ky modes + controls: + nonlinear_mode: false + hyper: false + HB_hyper: false + nperiod: 2 + nx: 1 + ny: 28 + +"Nonlinear": + controls: + nonlinear_mode: true + hyper: true + HB_hyper: true + nperiod: 1 + nx: 192 + ny: 64 diff --git a/templates/input.neo.controls b/templates/input.neo.controls new file mode 100644 index 00000000..3dd4b5be --- /dev/null +++ b/templates/input.neo.controls @@ -0,0 +1,24 @@ +#------------------------------------------------------------------------- +# Template input.neo file (controls-only) +#------------------------------------------------------------------------- + +SILENT_FLAG=0 # 0: output files are written + +# Setup +EQUILIBRIUM_MODEL=2 # 2: Miller +PROFILE_MODEL=1 # 1: local (one radius) +PROFILE_ERAD0_MODEL=1 # 1: Use the profile of the Erad0 parameter as specified in input.profiles + +# Resolution +N_RADIAL=1 # Number of radial grid points +N_THETA=17 # Number of poloidal grid points +N_ENERGY=6 # Number of v polynomials +N_XI=17 # Number of xi polynomials + +# Models +COLLISION_MODEL=4 # 4: Full linearized Fokker-Plank operator +SIM_MODEL=2 # 2: Numerical solution and analytic theory only +SPITZER_MODEL=0 # 0: solve the standard neoclassical transport problem +ROTATION_MODEL=2 # 2: Sonic rotation effects included (solves the Hinton-Wong generalized +THREED_MODEL=0 # 0: Toroidally axisymmetric limit (2D). +THREED_EXB_MODEL=0 # 0: higher-order drift velocity not included. 
\ No newline at end of file diff --git a/templates/input.neo.models.yaml b/templates/input.neo.models.yaml new file mode 100644 index 00000000..974bc4c5 --- /dev/null +++ b/templates/input.neo.models.yaml @@ -0,0 +1,3 @@ +"Sonic": + controls: + ROTATION_MODEL: 2 \ No newline at end of file diff --git a/templates/input.tglf.models.json b/templates/input.tglf.models.json deleted file mode 100644 index 56361582..00000000 --- a/templates/input.tglf.models.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "1": { - "label": "SAT1", - "controls": { - "SAT_RULE": 1, - "UNITS": "GYRO" - } - }, - "2": { - "label": "SAT0", - "controls": { - "SAT_RULE": 0, - "UNITS": "GYRO", - "ETG_FACTOR": 1.25 - } - }, - "3": { - "label": "SAT1geo", - "controls": { - "SAT_RULE": 1, - "UNITS": "CGYRO" - } - }, - "4": { - "label": "SAT2", - "controls": { - "SAT_RULE": 2, - "UNITS": "CGYRO", - "XNU_MODEL": 3, - "WDIA_TRAPPED": 1.0 - } - }, - "5": { - "label": "SAT2em", - "controls": { - "SAT_RULE": 2, - "UNITS": "CGYRO", - "XNU_MODEL": 3, - "WDIA_TRAPPED": 1.0, - "USE_BPER": true - } - }, - "6": { - "label": "SAT3", - "controls": { - "SAT_RULE": 3, - "UNITS": "CGYRO", - "XNU_MODEL": 3, - "WDIA_TRAPPED": 1.0 - } - }, - "100": { - "label": "settings as in ASTRA-TGLF framework", - "controls": { - "UNITS": "CGYRO", - "USE_BPER": true, - "USE_AVE_ION_GRID": true, - "SAT_RULE": 2, - "KYGRID_MODEL": 4, - "XNU_MODEL": 3, - "NBASIS_MAX": 6, - "B_MODEL_SA": 1, - "FT_MODEL_SA": 1 - } - }, - "101": { - "label": "[Experimentation] SAT3em basis", - "controls": { - "SAT_RULE": 2, - "UNITS": "CGYRO", - "XNU_MODEL": 3, - "WDIA_TRAPPED": 1.0, - "USE_BPER": true, - "KYGRID_MODEL": 4, - "NBASIS_MAX": 6 - } - } -} \ No newline at end of file diff --git a/templates/input.tglf.models.yaml b/templates/input.tglf.models.yaml new file mode 100644 index 00000000..de175cfe --- /dev/null +++ b/templates/input.tglf.models.yaml @@ -0,0 +1,69 @@ +"SAT0": + deprecated_descriptor: "2" + controls: + SAT_RULE: 0 + UNITS: GYRO + 
ETG_FACTOR: 1.25 + +"SAT1": + deprecated_descriptor: "1" + controls: + SAT_RULE: 1 + UNITS: GYRO + +"SAT1geo": + deprecated_descriptor: "3" + controls: + SAT_RULE: 1 + UNITS: CGYRO + +"SAT2": + deprecated_descriptor: "4" + controls: + SAT_RULE: 2 + UNITS: CGYRO + XNU_MODEL: 3 + WDIA_TRAPPED: 1.0 + +"SAT2astra": + deprecated_descriptor: "100" + controls: + UNITS: CGYRO + USE_BPER: true + USE_AVE_ION_GRID: true + SAT_RULE: 2 + KYGRID_MODEL: 4 + XNU_MODEL: 3 + NBASIS_MAX: 6 + B_MODEL_SA: 1 + FT_MODEL_SA: 1 + +"SAT3": + deprecated_descriptor: "6" + controls: + SAT_RULE: 3 + UNITS: CGYRO + XNU_MODEL: 3 + WDIA_TRAPPED: 1.0 + +# Experimentation +"SAT3em basis": + deprecated_descriptor: "101" + controls: + SAT_RULE: 2 + UNITS: CGYRO + XNU_MODEL: 3 + WDIA_TRAPPED: 1.0 + USE_BPER: true + KYGRID_MODEL: 4 + NBASIS_MAX: 6 + +#TODO: To be removed in the future +"SAT2em": + deprecated_descriptor: "5" + controls: + SAT_RULE: 2 + UNITS: CGYRO + XNU_MODEL: 3 + WDIA_TRAPPED: 1.0 + USE_BPER: true diff --git a/templates/maestro_namelist.json b/templates/maestro_namelist.json deleted file mode 100644 index 70dcb052..00000000 --- a/templates/maestro_namelist.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "flag": "SPARC PRD", - "seed": 0, - "machine": { - "Bt": 12.2, - "Ip": 8.7, - "separatrix": { - "type": "freegs", - "parameters": { - "R": 1.85, - "a": 0.57, - "delta_sep": 0.57, - "kappa_sep": 1.97, - "n_mxh": 5, - "geqdsk_file": "" - } - }, - "heating": { - "type": "ICRH", - "parameters": { - "P_icrh": 11.0, - "minority": [2,3], - "fmini": 0.05 - } - } - }, - "assumptions": { - "Zeff": 1.5, - "mix":{ - "fmain": 0.85, - "ZW":50, - "fW": 1.5E-5 - }, - "initialization": { - "BetaN":1.0, - "density_peaking":1.3, - "assume_neped": true, - "neped_20": 2.5, - "nesep_ratio": 0.3 - }, - "Tesep_eV": 75.0 - }, - "maestro": { - "keep_all_files": true, - "beats": ["transp_soft", "transp", "eped", "portals", "eped", "portals"], - "eped_beat": { - "use_default": false, - "eped_namelist":{ - "nn_location": 
"$MFEIM_PATH/private_code_mitim/NN_DATA/EPED-NN-SPARC/EPED-NN-MODEL-SPARC.keras", - "norm_location": "$MFEIM_PATH/private_code_mitim/NN_DATA/EPED-NN-SPARC/EPED-NN-NORMALIZATION-SPARC.txt", - "corrections_set": { - "Bt": 12.2, - "R": 1.85, - "a": 0.57 - }, - "ptop_multiplier": 1.0, - "TioverTe": 1.0 - } - - }, - "portals_beat": { - "use_default": false, - "portals_namelist" : { - "PORTALSparameters": { - "forceZeroParticleFlux": true, - "keep_full_model_folder": false, - "cores_per_tglf_instance": 1 - }, - "MODELparameters": { - "RoaLocations": [0.35,0.45,0.55,0.65,0.75,0.875,0.9], - "Physics_options": {"TypeTarget": 3}, - "transport_model": { - "turbulence": "TGLF", - "TGLFsettings": 100, - "extraOptionsTGLF": {"USE_BPER": true} - } - }, - "INITparameters": { - "FastIsThermal": true, - "quasineutrality": true - }, - "exploration_ranges": { - "ymax_rel": 1.0, - "ymin_rel": 1.0, - "hardGradientLimits": [null, 4] - }, - "change_last_radial_call" : true, - "use_previous_surrogate_data" : true, - "try_flux_match_only_for_first_point" : true - }, - "transport_preprocessing": { - "lumpImpurities": true, - "enforce_same_density_gradients": true - } - }, - "portals_soft_beat":{ - "use_default": true - }, - "transp_beat":{ - "use_default": true - }, - "transp_soft_beat":{ - "use_default": true - } - } -} diff --git a/templates/main.namelist.json b/templates/main.namelist.json deleted file mode 100644 index 9dc26580..00000000 --- a/templates/main.namelist.json +++ /dev/null @@ -1,99 +0,0 @@ -{ - "problem_options": { - "ofs": ["y0", "y1"], - "dvs": ["x0", "x1"], - "dvs_min": [0.8, 0.8], - "dvs_base": null, - "dvs_max": [1.2, 1.2] - }, - "evaluation_options": { - "parallel_evaluations": 1, - "train_Ystd": null - }, - "convergence_options": { - "maximum_iterations": 5, - "stopping_criteria": null, - "stopping_criteria_parameters": { - "maximum_value": -1e-3, - "maximum_value_is_rel": false, - "minimum_dvs_variation": [10, 3, 0.01] - } - }, - "initialization_options": { - 
"initial_training": 5, - "type_initialization": 3, - "read_initial_training_from_csv": false, - "initialization_fun": null, - "ensure_within_bounds": false, - "expand_bounds": true - }, - "acquisition_options": { - "type": "noisy_logei_mc", - "parameters": { - "mc_samples": 1024 - }, - "optimizers": ["botorch"], - "optimizer_options": { - "botorch": { - "num_restarts": 64, - "raw_samples": 4096, - "maxiter": 1000, - "sequential_q": true, - "keep_best": 1 - }, - "root": { - "num_restarts": 5, - "solver": "lm", - "maxiter": 1000, - "relative_improvement_for_stopping": 1e-4, - "keep_best": 1 - }, - "sr": { - "num_restarts": 5, - "maxiter": 1000, - "relative_improvement_for_stopping": 1e-3, - "relax": 0.1, - "relax_dyn": true, - "keep_best": 1 - }, - "ga": { - "num_restarts": 1, - "keep_best": 32 - } - }, - "relative_improvement_for_stopping": null, - "favor_proximity_type": 0, - "ensure_new_points": true, - "points_per_step": 1 - }, - "surrogate_options": { - "TypeKernel": 0, - "TypeMean": 0, - "selectSurrogate": null, - "FixedNoise": true, - "ExtraNoise": false, - "additional_constraints": null, - "ConstrainNoise": -1e-3, - "MinimumRelativeNoise": null, - "stds_outside": null, - "stds_outside_checker": 5, - "extrapointsFile": null, - "extrapointsModels": null, - "extrapointsModelsAvoidContent": null - }, - "strategy_options": { - "AllowedExcursions": [0.0, 0.0], - "HitBoundsIncrease": [1.0, 1.0], - "boundsRefine": null, - "RandomRangeBounds": 0.5, - "ToleranceNiche": 1e-3, - "applyCorrections": true, - "SwitchIterationsReduction": [null, null], - "TURBO_options": { - "apply": false, - "points": 32, - "bounds": [0.75, 1.33], - "metrics": [3, 3] - } - } -} \ No newline at end of file diff --git a/templates/namelist.maestro.yaml b/templates/namelist.maestro.yaml new file mode 100644 index 00000000..5d600baa --- /dev/null +++ b/templates/namelist.maestro.yaml @@ -0,0 +1,201 @@ +# 
(**************************************************************************************************************) +# +# This is a complete template for a MAESTRO simulation +# +# (**************************************************************************************************************) + +# ------------------------------------------------------------------------------------------------------------ +# Plasma +# ------------------------------------------------------------------------------------------------------------ + +# Name of this simulation +flag: SPARC PRD + +# Master random seed for reproducibility, to be sent to all beats that can receive seed +seed: 0 + +# Machine parameters +machine: + + # Engineering parameters (if using values in geqdsk, these must be null, otherwise they will override the geqdsk values if separatrix type is geqdsk) + Bt: 12.2 + Ip: 8.7 + + # Separatrix specification + separatrix: + + # Type of separatrix (freegs, geqdsk) + type: freegs + + # Parameters for this type of separatrix + parameters: + + # If the separatrix type receives boundary parameterization, parameters here + R: 1.85 + a: 0.57 + delta_sep: 0.57 + kappa_sep: 1.97 + + # If the separatrix type receives a geqdsk file directly, provide path here + geqdsk_file: "" + + # For both geqdsk and freegs, number of MXH coefficients to parametrize the boundary + n_mxh: 5 + + # Heating parameters + heating: + + # Only ICRH heating supported for now + type: ICRH + + parameters: + + # ICRF input power + P_icrh: 11.0 + # Minority species [Z,A] + minority: [2, 3] + # Minority fraction to be added to the mix + fmini: 0.05 + +# Simulation assumptions +assumptions: + + # Impurities + Zeff: 1.5 + mix: + fmain: 0.85 + ZW: 50 + fW: 1.5e-5 + + # Edge parameters + Tesep_eV: 75.0 + + # Initialization of profiles + initialization: + + # neped as the input (engineering) parameter + assume_neped: true + neped_20: 2.5 + + # ne_sep / ne_ped ratio to assume (e.g. 
for EPED) and construct profiles with + nesep_ratio: 0.3 + + # Profiles will be initialized such that these parameters are matched as close as possible + BetaN: 1.0 + density_peaking: 1.3 + +# ------------------------------------------------------------------------------------------------------------ +# MAESTRO workflow parameters +# ------------------------------------------------------------------------------------------------------------ + +maestro: + + # Sequence of beats + beats: ["transp_soft", "transp", "eped", "portals", "eped", "portals"] + + # Remove intermediate files to avoid heavy MAESTRO simulation folders + keep_all_files: true + + # --------------------------------------------------------------------------- + # Each individual beat parameters + # --------------------------------------------------------------------------- + + eped_beat: + + use_default: false + + eped_namelist: + + # Location of EPED NN files (if null, use full EPED) + nn_location: $MFEIM_PATH/private_code_mitim/NN_DATA/EPED-NN-SPARC/EPED-NN-MODEL-SPARC.keras + norm_location: $MFEIM_PATH/private_code_mitim/NN_DATA/EPED-NN-SPARC/EPED-NN-NORMALIZATION-SPARC.txt + + # Corrections set for EPED indicate the values that I force them to be exact, to avoid NN issues with non-trained parameters + corrections_set: + Bt: 12.2 + R: 1.85 + a: 0.57 + + # Operations after EPED pressure predictions + ptop_multiplier: 1.0 # Multiplies ptop + TioverTe: 1.0 # Ti/Te assumption at the pedestal top + + eped_initializer_beat: + use_default: false + eped_initializer_namelist: + nn_location: $MFEIM_PATH/private_code_mitim/NN_DATA/EPED-NN-SPARC/EPED-NN-MODEL-SPARC.keras + norm_location: $MFEIM_PATH/private_code_mitim/NN_DATA/EPED-NN-SPARC/EPED-NN-NORMALIZATION-SPARC.txt + corrections_set: + Bt: 12.2 + R: 1.85 + a: 0.57 + ptop_multiplier: 1.0 + TioverTe: 1.0 + + portals_beat: + + use_default: false + + portals_namelist: + + # If null, it will use the default namelist at: __mitimroot__ / "templates" / 
"namelist.portals.yaml" + portals_namelist_location: null + + # ------------------------------------------------------------------------------------------------ + # PORTALS parameters (only those parameters to change the main PORTALS namelist) + # ------------------------------------------------------------------------------------------------ + portals_parameters: + solution: + predicted_roa: [0.35, 0.45, 0.55, 0.65, 0.75, 0.875, 0.9] + keep_full_model_folder: false + exploration_ranges: + ymax: 1.0 + ymin: 4.0 + yminymax_atleast: [null, 4] + transport: + options: + tglf: + run: + code_settings: "SAT2astra" + extraOptions: + USE_BPER: true + keep_files: "none" + target: + options: + force_zero_particle_flux: true + + optimization_options: + convergence_options: + maximum_iterations: 30 + strategy_options: + AllowedExcursions: [0.25, 0.0] + + # Operations to do to mitim_state before passing it to PORTALS + initialization_parameters: + thermalize_fast: true + quasineutrality: true + + # If the width of the pedestal has changed, modify the BC (last in predicted_rX) for PORTALS accordingly + change_last_radial_call: true + + # Later beats to utilize past beats surrogate data if available (e.g. 
if position has not changed) + use_previous_surrogate_data: true + + # If surrogates exist, use them to find the first training point (and limit to only one point) + try_flux_match_only_for_first_point: true + + # Operations that will be included as part of the profiles_postprocessing_fun + transport_preprocessing: + # By default, run PORTALS with all impurities lumped into one species + lumpImpurities: true + # By default, enforce that all species have the same density gradient + enforce_same_density_gradients: true + + portals_soft_beat: + use_default: true + + transp_beat: + use_default: true + + transp_soft_beat: + use_default: true \ No newline at end of file diff --git a/templates/namelist.optimization.yaml b/templates/namelist.optimization.yaml new file mode 100644 index 00000000..83f10e29 --- /dev/null +++ b/templates/namelist.optimization.yaml @@ -0,0 +1,89 @@ +problem_options: + ofs: ["y0", "y1"] + dvs: ["x0", "x1"] + dvs_min: [0.8, 0.8] + dvs_base: null + dvs_max: [1.2, 1.2] + +evaluation_options: + parallel_evaluations: 1 + train_Ystd: null + +# Convergence options for the BO optimization process +convergence_options: + maximum_iterations: 5 + stopping_criteria: "import::mitim_tools.opt_tools.STRATEGYtools.stopping_criteria_default" + stopping_criteria_parameters: + maximum_value: -0.001 + maximum_value_is_rel: false + minimum_inputs_variation: [10, 3, 0.01] + +initialization_options: + initial_training: 5 + type_initialization: 3 + read_initial_training_from_csv: false + initialization_fun: null + ensure_within_bounds: false + expand_bounds: true + +acquisition_options: + type: noisy_logei_mc + parameters: + mc_samples: 1024 + optimizers: ["botorch"] + optimizer_options: + botorch: + num_restarts: 64 + raw_samples: 4096 + maxiter: 1000 + sequential_q: true + keep_best: 1 + root: + num_restarts: 5 + solver: lm + maxiter: 1000 + relative_improvement_for_stopping: 0.0001 + keep_best: 1 + sr: + num_restarts: 5 + maxiter: 1000 + 
relative_improvement_for_stopping: 0.001 + relax: 0.1 + relax_dyn: true + keep_best: 1 + ga: + num_restarts: 1 + keep_best: 32 + relative_improvement_for_stopping: null + favor_proximity_type: 0 + ensure_new_points: true + points_per_step: 1 + +surrogate_options: + TypeKernel: 0 + TypeMean: 0 + surrogate_selection: null + FixedNoise: true + ExtraNoise: false + additional_constraints: null + ConstrainNoise: -0.001 + MinimumRelativeNoise: null + stds_outside: null + stds_outside_checker: 5 + extrapointsFile: null + extrapointsModels: null + extrapointsModelsAvoidContent: null + +strategy_options: + AllowedExcursions: [0.0, 0.0] + HitBoundsIncrease: [1.0, 1.0] + boundsRefine: null + RandomRangeBounds: 0.5 + ToleranceNiche: 0.001 + applyCorrections: true + SwitchIterationsReduction: [null, null] + TURBO_options: + apply: false + points: 32 + bounds: [0.75, 1.33] + metrics: [3, 3] \ No newline at end of file diff --git a/templates/plot_popcon.yaml b/templates/namelist.plot_popcon.yaml similarity index 100% rename from templates/plot_popcon.yaml rename to templates/namelist.plot_popcon.yaml diff --git a/templates/namelist.portals.yaml b/templates/namelist.portals.yaml new file mode 100644 index 00000000..7a7597e6 --- /dev/null +++ b/templates/namelist.portals.yaml @@ -0,0 +1,302 @@ +# (**************************************************************************************************************) +# +# This is a complete template for a PORTALS-TGLF+NEO profile prediction. 
+# The user is welcomed to change any of the parameters below to fit their specific use case, +# by copying this template namelist and instantiating the PORTALS object with its path: +# +# portals_fun = PORTALSmain.portals(folder, portals_namelist=PATH_TO_NAMELIST) +# +# Alternatively, you can simply use the default parameters provided in this template by not +# specifying a namelist and then, in the launching script change the dictionary values +# (*before* you perform the portals_fun.prep() command): +# +# portals_fun = PORTALSmain.portals(folder) +# portals_fun.portals_parameters["solution"]['predicted_roa'] = [0.25, 0.45, 0.65, 0.85] +# portals_fun.portals_parameters["solution"]['predicted_channels'] = ["te", "ti", "ne", "nZ", 'w0'] +# ... +# +# The dictionary follows the same structure as the YAML namelist, except the optimization_options that must +# be passed as a separate dictionary (because they are common among optimization problems): +# +# portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 10 +# +# (**************************************************************************************************************) + +# ----------------------------------------------------------------- +# Main solver parameters +# ----------------------------------------------------------------- + +solution: + + # Specification of radial locations (if both provided, predicted_roa is used instead of predicted_rho) + predicted_roa: null + predicted_rho: [0.35, 0.55, 0.75, 0.875, 0.9] + + # Channels to be predicted (Options: ["te", "ti", "ne", "nZ", "w0"]) + predicted_channels: ["te", "ti", "ne"] + + # Run turbulent exchange as surrogate + turbulent_exchange_as_surrogate: false + + # Impurity to do flux-matching for if nZ enabled (name of first impurity instance AFTER postprocessing), e.g. 
"W" + trace_impurity: null + + # Options that define boundaries for the optimization + exploration_ranges: + + # Are ymax and ymin relative or absolute + limits_are_relative: true + + # Bounds for the input parameters. ymax/ymin can be float (common for all radii, channels) or a dictionary:: + # ymax: + # 'te': [1.0, 0.5, 0.5, 0.5] + # 'ti': [0.5, 0.5, 0.5, 0.5] + # ... + ymax: 1.0 + ymin: 1.0 + yminymax_atleast: null # Defines minimum range of exploration, e.g. [0,2] means the gradient range will be at least from 0 to 2.0 even if not achieved via the ymin/ymax specification + + # enforce_finite_aLT is used to be able to select ymin = 2.0 for ne but ensure that te, ti is at, e.g., enforce_finite_aLT = 0.95 + enforce_finite_aLT: null + + define_ranges_from_profiles: null + + # start_from_folder is a folder from which to grab optimization_data and optimization_extra + # (if used with reevaluate_targets>0, change targets by reevaluating with different parameters) + start_from_folder: null + + reevaluate_targets: 0 + + # DEPCRECATED, #TOREMOVE + fixed_gradients: null + + # If False, remove full model folder after evaluation, to avoid carrying large folders (e.g. 
in MAESTRO runs) + keep_full_model_folder: true + + # If True, fit surrogate model to GZ/nZ instead of GZ, valid on the trace limit + impurity_trick: true + + # If provided, using fZ0_as_weight/fZ_0 as scaling factor for GZ, where fZ_0 is the original impurity concentration on axis + fZ0_as_weight: null + fImp_orig: 1.0 + + # [Qe,Qi,Ge,Mt,GZ] multipliers to calculate scalarized function + scalar_multipliers: [1.0, 1.0, 1.0, 1.0, 1.0] + + # Physics-informed parameters to fit surrogates (numbers are the last iteration to consider that line; keys are the physics-informed parameters + # and the values are the corresponding variables to check if they have varied, that affect that input) + portals_transformation_variables: + 10: {aLte: [aLte], aLti: [aLti], aLne: [aLne], aLw0_n: [aLw0]} + 30: {aLte: [aLte], aLti: [aLti], aLne: [aLne], aLw0_n: [aLw0], nuei: [te, ne], tite: [te, ti], w0_n: [w0]} + 10000: {aLte: [aLte], aLti: [aLti], aLne: [aLne], aLw0_n: [aLw0], nuei: [te, ne], tite: [te, ti], w0_n: [w0], beta_e: [te, ne]} + + # Physics-informed parameters to fit surrogates for trace impurities + portals_transformation_variables_trace: + 10: {aLte: [aLte], aLti: [aLti], aLne: [aLne], aLw0_n: [aLw0], aLnZ: [aLnZ]} + 30: {aLte: [aLte], aLti: [aLti], aLne: [aLne], aLw0_n: [aLw0], nuei: [te, ne], tite: [te, ti], w0_n: [w0], aLnZ: [aLnZ]} + 10000: {aLte: [aLte], aLti: [aLti], aLne: [aLne], aLw0_n: [aLw0], nuei: [te, ne], tite: [te, ti], w0_n: [w0], beta_e: [te, ne], aLnZ: [aLnZ]} + +# ----------------------------------------------------------------- +# Transport model parameters +# ----------------------------------------------------------------- + +transport: + + # Transport model class + evaluator: "import::mitim_modules.powertorch.utils.TRANSPORTtools.portals_transport_model" + + # Select transport models (when instantiating the evaluator, assign these parameters) + evaluator_instance_attributes: + + # Turbulent transport model + turbulence_model: "tglf" + + # Neoclassical 
transport model + neoclassical_model: "neo" + + # Simulation kwargs to be passed directly to run and read commands (defaults here) + options: + + # ********************************************************************************************************* + # TGLF + # ********************************************************************************************************* + tglf: + + # Kwargs to be passed to the run command + run: + code_settings: "SAT3" + extraOptions: {} + + # Kwargs to be passed to the read command + read: {} + + # If not None, use TGLF scan trick to calculate TGLF errors with this maximum delta + use_scan_trick_for_stds: 0.02 + + # [EXPERIMENTAL] If True, store previous evaluations and reuse them if they are within the delta of all inputs (to capture combinations) + reuse_scan_ball: false + + # Files to keep from simulation (none: all runs will only keep minimal files; base: minimal files for scans only; all: retrieve all files) + keep_files: "base" + + # Number of cores to use per TGLF instance + cores_per_tglf_instance: 1 + + # (%) Error (std, in percent) of model evaluation TGLF if not scan trick + percent_error: 5.0 + + # If True, and fast ions have been included, sum fast. 
This only occurs if the species is considered fast by TGLF
quasineutrality by modifying the thermal ion densities together with ne + recalculate_ptot: true # Recompute PTOT to insert in input file each time + Tfast_ratio: false # Keep the ratio of Tfast/Te constant throughout the Te evolution + force_mach: null # Change w0 to match this Mach number when Ti varies + + +# ----------------------------------------------------------------- +# Target model parameters +# ----------------------------------------------------------------- + +target: + + # Target model evaluator + evaluator: "import::mitim_modules.powertorch.physics_models.targets_analytic.analytical_model" + + options: + + # Targets to evolve (Options: ["qie", "qrad", "qfus"]) + targets_evolve: ["qie", "qrad", "qfus"] + + # Method to calculate targets (tgyro or powerstate) + target_evaluator_method: "powerstate" + + # If True, ignore particle flux profile and assume zero for all radii + force_zero_particle_flux: false + + # If not None, calculate targets with this radial resolution + targets_resolution: 20 + + # (%) Error (std, in percent) of model evaluation + percent_error: 1 + +# ----------------------------------------------------------------- +# Optimization options namelist +# ----------------------------------------------------------------- + +optimization_namelist_location: null # If null, it will grab the default at: __mitimroot__ / "templates" / "namelist.optimization.yaml" + +# ----------------------------------------------------------------- +# Optimization options (to change the main optimization namelist) +# ----------------------------------------------------------------- + +optimization_options: + + initialization_options: + + # PORTALS works well with 5 initial training points obtained with simple relaxation + initial_training: 5 + initialization_fun: "import::mitim_modules.portals.utils.PORTALSoptimization.initialization_simple_relax" + + # Convergence options for the optimization process + convergence_options: + + # Criterion 1: Maximum iterations 
+ maximum_iterations: 50 + + # Stopping function for the rest of criteria + stopping_criteria: "import::mitim_modules.portals.PORTALStools.stopping_criteria_portals" + + stopping_criteria_parameters: + + # Criterion 2: Residual reduction + maximum_value: 5.e-3 # Reducing residual by 200x is enough for most PORTALS runs + maximum_value_is_rel: true # Indicates that maximum_value is relative to iteration #0 + + # Criterion 3: Variation of input parameters + minimum_inputs_variation: [10, 5, 0.1] # After iteration 10, Check if 5 consecutive DVs are varying less than 0.1% from the rest that has been evaluated + + # Criterion 4: Ricci metric + ricci_value: 0.05 + ricci_d0: 2.0 + ricci_lambda: 0.5 + + acquisition_options: + + # Relative improvement for stopping criteria of the acquisition optimization + relative_improvement_for_stopping: 1.e-2 # Reducing residual by 100x is enough + + # Type of acquisition function (Options: ["posterior_mean", "noisy_logei_mc", ...]) + type: "posterior_mean" + + # Optimizers to apply sequentially (Options: ["sr", "root", "botorch"]) + optimizers: ["sr", "root"] + + surrogate_options: + + # Function to select the GP parameters depending on each channel/radius (superseeds the main optimization namelist) + surrogate_selection: "import::mitim_modules.portals.PORTALStools.surrogate_selection_portals" + + strategy_options: + + # Allow excursions from the bounds + AllowedExcursions: [0.0, 0.0] \ No newline at end of file diff --git a/tests/CGYRO_workflow.py b/tests/CGYRO_workflow.py index 557f085f..ac5f97b1 100644 --- a/tests/CGYRO_workflow.py +++ b/tests/CGYRO_workflow.py @@ -12,37 +12,64 @@ folder.mkdir(parents=True, exist_ok=True) -cgyro = CGYROtools.CGYRO() +cgyro = CGYROtools.CGYRO(rhos = [0.5, 0.7]) -cgyro.prep(folder,gacode_file) +cgyro.prep(gacode_file,folder) + +# --------------- +# Standalone run +# --------------- + +run_type = 'submit' # 'normal': submit and wait; 'submit': Just prepare and submit, do not wait [requies cgyro.check() 
and cgyro.fetch()] cgyro.run( 'linear', - roa = 0.55, - CGYROsettings=0, + code_settings="Linear", extraOptions={ - 'KY':0.3 - }) + 'KY':0.5, + 'MAX_TIME': 10.0, # Short, I just want to test the run. Enough to get the restart file + }, + slurm_setup={ + 'cores':16, # Each CGYRO instance (each radius will have this number of cores or gpus) + 'minutes': 10, + }, + cold_start=cold_start, + forceIfcold_start=True, + run_type=run_type, + ) + +if run_type == 'submit': + cgyro.check(every_n_minutes=1) + cgyro.fetch() + cgyro.read(label="cgyro1") +cgyro.plot(labels=["cgyro1"]) -cgyro.run( - 'linear', - roa = 0.55, - CGYROsettings=0, - extraOptions={ - 'KY':0.5 - }) -cgyro.read(label="cgyro2") +# --------------- +# Scan of KY +# --------------- -cgyro.run( - 'linear', - roa = 0.55, - CGYROsettings=0, +run_type = 'normal' + +cgyro.run_scan( + 'scan1', + code_settings="Linear", extraOptions={ - 'KY':0.7 - }) -cgyro.read(label="cgyro3") + 'MAX_TIME': 10.0, # Short, I just want to test the run. Enough to get the restart file + }, + variable='KY', + varUpDown=[0.3,0.4], + slurm_setup={ + 'cores':16 + }, + cold_start=cold_start, + forceIfcold_start=True, + run_type=run_type + ) + +cgyro.plot(labels=["scan1_KY_0.3","scan1_KY_0.4"], fn = cgyro.fn) +fig = cgyro.fn.add_figure(label="Quick linear") +cgyro.plot_quick_linear(labels=["scan1_KY_0.3","scan1_KY_0.4"], fig = fig) -cgyro.plotLS() -cgyro.fnLS.show() +cgyro.fn.show() diff --git a/tests/EPED_workflow.py b/tests/EPED_workflow.py new file mode 100644 index 00000000..66a38662 --- /dev/null +++ b/tests/EPED_workflow.py @@ -0,0 +1,46 @@ +import os +import matplotlib.pyplot as plt +from mitim_tools.eped_tools import EPEDtools +from mitim_tools import __mitimroot__ + +cold_start = True + +folder = __mitimroot__ / "tests" / "scratch" / "eped_test2" + +if cold_start and os.path.exists(folder): + os.system(f"rm -r {folder}") + +eped = EPEDtools.EPED(folder=folder) + +eped.run( + subfolder = 'case1', + input_params = { + 'ip': 8.7, + 'bt': 
12.16, + 'r': 1.85, + 'a': 0.57, + 'kappa': 1.9, + 'delta': 0.5, + 'neped': 30.0, + 'betan': 1.0, + 'zeffped': 1.5, + 'nesep': 10.0, + 'tesep': 100.0, + 'zeta': 0.01 + # Can add zeta if your implementation of EPED supports it + }, + scan_param = {'variable': 'neped', 'values': [15.0, 30.0, 45.0, 60.0, 75.0]}, + keep_nsep_ratio = 0.4, + nproc_per_run = 64, + cold_start = cold_start, + job_array_limit=5, + removeScratchFolders = True, #ONLY CHANGE THIS FOR DEBUGGING, if you make this False, your EPED runs will be saved and they are enormous + +) + +eped.read(subfolder='case1') + +eped.plot(labels=['case1']) + +eped.fn.show() + diff --git a/tests/GX_workflow.py b/tests/GX_workflow.py new file mode 100644 index 00000000..36424da8 --- /dev/null +++ b/tests/GX_workflow.py @@ -0,0 +1,43 @@ +import os +from mitim_tools.gacode_tools.PROFILEStools import gacode_state +from mitim_tools.simulation_tools.physics import GXtools +from mitim_tools import __mitimroot__ + +cold_start = True + +(__mitimroot__ / 'tests' / 'scratch').mkdir(parents=True, exist_ok=True) + +folder = __mitimroot__ / "tests" / "scratch" / "gx_test" +input_gacode = __mitimroot__ / "tests" / "data" / "input.gacode" + +# Reduce the ion species to just 1 +p = gacode_state(input_gacode) +p.lumpIons() +# -------------------------------- + +if cold_start and folder.exists(): + os.system(f"rm -r {folder.resolve()}") + +gx = GXtools.GX(rhos=[0.5, 0.6]) +gx.prep(p, folder) + +gx.run( + 'gx1/', + cold_start=cold_start, + code_settings="Linear", + extraOptions={ + 't_max':5.0, # Run up to 5 a/c_s (should take ~2min using 8 A100s) + 'y0' :5.0, # kymin = 1/y0 = 0.2 + 'ny': 34, # nky = 1 + (ny-1)/3 = 12 -> ky_range = 0.2 - 2.4 + }, + slurm_setup = { + "cores": 4, # Each of the two radius with 4 GPUs each + "minutes": 10 + } + ) +gx.read('gx1') + +gx.plot(labels=['gx1']) + +gx.fn.show() +gx.fn.close() diff --git a/tests/MAESTRO_workflow.py b/tests/MAESTRO_workflow.py index b206c428..ec8857c6 100644 --- 
a/tests/MAESTRO_workflow.py +++ b/tests/MAESTRO_workflow.py @@ -6,7 +6,7 @@ cold_start = True folder = __mitimroot__ / "tests" / "scratch" / "maestro_test" -template = __mitimroot__ / "templates" / "maestro_namelist.json" +template = __mitimroot__ / "templates" / "namelist.maestro.yaml" if cold_start and os.path.exists(folder): os.system(f"rm -r {folder}") diff --git a/tests/NEO_workflow.py b/tests/NEO_workflow.py new file mode 100644 index 00000000..35fef115 --- /dev/null +++ b/tests/NEO_workflow.py @@ -0,0 +1,35 @@ +import os +import numpy as np +from mitim_tools.gacode_tools import NEOtools +from mitim_tools import __mitimroot__ + +cold_start = True + +(__mitimroot__ / 'tests' / 'scratch').mkdir(parents=True, exist_ok=True) + +folder = __mitimroot__ / "tests" / "scratch" / "neo_test" +input_gacode = __mitimroot__ / "tests" / "data" / "input.gacode" + +if cold_start and folder.exists(): + os.system(f"rm -r {folder.resolve()}") + +neo = NEOtools.NEO(rhos=np.linspace(0.1,0.95,5)) +neo.prep(input_gacode, folder) + +neo.run('neo1/', cold_start=cold_start) +neo.read('NEO default') + +neo.run('neo2/', cold_start=cold_start, extraOptions={'N_ENERGY':5,'N_XI': 11, 'N_THETA': 11}) +neo.read('NEO low res') + +neo.run('neo3/', cold_start=cold_start, extraOptions={'N_ENERGY':5,'N_XI': 11, 'N_THETA': 11}, multipliers={'DLNTDR_1': 1.5}) +neo.read('NEO low res + 50% aLTe') + +neo.plot(labels=['NEO default', 'NEO low res', 'NEO low res + 50% aLTe']) + +neo.run_scan('scan1', cold_start=cold_start, variable='DLNTDR_1', varUpDown=np.linspace(0.5, 1.5, 4)) +neo.read_scan(label='scan1',variable = 'DLNTDR_1') +neo.plot_scan(labels=['scan1'], fn = neo.fn) + +neo.fn.show() +neo.fn.close() diff --git a/tests/OPT_workflow.py b/tests/OPT_workflow.py index d05bcf53..f6118a9d 100644 --- a/tests/OPT_workflow.py +++ b/tests/OPT_workflow.py @@ -62,7 +62,7 @@ def scalarized_objective(self, Y): # ----- Inputs # 
----------------------------------------------------------------------------------------------------- -namelist = __mitimroot__ / "templates" / "main.namelist.json" +namelist = __mitimroot__ / "templates" / "namelist.optimization.yaml" folderWork = __mitimroot__ / "tests" / "scratch" / "opt_test" if cold_start and os.path.exists(folderWork): @@ -75,7 +75,7 @@ def scalarized_objective(self, Y): # Initialize class opt_fun1D = opt_class(folderWork, namelist) -# Changes to namelist in templates/main.namelist.json +# Changes to namelist in templates/namelist.optimization.yaml opt_fun1D.optimization_options["initialization_options"]["initial_training"] = 2 # Initialize BO framework diff --git a/tests/PORTALS_workflow.py b/tests/PORTALS_workflow.py index 427a935d..79f5f296 100644 --- a/tests/PORTALS_workflow.py +++ b/tests/PORTALS_workflow.py @@ -17,32 +17,32 @@ if cold_start and folderWork.exists(): os.system(f"rm -r {folderWork.resolve()}") -# Let's not consume the entire computer resources when running test... 
limit threads -torch.set_num_threads(8) - -# -------------------------------------------------------------------------------------------- +# --------------------------------------------------------------------------------------------------------------------- # Optimization Class -# -------------------------------------------------------------------------------------------- +# --------------------------------------------------------------------------------------------------------------------- -# Initialize class +# Initialize class with the default namelist in templates/namelist.portals.yaml but modify some of its parameters portals_fun = PORTALSmain.portals(folderWork) -portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 2 -portals_fun.optimization_options["initialization_options"]["initial_training"] = 3 -portals_fun.MODELparameters["RhoLocations"] = [0.25, 0.45, 0.65, 0.85] -portals_fun.MODELparameters['ProfilesPredicted'] = ["te", "ti", "ne", "nZ", 'w0'] -portals_fun.PORTALSparameters['ImpurityOfInterest'] = 'N' -portals_fun.PORTALSparameters['surrogateForTurbExch'] = True -portals_fun.INITparameters["removeFast"] = True -portals_fun.INITparameters["quasineutrality"] = True -portals_fun.INITparameters["sameDensityGradients"] = True -portals_fun.MODELparameters["transport_model"]["TGLFsettings"] = 2 +portals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 1 +portals_fun.optimization_options["initialization_options"]["initial_training"] = 2 + +portals_fun.portals_parameters["solution"]['turbulent_exchange_as_surrogate'] = True + +portals_fun.portals_parameters["solution"]["predicted_rho"] = [0.25, 0.45, 0.65, 0.85] +portals_fun.portals_parameters["solution"]["predicted_channels"] = ["te", "ti", "ne", "nZ", 'w0'] +portals_fun.portals_parameters["solution"]["trace_impurity"] = 'N' +portals_fun.portals_parameters["transport"]["options"]["tglf"]["run"]["code_settings"] = "SAT0" + +# Prepare case to run +plasma_state 
= PROFILEStools.gacode_state(inputgacode) +plasma_state.correct(options={"recalculate_ptot": True, "remove_fast": True, "quasineutrality": True, "enforce_same_aLn": True}) # Prepare run -portals_fun.prep(inputgacode) +portals_fun.prep(plasma_state) -# -------------------------------------------------------------------------------------------- +# --------------------------------------------------------------------------------------------------------------------- # Run -# -------------------------------------------------------------------------------------------- +# --------------------------------------------------------------------------------------------------------------------- # Run mitim_bo = STRATEGYtools.MITIM_BO(portals_fun, cold_start=cold_start, askQuestions=False) @@ -52,12 +52,12 @@ portals_fun.plot_optimization_results(analysis_level=4) # For fun and to show capabilities, let's do a flux match of the current surrogates and plot in the same notebook -# PORTALSoptimization.flux_match_surrogate( -# mitim_bo.steps[-1],PROFILEStools.PROFILES_GACODE(inputgacode), -# fn = portals_fun.fn, -# plot_results = True, -# keep_within_bounds = False -# ) +PORTALSoptimization.flux_match_surrogate( + mitim_bo.steps[-1],PROFILEStools.gacode_state(inputgacode), + fn = portals_fun.fn, + plot_results = True, + keep_within_bounds = False + ) # Required if running in non-interactive mode portals_fun.fn.show() diff --git a/tests/POWERTORCH_workflow.py b/tests/POWERTORCH_workflow.py index 6175176f..ac093fcf 100644 --- a/tests/POWERTORCH_workflow.py +++ b/tests/POWERTORCH_workflow.py @@ -3,19 +3,19 @@ import numpy as np from mitim_tools.gacode_tools import PROFILEStools from mitim_modules.powertorch import STATEtools -from mitim_modules.powertorch.physics import TRANSPORTtools +from mitim_modules.powertorch.physics_models import transport_analytic from mitim_tools import __mitimroot__ # Inputs -inputgacode = PROFILEStools.PROFILES_GACODE(__mitimroot__ / "tests" / "data" / 
"input.gacode") +inputgacode = PROFILEStools.gacode_state(__mitimroot__ / "tests" / "data" / "input.gacode") rho = torch.from_numpy(np.linspace(0.1,0.9,9)).to(dtype=torch.double) s = STATEtools.powerstate(inputgacode, - EvolutionOptions = { 'ProfilePredicted': ['te', 'ti'], + evolution_options = { 'ProfilePredicted': ['te', 'ti'], 'rhoPredicted': rho }, - TransportOptions = { 'transport_evaluator': TRANSPORTtools.diffusion_model, - 'ModelOptions': { + transport_options = { 'transport_evaluator': transport_analytic.diffusion_model, + "options": { 'chi_e': torch.ones(rho.shape[0]).to(rho)*0.8, 'chi_i': torch.ones(rho.shape[0]).to(rho)*1.2 } diff --git a/tests/TGLF_workflow.py b/tests/TGLF_workflow.py index 5c912513..439ab26a 100644 --- a/tests/TGLF_workflow.py +++ b/tests/TGLF_workflow.py @@ -13,31 +13,43 @@ os.system(f"rm -r {folder.resolve()}") tglf = TGLFtools.TGLF() -tglf.prep_from_tglf(folder, input_tglf) +tglf.prep_from_file(folder, input_tglf) tglf.run( - subFolderTGLF="run1/", - TGLFsettings=None, + "run1/", + code_settings='SAT1', cold_start=cold_start, + runWaveForms = [0.67, 10.0], forceIfcold_start=True, extraOptions={"USE_BPER": False, "USE_BPAR": False}, slurm_setup={"cores": 4, "minutes": 1}, ) -tglf.read(label="ES") +tglf.read(label="ES (SAT1)") tglf.run( - subFolderTGLF="run2/", - TGLFsettings=None, + "run2/", + code_settings='SAT1', cold_start=cold_start, forceIfcold_start=True, extraOptions={"USE_BPER": True, "USE_BPAR": True}, slurm_setup={"cores": 4, "minutes": 1}, ) -tglf.read(label="EM") +tglf.read(label="EM (SAT1)") -tglf.plot(labels=["EM","ES"]) +tglf.run( + "run3/", + code_settings='SAT3', + cold_start=cold_start, + forceIfcold_start=True, + extraOptions={"USE_BPER": True, "USE_BPAR": True}, + slurm_setup={"cores": 4, "minutes": 1}, +) + +tglf.read(label="EM (SAT3)") + +tglf.plot(labels=["EM (SAT1)","ES (SAT1)", "EM (SAT3)"]) # Required if running in non-interactive mode tglf.fn.show() \ No newline at end of file diff --git 
a/tests/TGLFfull_workflow.py b/tests/TGLFfull_workflow.py index 88789a4f..5c7cf628 100644 --- a/tests/TGLFfull_workflow.py +++ b/tests/TGLFfull_workflow.py @@ -18,11 +18,11 @@ os.system(f"rm -r {folder}") tglf = TGLFtools.TGLF(cdf=cdf_file, time=2.5, avTime=0.02, rhos=np.array([0.6, 0.8])) -_ = tglf.prep(folder, cold_start=cold_start) +_ = tglf.prep_using_tgyro(folder, cold_start=cold_start) tglf.run( - subFolderTGLF="runSAT2", - TGLFsettings=5, + subfolder="runSAT2", + code_settings=5, runWaveForms=[0.1,0.3], cold_start=cold_start, forceIfcold_start=True, @@ -30,8 +30,8 @@ tglf.read(label="runSAT2", d_perp_cm={0.6: 0.5, 0.8: 0.5}) tglf.run( - subFolderTGLF="runSAT0", - TGLFsettings=2, + subfolder="runSAT0", + code_settings=2, runWaveForms=[0.5], cold_start=cold_start, forceIfcold_start=True, @@ -39,8 +39,8 @@ tglf.read(label="runSAT0", d_perp_cm={0.6: 0.5, 0.8: 0.5}) tglf.run( - subFolderTGLF="runSAT3", - TGLFsettings=6, + subfolder="runSAT3", + code_settings=6, runWaveForms=[0.5], cold_start=cold_start, forceIfcold_start=True, diff --git a/tests/TGLFscan_workflow.py b/tests/TGLFscan_workflow.py index 5cf22f23..1b991ffc 100644 --- a/tests/TGLFscan_workflow.py +++ b/tests/TGLFscan_workflow.py @@ -14,23 +14,25 @@ os.system(f"rm -r {folder.resolve()}") tglf = TGLFtools.TGLF(rhos=[0.5, 0.7]) -tglf.prep(folder, inputgacode=input_gacode, cold_start=cold_start) +tglf.prep(input_gacode,folder, cold_start=cold_start) -tglf.runScan( subFolderTGLF = 'scan1', - TGLFsettings = None, +tglf.run_scan( subfolder = 'scan1', + code_settings = None, + extraOptions = {"USE_BPER": [False, True]}, # extraOptions can receive a list to provide different values per rho cold_start = cold_start, runWaveForms = [0.67, 10.0], variable = 'RLTS_1', - varUpDown = np.linspace(0.5,1.5,16)) -tglf.readScan(label='scan1',variable = 'RLTS_1') + varUpDown = np.linspace(0.5,1.5,4)) +tglf.read_scan(label='scan1',variable = 'RLTS_1') -tglf.plotScan(labels=['scan1']) +tglf.plot_scan(labels=['scan1']) 
tglf.fn.show() tglf.fn.close() tglf.runScanTurbulenceDrives( - subFolderTGLF = 'turb_drives', - TGLFsettings = None, + subfolder = 'turb_drives', + code_settings = None, + resolutionPoints=3, cold_start = cold_start) tglf.plotScanTurbulenceDrives(label='turb_drives') diff --git a/tests/TGYRO_workflow.py b/tests/TGYRO_workflow.py index 2f6f2cd7..b2ec7557 100644 --- a/tests/TGYRO_workflow.py +++ b/tests/TGYRO_workflow.py @@ -18,7 +18,7 @@ if cold_start and folder.exists(): os.system(f"rm -r {folder.resolve()}") -profiles = PROFILEStools.PROFILES_GACODE(gacode_file) +profiles = PROFILEStools.gacode_state(gacode_file) tgyro = TGYROtools.TGYRO() tgyro.prep(folder, profilesclass_custom=profiles, cold_start=True, forceIfcold_start=True) diff --git a/tests/VITALS_workflow.py b/tests/VITALS_workflow.py index df0ebf1f..36d72caf 100644 --- a/tests/VITALS_workflow.py +++ b/tests/VITALS_workflow.py @@ -18,7 +18,7 @@ os.system(f"rm -r {folderWork}") rho = 0.5 -TGLFsettings = 2 +code_settings = 2 dvs = ["RLTS_1", "RLTS_2", "RLNS_1", "ZEFF", "TAUS_2"] ofs = ["Qe", "Qi", "TeFluct", "neTe"] @@ -30,8 +30,8 @@ # ******************************************************************************** tglf = TGLFtools.TGLF(rhos=[rho]) -cdf = tglf.prep(folderWork, cold_start=cold_start, inputgacode=inputgacode) -tglf.run(subFolderTGLF="run_base/", TGLFsettings=TGLFsettings, cold_start=cold_start) +cdf = tglf.prep_using_tgyro(folderWork, cold_start=cold_start, inputgacode=inputgacode) +tglf.run(subfolder="run_base/", code_settings=code_settings, cold_start=cold_start) # ******************************************************************************** # Then, add experimental data of fluctuation information and error bars @@ -70,7 +70,7 @@ vitals_fun = VITALSmain.vitals(folderWork) vitals_fun.optimization_options["convergence_options"]["maximum_iterations"] = 2 -vitals_fun.TGLFparameters["TGLFsettings"] = TGLFsettings +vitals_fun.TGLFparameters["code_settings"] = code_settings vitals_fun.prep(file, 
rho, ofs, dvs, dvs_min, dvs_max) diff --git a/tests/data/FolderTRANSP/12345X01TR.DAT b/tests/data/FolderTRANSP/12345X01TR.DAT index 7c9d45a5..b4e80213 100644 --- a/tests/data/FolderTRANSP/12345X01TR.DAT +++ b/tests/data/FolderTRANSP/12345X01TR.DAT @@ -33,7 +33,7 @@ tgrid2 = 1e-3 ! Control of time resolution of 2D input data dtmaxg = 0.001 ! Max time step for MHD -!----- MPI Settings +!----- MPI code_settings nbi_pserve =0 ntoric_pserve =1 diff --git a/tutorials/PORTALS_tutorial.py b/tutorials/PORTALS_tutorial.py index ea36678c..36d20804 100644 --- a/tutorials/PORTALS_tutorial.py +++ b/tutorials/PORTALS_tutorial.py @@ -8,38 +8,37 @@ # Starting input.gacode file inputgacode = __mitimroot__ / "tests" / "data" / "input.gacode" -folder = __mitimroot__ / "tests" / "scratch" / "portals_tut" +folder = __mitimroot__ / "tests" / "scratch" / "portals_tutorial" # Initialize PORTALS class portals_fun = PORTALSmain.portals(folder) -# Radial locations (RhoLocations or RoaLocations [last one preceeds]) -portals_fun.MODELparameters["RhoLocations"] = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85] +# Radial locations (predicted_rho or predicted_roa [last one preceeds]) +portals_fun.portals_parameters["solution"]["predicted_rho"] = [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85] # Profiles to predict -portals_fun.MODELparameters["ProfilesPredicted"] = ["te", "ti", "ne"] +portals_fun.portals_parameters["solution"]["predicted_channels"] = ["te", "ti", "ne"] # Codes to use -from mitim_modules.powertorch.physics import TRANSPORTtools -portals_fun.PORTALSparameters["transport_evaluator"] = TRANSPORTtools.tgyro_model +from mitim_modules.powertorch.physics_models.transport_tgyro import tgyro_model +portals_fun.portals_parameters["transport"]["evaluator"] = tgyro_model # TGLF specifications -portals_fun.MODELparameters["transport_model"] = { - "turbulence":'TGLF', - "TGLFsettings": 6, # Check out templates/input.tglf.models.json for more options +portals_fun.portals_parameters["transport"]["options"] = { + 
"code_settings": 6, # Check out templates/input.tglf.models.yaml for more options "extraOptionsTGLF": {"USE_BPER": False} # Turn off BPER } # Plasma preparation: remove fast species, adjust quasineutrality -portals_fun.INITparameters["removeFast"] = True -portals_fun.INITparameters["quasineutrality"] = True +portals_fun.portals_parameters["initialization"]["remove_fast"] = True +portals_fun.portals_parameters["initialization"]["quasineutrality"] = True # Stopping criterion 1: 100x improvement in residual portals_fun.optimization_options['convergence_options']['stopping_criteria_parameters']["maximum_value"] = 1e-2 portals_fun.optimization_options['convergence_options']['stopping_criteria_parameters']["maximum_value_is_rel"] = True # Prepare run: search +-100% the original gradients -portals_fun.prep(inputgacode, ymax_rel=1.0, ymin_rel=1.0) +portals_fun.prep(inputgacode) # -------------------------------------------------------------------------------------------- # Run (optimization following namelist: templates/main.namelists.json) diff --git a/tutorials/TGLF_tutorial.py b/tutorials/TGLF_tutorial.py index c9927795..501dbfc3 100644 --- a/tutorials/TGLF_tutorial.py +++ b/tutorials/TGLF_tutorial.py @@ -10,7 +10,8 @@ tglf = TGLFtools.TGLF(rhos=[0.5, 0.7]) # Prepare the TGLF class -cdf = tglf.prep(folder, inputgacode=inputgacode_file, cold_start=False) +tglf.prep(inputgacode_file,folder, cold_start=False) + ''' *************************************************************************** @@ -20,8 +21,8 @@ # Run TGLF in subfolder tglf.run( - subFolderTGLF="yes_em_folder", - TGLFsettings=5, + subfolder="yes_em_folder", + code_settings="SAT2em", extraOptions={}, cold_start=False ) @@ -31,8 +32,8 @@ # Run TGLF in a different subfolder with different settings tglf.run( - subFolderTGLF="no_em_folder", - TGLFsettings=5, + subfolder="no_em_folder", + code_settings="SAT2em", extraOptions={"USE_BPER": False}, cold_start=False, ) @@ -49,16 +50,16 @@ 
*************************************************************************** ''' -tglf.runScan( subFolderTGLF = 'scan1', - TGLFsettings = 5, +tglf.runScan( subfolder = 'scan1', + code_settings = "SAT2em", cold_start = False, variable = 'RLTS_1', varUpDown = np.linspace(0.5,1.5,3)) tglf.readScan(label='scan1',variable = 'RLTS_1') -tglf.runScan( subFolderTGLF = 'scan2', - TGLFsettings = 5, +tglf.runScan( subfolder = 'scan2', + code_settings = "SAT2em", cold_start = False, variable = 'RLTS_2', varUpDown = np.linspace(0.5,1.5,3)) @@ -74,22 +75,22 @@ ''' tglf.runScanTurbulenceDrives( - subFolderTGLF = 'turb_drives', - TGLFsettings = 5, + subfolder = 'turb_drives', + code_settings = "SAT2em", cold_start = False) tglf.plotScanTurbulenceDrives(label='turb_drives') ''' *************************************************************************** -Automatic scan of turbulence drives +Analysis of chi incremental *************************************************************************** ''' tglf.runAnalysis( - subFolderTGLF = 'chi_e', + subfolder = 'chi_e', analysisType = 'chi_e', - TGLFsettings = 5, + code_settings = "SAT2em", cold_start = False, label = 'chi_eu') @@ -103,10 +104,11 @@ for i in[1,2,3,4,5,6]: tglf.run( - subFolderTGLF = f'settings{i}', + subfolder = f'settings{i}', runWaveForms = [0.67], - TGLFsettings = i, + code_settings = i, cold_start = False) tglf.read(label=f'settings{i}') tglf.plot(labels=[f'settings{i}' for i in range(1,6)]) + diff --git a/tutorials/run_slurm_array_tutorial/test_launcher.py b/tutorials/run_slurm_array_tutorial/test_launcher.py new file mode 100644 index 00000000..28c299dd --- /dev/null +++ b/tutorials/run_slurm_array_tutorial/test_launcher.py @@ -0,0 +1,27 @@ +import json +import os +from mitim_tools.opt_tools.scripts.slurm import run_slurm_array +from mitim_tools.opt_tools.scripts.slurm import run_slurm +from mitim_tools import __mitimroot__ +from mitim_tools.misc_tools.CONFIGread import load_settings + +# You have to have a slurm 
partition specified for your local machine for this to work!! +partition = load_settings()['local']['slurm']['partition'] +print(f"Using partition: {partition}") + + +# code_settings for slurm job +cpus = 2 +hours = 1 +memory = '100GB' +folder = __mitimroot__ / "tutorials" / "run_slurm_array_tutorial" / "scratch" +script = f'python {folder}/../test_script.py {folder} ' + +# Input the array runs +array_input = [62, 63, 81] + +# To use run_slurm_array +run_slurm_array(script, array_input, folder,partition, max_concurrent_jobs = 2, hours=hours,n=cpus,mem=memory) + +# For comparison run_slurm +# run_slurm(f'python test_script.py {folder} 84', '/home/audreysa/test_script/scratch_run_slurm', partition, environment, hours=hours, n=cpus, mem=memory) diff --git a/tutorials/run_slurm_array_tutorial/test_script.py b/tutorials/run_slurm_array_tutorial/test_script.py new file mode 100644 index 00000000..4f347cad --- /dev/null +++ b/tutorials/run_slurm_array_tutorial/test_script.py @@ -0,0 +1,13 @@ +import sys +import os + +folder = str(sys.argv[1]) +# print('Folder:', folder) + + +i = str(sys.argv[2]) +# print('i:', i) + +with open(f"{folder}/file_successfully_created_{i}.txt", "w") as f: + input_text = 'Successfully created file from job #:' + i + f.write(f"{input_text}\n") \ No newline at end of file