|
4 | 4 | cmor:
|
5 | 5 | # If test true it will just run the setup but not launch the job automatically
|
6 | 6 | test: false
|
7 |
| - appdir: /g/data/ua8/Working/packages/ACCESS-MOPPeR |
| 7 | + # working directory; if 'default', uses the current directory |
| 8 | + appdir: default |
8 | 9 | # output directory for all generated data (CMORISED files & logs)
|
9 | 10 | # if default it is set to /scratch/$project/$user/MOPPER_OUTPUT<exp>
|
10 | 11 | outpath: default
|
|
29 | 30 | access_version: CM2
|
30 | 31 | # reference date for time units (set as 'default' to use start_date)
|
31 | 32 | reference_date: 1970-01-01
|
| 33 | + # Path and file templates can be changed based on the experiment. |
| 34 | + # The example below should be considered a minimum requirement. |
| 35 | + # Consider adding 'table_id' if using the "all tables" option to list |
| 36 | + # the variables to process as variables can be present at same frequency |
| 37 | + # in more than one table |
32 | 38 | path_template: "{product_version}/{frequency}"
|
33 | 39 | # date_range is automatically added at the end of filename
|
34 | 40 | file_template: "{variable_id}_{source_id}_{experiment_id}_{frequency}"
|
|
41 | 47 | shuffle: 1
|
42 | 48 | # Variables to CMORise:
|
43 | 49 | # CMOR table/variable to process; default is 'all'.
|
| 50 | + # 'all' will use all the tables listed in the mapping file |
44 | 51 | # Or create a yaml file listing variables to process (VAR_SUBSET[_LIST]).
|
45 | 52 | # each line: <table: [var1, var2, var3 ..]>
|
46 | 53 | tables: CMIP6_Amon
|
@@ -81,16 +88,21 @@ cmor:
|
81 | 88 | _AXIS_ENTRY_FILE: "ACDD_coordinate.json"
|
82 | 89 | _FORMULA_VAR_FILE: "ACDD_formula_terms.json"
|
83 | 90 | grids: "ACDD_grids.json"
|
84 |
| - # Additional NCI information: |
| 91 | +# Additional NCI information: |
85 | 92 | # NCI project to charge compute; $PROJECT = your default project
|
86 | 93 | project: v45
|
87 |
| - # additional NCI projects to be included in the storage flags |
| 94 | + # additional NCI projects to be included in the storage flags, comma-separated list |
88 | 95 | addprojs: []
|
89 | 96 | # queue and memory (GB) per CPU (depends on queue),
|
90 |
| - # hugemem is reccomended for high reoslution data and/or derived variables |
| 97 | + # hugemem is recommended for high resolution data and/or derived variables |
91 | 98 | # hugemem requires a minimum of 6 cpus this is handled by the code
|
92 | 99 | queue: hugemem
|
93 | 100 | mem_per_cpu: 32
|
| 101 | + max_cpus: 24 |
| 102 | + # Mopper uses multiprocessing to produce files in parallel, usually 1 cpu per worker |
| 103 | + # is a good compromise, occasionally you might want to pass a higher number |
| 104 | + # if running out of memory |
| 105 | + cpuxworker: 1 |
94 | 106 | # walltime in "hh:mm:ss"
|
95 | 107 | walltime: '8:00:00'
|
96 | 108 | mode: custom
|
|
99 | 111 | # you can override that by supplying the env to pass to "source"
|
100 | 112 | # Ex
|
101 | 113 | # conda_env: <custom-env-path>/bin/activate
|
102 |
| - # or you can set "test: true" and modify mopper_job.sh manually |
| 114 | + # to allow other settings use "test: true" and modify mopper_job.sh manually |
103 | 115 | conda_env: default
|
104 | 116 |
|
105 | 117 | #
|
@@ -174,4 +186,4 @@ attrs:
|
174 | 186 | parent: !!bool false
|
175 | 187 | # CMOR will add a tracking_id if you want to define a prefix add here
|
176 | 188 | tracking_id_prefix:
|
177 |
| - comment: "post-processed using ACCESS-MOPPeR v1.0.0 https://doi.org/10.5281/zenodo.12747219" |
| 189 | + comment: "post-processed using ACCESS-MOPPeR v1.1.0 https://doi.org/10.5281/zenodo.13841181" |
0 commit comments