Skip to content

Commit f9572be

Browse files
committed
update README.md
1 parent d23c211 commit f9572be

File tree

1 file changed

+36
-44
lines changed

1 file changed

+36
-44
lines changed

README.md

Lines changed: 36 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -85,80 +85,72 @@ Switch to `<address>:<port>/#images` for 2d (and max projections of 3d) tensors
8585
```yaml
8686
toolbox: # defines yaml anchors for convenience (is ignored by hylfm setup)
8787
eval_batch_size: &eval_batch_size 1 # example for yaml placeholders
88+
<key>: <file name.yml> # any subconfig part can be substituted by a yml file in configs/<key>/<file name.yml>
8889

89-
precision: float # model precision
9090
model: # model setup
9191
name: <str> # model name as specified in hyflm.models
9292
kwargs: {...} # model key word arguments
93-
checkpoint: # path to hylfm checkpoint to load instead of random initialization
94-
partial_weights: <bool> #
93+
checkpoint: <str> # path to hylfm checkpoint to load instead of random initialization
94+
partial_weights: <bool> # if true ignore additional/missing tensors in model checkpoint with different architecture
95+
precision: <float|half> # model precision
9596

9697
stages: # list of stages, each stage may be a training or a test stage, and will be run consecutively
9798
- train: # stage name
9899
optimizer: # stages with 'optimizer' are training stages
99-
name: Adam # optimizer class as specified in hylfm.optimizers
100+
name: <str> # optimizer class as specified in hylfm.optimizers
100101
kwargs: {...}
101102

102103
max_epochs: <int> # stop after max epochs even if validation score is still improving
103104
metrics: [...] # metrics to evaluate (and log according to 'log')
104105
log: # loggers as specified in hylfm.loggers
105106
TqdmLogger: {} # show tqdm progress bar
106107
TensorBoardLogger: # log to TensorBoard event file
107-
scalars_every: {value: 1, unit: epoch} # how often to log scalar metrics and loss
108-
tensors_every: {value: 1, unit: epoch} # how often to log below specified tensors (and plots)
109-
tensor_names: [lf, pred, ls_reg] # names of tensors to be logged as 2d (max projection) images
108+
scalars_every: {value: <int>, unit: <epoch|iteration>} # how often to log scalar metrics and loss
109+
tensors_every: {value: <int>, unit: <epoch|iteration>} # how often to log below specified tensors and plots
110+
tensor_names: [<str>, ...] # names of tensors to be logged as 2d (max projection) images
110111
FileLogger: # individual output files
111-
scalars_every: {value: 1, unit: iteration}
112-
tensors_every: {value: 1, unit: iteration}
113-
tensor_names: [pred, ls_reg] # names of tensors to be logged as .tif files
112+
scalars_every: {value: <int>, unit: <epoch|iteration>}
113+
tensors_every: {value: <int>, unit: <epoch|iteration>}
114+
tensor_names: [<str>, ...] # names of tensors to be logged as .tif files
114115

115-
criterion: # criterion to optimize ('loss')
116-
name: <LossOnTensors child> # `LossOnTensors` child class specified in hylfm.losses
117-
kwargs: {...} # key word arguments
118-
tensor_names: {...} # tensor name mapping
116+
criterion: # criterion to optimize ('loss')
117+
name: <str> # `LossOnTensors` child class specified in hylfm.losses
118+
kwargs: {...} # key word arguments
119+
tensor_names: {...} # tensor name mapping
119120

120-
sampler: # data sampling strategy
121-
base: RandomSampler # `torch.utils.data.sampler.Sampler` child class in torch.utils.data
122-
drop_last: <bool> # drop last samples if less samples than 'batch_size' remain
121+
sampler: # data sampling strategy
122+
base: <str> # `torch.utils.data.sampler.Sampler` child class in torch.utils.data
123+
drop_last: <bool> # drop last samples if less samples than 'batch_size' remain
123124

124-
batch_preprocessing: [...] # List of `Transform` child classes as specified in hylfm.transformations and their kwargs
125+
batch_preprocessing: [...] # List of `Transform` child classes as specified in hylfm.transformations with kwargs
125126
batch_preprocessing_in_step: [...] # like 'batch_preprocessing', but in the iteration step (on GPU)
126-
batch_postprocessing: [...] # like 'batch_preprocessing', but after `model.forward()`
127+
batch_postprocessing: [...] # like 'batch_preprocessing', but after `model.forward()`
127128

128129
data:
129-
- batch_size: <training batch size>
130+
- batch_size: <int>
130131
sample_preprocessing: [...] # like 'batch_preprocessing', but before batch assembly on single sample
131132
datasets: # list of datasets=named tensors
132-
- tensors: {<tensor name>: <info name>, ...} # named tensors, each resolved by `hylfm.datasets.get_tensor_info()`
133-
[sample_transformations: [...]] # overwrites 'sample_preprocessing' for this dataset (optional)
134-
# subselect indices as List[int], single int, or string resolved by `hylfm.setup._utils.indice_string_to_list()`
135-
indices: null # null = "all indices"
136-
137-
validate: # validation stage of this training stage
138-
... # a validation stage is an evaluation stage with the following additional keys:
139-
score_metric: smooth_l1_loss-scaled # metric name (that has to exist in this stage's 'metrics') to use as validation score
140-
period: {value: 1, unit: epoch} # how often to validate wrt to parent training stage
141-
patience: 10 # stop after not improvement of 'score_metric' for 'patience' validations
142-
143-
- test: # stage name of an evaluation stage (no 'optimizer' defined)
144-
metrics: beads.yml # any subconfig part can be substituted by a yml file in configs/<key=metrics>/<file name=beads.yml>
145-
log: {...} # these fields are described above in 'train'
133+
- tensors: {<tensor_name>: <str>, ...} # named tensors, each resolved by `hylfm.datasets.get_tensor_info()`
134+
[sample_transformations: [...]] # overwrites 'sample_preprocessing' for this dataset (optional)
135+
# subselect indices as List[int], int, or string resolved by `hylfm.setup._utils.indice_string_to_list()`
136+
indices: null # null = "all indices"
137+
138+
validate: # validation stage of this training stage
139+
... # a validation stage is an evaluation stage with the following additional keys:
140+
score_metric: <str> # metric name (that has to exist in this stage's 'metrics') to use as validation score
141+
period: {value: <int>, unit: <epoch|iteration>} # how often to validate wrt the parent training stage
142+
patience: <int> # stop after no improvement of 'score_metric' for 'patience' validations
143+
144+
- test: # stage name of an evaluation stage (no 'optimizer' defined)
145+
# the following fields are described above in 'train':
146+
metrics: [...]
147+
log: {...}
146148
batch_preprocessing: [...]
147149
batch_preprocessing_in_step: [...]
148150
batch_postprocessing: [...]
149151
data: [...]
150152
```
151153
152154
153-
#### Logging
154-
How and if to log to TensorBoard and/or individual output files, as well as if to display a tqdm-progress bar is configurable in the `log` field for each `stage`:
155-
```yaml
156-
stages:
157-
- test:
158-
...
159-
160-
```
161-
162-
163155
## Settings
164156
To overwrite default settings, like the number of worker threads per pytorch Dataloader, adapt `hylfm/_settings/local.py` (copy from `hylfm/_settings/local.template.py`)

0 commit comments

Comments
 (0)