
Commit 30c106f

Merge branch 'main' into train-continue
2 parents e043201 + 8330bab

File tree

13 files changed: +1182 -1134 lines changed


.github/workflows/metrics.yml

Lines changed: 23 additions & 2 deletions

@@ -11,6 +11,7 @@ on:
       - main
       - develop
   workflow_dispatch:
+
 jobs:
   metrics:
     runs-on: ubuntu-latest # container: docker://ghcr.io/iterative/cml:0-dvc2-base1
@@ -19,24 +20,32 @@ jobs:
         uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
+
       - name: Install Python 3.12
         uses: actions/setup-python@v5
         with:
           python-version: "3.12"
+
       - name: Setup NodeJS (for CML)
         uses: actions/setup-node@v3 # For CML
         with:
           node-version: '16'
+
       - name: Setup CML
         uses: iterative/setup-cml@v1
+
       - name: Install Poetry
         uses: snok/install-poetry@v1
+
       - name: Install Dependencies
         run: poetry install --no-interaction --no-root --with=pytest,metrics --without=dev,docs,linters
+
       - name: Install Project
         run: poetry install --no-interaction --with=pytest,metrics --without=dev,docs,linters
+
       - name: Train model
         run: poetry run pytest tests/test_model_performance.py -n 1 --durations=0
+
       - name: Download metrics from main
         uses: dawidd6/action-download-artifact@v2
         with:
@@ -45,28 +54,40 @@ jobs:
           name: metrics
           path: tests/metrics-main/
           if_no_artifact_found: warn
+
       - name: Open Benchmark Report
         run: echo "## Model Benchmark" >> report.md
+
       - name: Write Benchmark Report
         run: poetry run python tests/metrics/compareMetrics.py >> report.md
+
       - name: Publish Report with CML
         env:
           REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
-          echo "<details>\n<summary>Model training plots</summary>\n" >> report.md
+          echo "<details><summary>Model training plots</summary>" >> report.md
+          echo "" >> report.md
           echo "## Model Training" >> report.md
+          echo "" >> report.md
          echo "### PeytonManning" >> report.md
           cml asset publish tests/metrics/PeytonManning.svg --md >> report.md
+          echo "" >> report.md
           echo "### YosemiteTemps" >> report.md
           cml asset publish tests/metrics/YosemiteTemps.svg --md >> report.md
+          echo "" >> report.md
           echo "### AirPassengers" >> report.md
           cml asset publish tests/metrics/AirPassengers.svg --md >> report.md
+          echo "" >> report.md
           echo "### EnergyPriceDaily" >> report.md
           cml asset publish tests/metrics/EnergyPriceDaily.svg --md >> report.md
-          echo "\n</details>" >> report.md
+          echo "" >> report.md
+          echo "</details>" >> report.md
+          echo "" >> report.md
           cml comment update --target=pr report.md # Post reports as comments in GitHub PRs
           cml check create --title=ModelReport report.md # update status of check in PR
+
       - name: Upload metrics if on main
+        if: github.ref == 'refs/heads/main'
         uses: actions/upload-artifact@v3
         with:
           name: metrics
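Note on the report change: plain `echo` (without `-e`) writes "\n" literally, so the old one-line `<details>` wrapper put literal backslash-n characters into report.md; the rewrite emits one echo per line with explicit blank lines, which GitHub's renderer also needs around block Markdown inside HTML tags. A minimal sketch of the resulting file, in Python rather than shell (hypothetical helper, not part of the repo; "![](...)" stands in for the Markdown image tag that `cml asset publish --md` appends):

from pathlib import Path

# Assemble report.md the way the reworked step does: one line per echo,
# with explicit blank lines separating the Markdown blocks.
lines = ["<details><summary>Model training plots</summary>", "", "## Model Training", ""]
for name in ["PeytonManning", "YosemiteTemps", "AirPassengers", "EnergyPriceDaily"]:
    lines += [f"### {name}", f"![]({name}.svg)", ""]
lines += ["</details>", ""]
Path("report.md").write_text("\n".join(lines) + "\n")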

neuralprophet/components/future_regressors/neural_nets.py

Lines changed: 5 additions & 7 deletions

@@ -21,18 +21,16 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend
         if self.regressors_dims is not None:
             # Regresors params
             self.regressor_nets = nn.ModuleDict({})
-            # TO DO: if no hidden layers, then just a as legacy
-            self.d_hidden_regressors = config.d_hidden
-            self.num_hidden_layers_regressors = config.num_hidden_layers
+            self.regressors_layers = config.regressors_layers
             # one net per regressor. to be adapted to combined network
             for regressor in self.regressors_dims.keys():
                 # Nets for both additive and multiplicative regressors
                 regressor_net = nn.ModuleList()
                 # This will be later 1 + static covariates
                 d_inputs = 1
-                for i in range(self.num_hidden_layers_regressors):
-                    regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
-                    d_inputs = self.d_hidden_regressors
+                for d_hidden_i in self.regressors_layers:
+                    regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
+                    d_inputs = d_hidden_i
                 # final layer has input size d_inputs and output size equal to no. of quantiles
                 regressor_net.append(nn.Linear(d_inputs, len(self.quantiles), bias=False))
                 for lay in regressor_net:
@@ -79,7 +77,7 @@ def regressor(self, regressor_input, name):
             Forecast component of dims (batch, n_forecasts, num_quantiles)
         """
         x = regressor_input
-        for i in range(self.num_hidden_layers_regressors + 1):
+        for i in range(len(self.regressors_layers) + 1):
             if i > 0:
                 x = nn.functional.relu(x)
             x = self.regressor_nets[name][i](x)
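The constructor loop now takes a per-layer width list instead of the fixed (d_hidden, num_hidden_layers) pair; an empty list degenerates to a single linear layer, which is the legacy case the removed TO DO comment asked for. A standalone sketch of the same loop (function name hypothetical, not in the repo):

import torch.nn as nn

def build_regressor_net(regressors_layers, n_quantiles, d_inputs=1):
    # Each list entry adds one hidden Linear layer of that width;
    # the final layer maps to one output per quantile.
    net = nn.ModuleList()
    for d_hidden_i in regressors_layers:
        net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
        d_inputs = d_hidden_i
    net.append(nn.Linear(d_inputs, n_quantiles, bias=False))
    return net

build_regressor_net([], 3)       # single Linear(1, 3): the legacy linear case
build_regressor_net([16, 8], 3)  # Linear(1, 16) -> Linear(16, 8) -> Linear(8, 3)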

neuralprophet/components/future_regressors/shared_neural_nets.py

Lines changed: 5 additions & 7 deletions

@@ -21,18 +21,16 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend
         if self.regressors_dims is not None:
             # Regresors params
             self.regressor_nets = nn.ModuleDict({})
-            # TO DO: if no hidden layers, then just a as legacy
-            self.d_hidden_regressors = config.d_hidden
-            self.num_hidden_layers_regressors = config.num_hidden_layers
+            self.regressors_layers = config.regressors_layers
             # Combined network
             for net_i, size_i in Counter([x["mode"] for x in self.regressors_dims.values()]).items():
                 # Nets for both additive and multiplicative regressors
                 regressor_net = nn.ModuleList()
                 # This will be later size_i(1 + static covariates)
                 d_inputs = size_i
-                for i in range(self.num_hidden_layers_regressors):
-                    regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
-                    d_inputs = self.d_hidden_regressors
+                for d_hidden_i in self.regressors_layers:
+                    regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
+                    d_inputs = d_hidden_i
                 # final layer has input size d_inputs and output size equal to no. of quantiles
                 regressor_net.append(nn.Linear(d_inputs, len(self.quantiles), bias=False))
                 for lay in regressor_net:
@@ -81,7 +79,7 @@ def regressors(self, regressor_inputs, mode):
             Forecast component of dims (batch, n_forecasts, num_quantiles)
         """
         x = regressor_inputs
-        for i in range(self.num_hidden_layers_regressors + 1):
+        for i in range(len(self.regressors_layers) + 1):
             if i > 0:
                 x = nn.functional.relu(x)
             x = self.regressor_nets[mode][i](x)
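The forward loop's trip count now follows the list: len(regressors_layers) hidden layers plus the final projection, i.e. len(regressors_layers) + 1 equals the number of layers in the net, with ReLU applied before every layer except the first. A minimal equivalent sketch:

import torch.nn.functional as F

def forward_net(net, x):
    # Iterate over all layers of the ModuleList (hidden layers + final
    # projection); this matches range(len(regressors_layers) + 1) because
    # the net holds one layer per list entry plus the output layer.
    for i, layer in enumerate(net):
        if i > 0:
            x = F.relu(x)
        x = layer(x)
    return x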

neuralprophet/components/future_regressors/shared_neural_nets_coef.py

Lines changed: 5 additions & 7 deletions

@@ -21,18 +21,16 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend
         if self.regressors_dims is not None:
             # Regresors params
             self.regressor_nets = nn.ModuleDict({})
-            # TO DO: if no hidden layers, then just a as legacy
-            self.d_hidden_regressors = config.d_hidden
-            self.num_hidden_layers_regressors = config.num_hidden_layers
+            self.regressors_layers = config.regressors_layers
             # Combined network
             for net_i, size_i in Counter([x["mode"] for x in self.regressors_dims.values()]).items():
                 # Nets for both additive and multiplicative regressors
                 regressor_net = nn.ModuleList()
                 # This will be later size_i(1 + static covariates)
                 d_inputs = size_i
-                for i in range(self.num_hidden_layers_regressors):
-                    regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
-                    d_inputs = self.d_hidden_regressors
+                for d_hidden_i in self.regressors_layers:
+                    regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
+                    d_inputs = d_hidden_i
                 # final layer has input size d_inputs and output size equal to no. of quantiles
                 regressor_net.append(nn.Linear(d_inputs, size_i * len(self.quantiles), bias=False))
                 for lay in regressor_net:
@@ -82,7 +80,7 @@ def regressors(self, regressor_inputs, mode):
             Forecast component of dims (batch, n_forecasts, num_quantiles)
         """
        x = regressor_inputs
-        for i in range(self.num_hidden_layers_regressors + 1):
+        for i in range(len(self.regressors_layers) + 1):
             if i > 0:
                 x = nn.functional.relu(x)
             x = self.regressor_nets[mode][i](x)
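This coef variant differs from shared_neural_nets only in the width of the final layer: size_i * len(quantiles) outputs, one coefficient per regressor and quantile, rather than len(quantiles). A quick shape check (the reshape layout is an assumption for illustration, not taken from the repo):

import torch
import torch.nn as nn

size_i, n_quantiles = 3, 3                 # e.g. 3 regressors, quantiles [0.1, 0.5, 0.9]
final = nn.Linear(8, size_i * n_quantiles, bias=False)
out = final(torch.randn(4, 8))             # shape: (batch, size_i * n_quantiles)
coefs = out.view(4, size_i, n_quantiles)   # assumed layout: one coefficient per regressor/quantile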

neuralprophet/configure.py

Lines changed: 4 additions & 4 deletions

@@ -243,7 +243,7 @@ def set_lr_finder_args(self, dataset_size, num_batches):
         Set the lr_finder_args.
         This is the range of learning rates to test.
         """
-        num_training = 150 + int(np.log10(100 + dataset_size) * 25)
+        num_training = 100 + int(np.log10(dataset_size) * 20)
         if num_batches < num_training:
             log.warning(
                 f"Learning rate finder: The number of batches ({num_batches}) is too small than the required number \
@@ -252,7 +252,7 @@ def set_lr_finder_args(self, dataset_size, num_batches):
         # num_training = num_batches
         self.lr_finder_args.update(
             {
-                "min_lr": 1e-6,
+                "min_lr": 1e-7,
                 "max_lr": 10,
                 "num_training": num_training,
                 "early_stop_threshold": None,
@@ -516,8 +516,8 @@ class Regressor:
 @dataclass
 class ConfigFutureRegressors:
     model: str
-    d_hidden: int
-    num_hidden_layers: int
+    regressors_layers: Optional[List[int]]
+
     regressors: OrderedDict = field(init=False)  # contains RegressorConfig objects

     def __post_init__(self):
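The new heuristic tests fewer candidate learning rates over a wider range (min_lr drops from 1e-6 to 1e-7). A quick check of the formula against the old one:

import numpy as np

def num_training(dataset_size):
    # New heuristic from the diff above.
    return 100 + int(np.log10(dataset_size) * 20)

print(num_training(1_000))    # 100 + int(3.0 * 20) = 160 candidate rates
print(num_training(100_000))  # 100 + int(5.0 * 20) = 200 candidate rates
# Old formula: 150 + int(np.log10(100 + dataset_size) * 25)
# gave 150 + int(log10(1100) * 25) = 226 for dataset_size=1000.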

neuralprophet/forecaster.py

Lines changed: 8 additions & 15 deletions

@@ -199,13 +199,10 @@ class NeuralProphet:
             * ``shared_neural_nets``
             * ``shared_neural_nets_coef``

-        future_regressors_d_hidden: int
-            Number of hidden layers in the neural network model for future regressors.
-            Ignored if ``future_regressors_model`` is ``linear``.
+        future_regressors_layers: list of int
+            list of hidden layer dimensions of the future regressor nets. Specifies number of hidden layers (number of entries)
+            and layer dimension (list entry). Default [] (no hidden layers)

-        future_regressors_num_hidden_layers: int
-            Dimension of hidden layers in the neural network model for future regressors.
-            Ignored if ``future_regressors_model`` is ``linear``.

     COMMENT
     AR Config
@@ -438,8 +435,7 @@ def __init__(
         season_global_local: np_types.SeasonGlobalLocalMode = "global",
         seasonality_local_reg: Optional[Union[bool, float]] = False,
         future_regressors_model: np_types.FutureRegressorsModel = "linear",
-        future_regressors_d_hidden: int = 4,
-        future_regressors_num_hidden_layers: int = 2,
+        future_regressors_layers: Optional[list] = [],
         n_forecasts: int = 1,
         n_lags: int = 0,
         ar_layers: Optional[list] = [],
@@ -557,8 +553,7 @@ def __init__(
         self.config_lagged_regressors: Optional[configure.ConfigLaggedRegressors] = None
         self.config_regressors = configure.ConfigFutureRegressors(
             model=future_regressors_model,
-            d_hidden=future_regressors_d_hidden,
-            num_hidden_layers=future_regressors_num_hidden_layers,
+            regressors_layers=future_regressors_layers,
         )  # Optional[configure.ConfigFutureRegressors] = None

         # set during fit()
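For callers, the removed pair maps directly onto the new list: num_hidden_layers entries, each of width d_hidden. A usage sketch of the updated signature (values illustrative):

from neuralprophet import NeuralProphet

m = NeuralProphet(
    future_regressors_model="neural_nets",
    future_regressors_layers=[4, 4],  # old defaults: 2 hidden layers of width 4
)
# future_regressors_layers=[] (the new default) keeps a purely linear net.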
@@ -2864,13 +2859,12 @@ def _train(
             lr_finder = tuner.lr_find(
                 model=self.model,
                 train_dataloaders=train_loader,
-                val_dataloaders=val_loader,
+                # val_dataloaders=val_loader,  # not used, but may trigger a Lightning bug if not provided
                 **self.config_train.lr_finder_args,
             )
             # Estimate the optimal learning rate from the loss curve
             assert lr_finder is not None
-            _, _, lr_suggestion = utils.smooth_loss_and_suggest(lr_finder.results)
-            self.model.learning_rate = lr_suggestion
+            _, _, self.model.learning_rate = utils.smooth_loss_and_suggest(lr_finder)
             start = time.time()
             self.trainer.fit(
                 self.model,
@@ -2891,8 +2885,7 @@ def _train(
             )
             assert lr_finder is not None
             # Estimate the optimal learning rate from the loss curve
-            _, _, lr_suggestion = utils.smooth_loss_and_suggest(lr_finder.results)
-            self.model.learning_rate = lr_suggestion
+            _, _, self.model.learning_rate = utils.smooth_loss_and_suggest(lr_finder)
             start = time.time()
             self.trainer.fit(
                 self.model,
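In both branches, smooth_loss_and_suggest now receives the whole lr_finder object rather than lr_finder.results, and its third return value is assigned directly as the learning rate. A hedged sketch of the surrounding Lightning flow (return order inferred from the unpacking in the diff; lr_find kwargs mirror lr_finder_args from configure.py):

from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.tuner.tuning import Tuner
from neuralprophet import utils

def find_learning_rate(trainer: Trainer, model: LightningModule, train_loader):
    # Run Lightning's LR range test without val_dataloaders, then let the
    # helper smooth the loss curve and pick the suggested rate.
    tuner = Tuner(trainer)
    lr_finder = tuner.lr_find(
        model=model,
        train_dataloaders=train_loader,
        min_lr=1e-7,
        max_lr=10,
        num_training=200,
        early_stop_threshold=None,
    )
    assert lr_finder is not None
    _, _, model.learning_rate = utils.smooth_loss_and_suggest(lr_finder)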
