
Commit d4dffe9

Authored by alfonsogarciadecorral, ourownstory, and leoniewgnr
[Major] Glocal Modelling v2 (#1008)

* seasonality
* plotting seasonalities fixed
* glocal trend implemented
* glocal trend implemented
* black
* black
* starting glocal trend
* modular code + seasonality with diff global/locals + glocal trend v1 done
* Individual neural nets for future regressors almost done. some tests failing
* Individual neural nets for future regressors done
* shared neural networks for future regressors component
* local seasonality can now be regularised by global seasonality
* typo. using trend config property on seasonalityconfig
* removing variables used for dev
* changing names. Final tests premerging
* Update neural_nets.py
* black
* debug yos
* run yos on main
* fix double compute of AR components
* update debug notebooks

Co-authored-by: Oskar Triebe <ourownstory@users.noreply.github.com>
Co-authored-by: leoniewgnr <42536262+leoniewgnr@users.noreply.github.com>

1 parent a99059a · commit d4dffe9

31 files changed · +28175 −110 lines
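In short: this commit renames `season_global_local` to `seasonality_global_local`, adds a glocal (globally regularised local) trend, and introduces per-regressor neural networks for future regressors. A minimal usage sketch mirroring the updated tutorial notebooks below (a multi-series `df` with `ds`, `y`, and `ID` columns is assumed):

```python
from neuralprophet import NeuralProphet

# Assumed: df is a multi-series DataFrame with columns ds, y, and ID.
m = NeuralProphet(
    trend_global_local="local",        # one trend per series
    seasonality_global_local="local",  # renamed from season_global_local in this commit
)
m.set_plotting_backend("plotly-static")
metrics = m.fit(df, freq="H")
```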

docs/source/conf.py

Lines changed: 1 addition & 2 deletions
```diff
@@ -14,12 +14,11 @@
 
 import os
 import sys
+from typing import Any, Dict
 
 import sphinx_fontawesome  # noqa: F401
 from sphinx.ext.autodoc import between
 
-from typing import Any, Dict
-
 # sys.path.insert(0, os.path.abspath('.'))
 sys.path.insert(0, os.path.abspath("../.."))
```
docs/source/how-to-guides/feature-guides/global_local_modeling.ipynb

Lines changed: 1 addition & 1 deletion
```diff
@@ -787,7 +787,7 @@
    "source": [
     "m = NeuralProphet(\n",
     "    trend_global_local=\"local\",\n",
-    "    season_global_local=\"local\",\n",
+    "    seasonality_global_local=\"local\",\n",
     "    changepoints_range=0.8,\n",
     "    epochs=20,\n",
     "    trend_reg=5,\n",
```

docs/source/how-to-guides/feature-guides/global_local_modeling_fut_regr.ipynb

Lines changed: 3795 additions & 0 deletions
Large diffs are not rendered by default.
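The notebook above covers the new future-regressor networks. A hedged sketch of the intended usage (the `future_regressors_model` option name and the `temperature` column are assumptions for illustration, not taken from this diff):

```python
from neuralprophet import NeuralProphet

# Assumed: df has ds, y, ID and a future-known "temperature" column.
m = NeuralProphet(future_regressors_model="neural_nets")  # assumed option name
m.add_future_regressor("temperature", mode="additive")
metrics = m.fit(df, freq="H")
```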

docs/source/how-to-guides/feature-guides/glocal_trend.ipynb

Lines changed: 1728 additions & 0 deletions
Large diffs are not rendered by default.
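The glocal trend notebook is not rendered here either; as a rough sketch of the feature it documents (locally fitted components regularised toward their global counterparts, per the "local seasonality can now be regularised by global seasonality" commit above), with the regularisation argument names assumed rather than confirmed by this diff:

```python
from neuralprophet import NeuralProphet

# Hedged sketch: trend_local_reg / seasonality_local_reg are assumed names for
# the glocal regularisation strengths; higher values pull each series' local
# component closer to the global one.
m = NeuralProphet(
    trend_global_local="local",
    seasonality_global_local="local",
    trend_local_reg=3,
    seasonality_local_reg=1,
)
```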

docs/source/tutorials/tutorial09.ipynb

Lines changed: 1 addition & 1 deletion
```diff
@@ -389,7 +389,7 @@
    "source": [
     "m = NeuralProphet(\n",
     "    trend_global_local=\"local\",\n",
-    "    season_global_local=\"local\",\n",
+    "    seasonality_global_local=\"local\",\n",
     ")\n",
     "m.set_plotting_backend(\"plotly-static\")\n",
     "metrics = m.fit(df, freq=\"H\")\n",
```

neuralprophet/components/future_regressors/linear.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -1,6 +1,7 @@
 import torch
 import torch.nn as nn
 
+from neuralprophet import utils
 from neuralprophet.components.future_regressors import FutureRegressors
 from neuralprophet.utils_torch import init_parameter
 
@@ -17,6 +18,7 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
             device=device,
             config_trend_none_bool=config_trend_none_bool,
         )
+
         if self.regressors_dims is not None:
             # Regressors params
             self.regressor_params = nn.ParameterDict(
```
neuralprophet/components/future_regressors/neural_nets.py (new file)

Lines changed: 131 additions & 0 deletions
```python
from collections import OrderedDict

import torch.nn as nn

from neuralprophet.components.future_regressors import FutureRegressors
from neuralprophet.utils_torch import init_parameter, interprete_model


class NeuralNetsFutureRegressors(FutureRegressors):
    def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
            config_trend_none_bool=config_trend_none_bool,
        )
        if self.regressors_dims is not None:
            # Regressors params
            self.regressor_nets = nn.ModuleDict({})
            # TODO: if no hidden layers, then just a linear layer, as legacy
            self.d_hidden_regressors = config.d_hidden
            self.num_hidden_layers_regressors = config.num_hidden_layers
            # one net per regressor. to be adapted to combined network
            for regressor in self.regressors_dims.keys():
                # Nets for both additive and multiplicative regressors
                regressor_net = nn.ModuleList()
                # This will later be 1 + static covariates
                d_inputs = 1
                for i in range(self.num_hidden_layers_regressors):
                    regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
                    d_inputs = self.d_hidden_regressors
                # final layer has input size d_inputs and output size equal to no. of forecasts * no. of quantiles
                regressor_net.append(nn.Linear(d_inputs, self.n_forecasts * len(self.quantiles), bias=False))
                for lay in regressor_net:
                    nn.init.kaiming_normal_(lay.weight, mode="fan_in")
                self.regressor_nets[regressor] = regressor_net

    def get_reg_weights(self, name):
        """Get attributions of regressors component network w.r.t. the model input.

        Parameters
        ----------
            name : string
                Regressor name

        Returns
        -------
            torch.Tensor
                Weight corresponding to the given regressor
        """
        reg_attributions = interprete_model(
            self,
            net="regressor_nets",
            forward_func="regressor",
            _num_in_features=self.regressor_nets[name][0].in_features,
            _num_out_features=self.regressor_nets[name][-1].out_features,
            additional_forward_args=name,
        )
        return reg_attributions

    def regressor(self, regressor_input, name):
        """Compute single regressor component.

        Parameters
        ----------
            regressor_input : torch.Tensor, float
                regressor values at corresponding forecast steps, dims: (batch, n_forecasts, 1)
            name : str
                Name of regressor, for attribution to corresponding model weights

        Returns
        -------
            torch.Tensor
                Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        x = regressor_input
        for i in range(self.num_hidden_layers_regressors + 1):
            if i > 0:
                x = nn.functional.relu(x)
            x = self.regressor_nets[name][i](x)
        # segment the last dimension to match the quantiles
        x = x.reshape(x.shape[0], self.n_forecasts, len(self.quantiles))
        return x

    def all_regressors(self, regressor_inputs, mode):
        """Compute all regressor components.

        Parameters
        ----------
            regressor_inputs : torch.Tensor, float
                regressor values at corresponding forecast steps, dims: (batch, n_forecasts, num_regressors)
            mode : str
                Mode of the regressors, either "additive" or "multiplicative"

        Returns
        -------
            torch.Tensor
                Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        # Select only the entries of the OrderedDict whose mode matches the requested mode
        regressors_dims_filtered = OrderedDict((k, v) for k, v in self.regressors_dims.items() if v["mode"] == mode)
        for i, name in enumerate(regressors_dims_filtered.keys()):
            regressor_index = regressors_dims_filtered[name]["regressor_index"]
            regressor_input = regressor_inputs[:, :, regressor_index].unsqueeze(dim=2)
            if i == 0:
                x = self.regressor(regressor_input, name=name)
            if i > 0:
                x = x + self.regressor(regressor_input, name=name)
        return x

    def forward(self, inputs, mode, indeces=None):
        """Compute all future regressor components.

        Parameters
        ----------
            inputs : torch.Tensor, float
                future regressors inputs
            mode : string, either "additive" or "multiplicative"
                mode of the regressors

        Returns
        -------
            torch.Tensor
                Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        if "additive" == mode:
            f_r = self.all_regressors(inputs, mode="additive")
        if "multiplicative" == mode:
            f_r = self.all_regressors(inputs, mode="multiplicative")
        return f_r
```
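To illustrate the pattern above outside the library, here is a self-contained sketch (hypothetical regressor names, simplified shapes: one scalar observation per regressor per sample) of one small MLP per regressor whose final layer emits `n_forecasts * n_quantiles` values, with the per-regressor outputs summed as in `all_regressors`:

```python
import torch
import torch.nn as nn

n_forecasts, n_quantiles, d_hidden, n_layers = 3, 2, 8, 2
names = ["temperature", "price"]  # hypothetical regressor names

# One MLP per regressor: Linear -> ReLU -> ... -> Linear(d_hidden, n_forecasts * n_quantiles)
nets = nn.ModuleDict()
for name in names:
    layers, d_in = nn.ModuleList(), 1
    for _ in range(n_layers):
        layers.append(nn.Linear(d_in, d_hidden, bias=True))
        d_in = d_hidden
    layers.append(nn.Linear(d_in, n_forecasts * n_quantiles, bias=False))
    nets[name] = layers

def run(net, x):
    # Apply the layer stack with ReLU between layers, then split the output
    # into (n_forecasts, n_quantiles), mirroring NeuralNetsFutureRegressors.regressor.
    for i, layer in enumerate(net):
        if i > 0:
            x = nn.functional.relu(x)
        x = layer(x)
    return x.reshape(x.shape[0], n_forecasts, n_quantiles)

inputs = torch.randn(4, len(names))  # (batch, num_regressors)
component = sum(run(nets[n], inputs[:, i].unsqueeze(1)) for i, n in enumerate(names))
print(component.shape)  # torch.Size([4, 3, 2])
```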
neuralprophet/components/future_regressors/shared_neural_nets.py (new file)

Lines changed: 112 additions & 0 deletions
```python
from collections import Counter, OrderedDict

import torch
import torch.nn as nn

from neuralprophet.components.future_regressors import FutureRegressors
from neuralprophet.utils_torch import init_parameter, interprete_model


class SharedNeuralNetsFutureRegressors(FutureRegressors):
    def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
            config_trend_none_bool=config_trend_none_bool,
        )
        if self.regressors_dims is not None:
            # Regressors params
            self.regressor_nets = nn.ModuleDict({})
            # TODO: if no hidden layers, then just a linear layer, as legacy
            self.d_hidden_regressors = config.d_hidden
            self.num_hidden_layers_regressors = config.num_hidden_layers
            # Combined network: one shared net per mode (additive/multiplicative)
            for net_i, size_i in Counter([x["mode"] for x in self.regressors_dims.values()]).items():
                # Nets for both additive and multiplicative regressors
                regressor_net = nn.ModuleList()
                # This will later be size_i * (1 + static covariates)
                d_inputs = size_i
                for i in range(self.num_hidden_layers_regressors):
                    regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
                    d_inputs = self.d_hidden_regressors
                # final layer has input size d_inputs and output size equal to no. of forecasts * no. of quantiles
                regressor_net.append(nn.Linear(d_inputs, self.n_forecasts * len(self.quantiles), bias=False))
                for lay in regressor_net:
                    nn.init.kaiming_normal_(lay.weight, mode="fan_in")
                self.regressor_nets[net_i] = regressor_net

    def get_reg_weights(self, name):
        """Get attributions of regressors component network w.r.t. the model input.

        Parameters
        ----------
            name : string
                Regressor name

        Returns
        -------
            torch.Tensor
                Weight corresponding to the given regressor
        """
        mode = self.config_regressors.regressors[name].mode
        reg_attributions = interprete_model(
            self,
            net="regressor_nets",
            forward_func="regressors_net",
            _num_in_features=self.regressor_nets[mode][0].in_features,
            _num_out_features=self.regressor_nets[mode][-1].out_features,
            additional_forward_args=mode,
        )
        regressor_index = self.regressors_dims[name]["regressor_index"]
        return reg_attributions[:, regressor_index].unsqueeze(-1)

    def regressors_net(self, regressor_inputs, mode):
        """Compute the shared regressor component for one mode.

        Parameters
        ----------
            regressor_inputs : torch.Tensor, float
                regressor values at corresponding forecast steps, dims: (batch, n_forecasts, num_regressors)
            mode : str
                Mode of the regressors, for attribution to corresponding model weights

        Returns
        -------
            torch.Tensor
                Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        x = regressor_inputs
        for i in range(self.num_hidden_layers_regressors + 1):
            if i > 0:
                x = nn.functional.relu(x)
            x = self.regressor_nets[mode][i](x)
        # segment the last dimension to match the quantiles
        x = x.reshape(x.shape[0], self.n_forecasts, len(self.quantiles))
        return x

    def forward(self, inputs, mode, indeces=None):
        """Compute all future regressor components.

        Parameters
        ----------
            inputs : torch.Tensor, float
                future regressors inputs
            mode : string, either "additive" or "multiplicative"
                mode of the regressors

        Returns
        -------
            torch.Tensor
                Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        if "additive" == mode:
            f_r = self.regressors_net(inputs, mode="additive")
        if "multiplicative" == mode:
            f_r = self.regressors_net(inputs, mode="multiplicative")
        return f_r
```
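The key difference from `neural_nets.py` is grouping: all regressors sharing a mode feed one network whose input width equals the number of regressors in that mode. A short sketch of the `Counter`-based sizing, with a hypothetical `regressors_dims` of the shape the `FutureRegressors` base class builds:

```python
from collections import Counter

# Hypothetical regressors_dims for illustration.
regressors_dims = {
    "temperature": {"mode": "additive", "regressor_index": 0},
    "humidity": {"mode": "additive", "regressor_index": 1},
    "price": {"mode": "multiplicative", "regressor_index": 2},
}

# One shared net per mode; its input width is the regressor count for that mode.
for mode, width in Counter(v["mode"] for v in regressors_dims.values()).items():
    print(f"{mode}: input width {width}")
# additive: input width 2
# multiplicative: input width 1
```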
