[FIX] futr_exog_list in Auto and HINT classes (#773)
* exog_list in hint and auto

* fix ray version

* add os.environ vars for tests

* fix torch version

* remove tests

* fix auto tests
cchallu authored Oct 4, 2023
1 parent be91eca commit 341f8b2
Showing 9 changed files with 82 additions and 60 deletions.
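
The change itself is small: NeuralForecast's core expects each model to expose its lists of future, historical, and static exogenous features (futr_exog_list, hist_exog_list, stat_exog_list), but the BaseAuto and HINT wrappers only held these on the inner model they train, so the wrapper itself exposed none of them. This commit forwards the lists from the fitted inner model to the wrapper at the end of fit. A minimal sketch of the pattern, using a hypothetical wrapper class rather than the library's actual code:

    class Wrapper:
        """Trains an inner model and mirrors its exogenous-feature lists."""

        def __init__(self, cls_model, config):
            self.cls_model = cls_model
            self.config = config

        def fit(self, dataset):
            # ... hyperparameter search / training of the inner model ...
            self.model = self.cls_model(**self.config)
            self.model.fit(dataset)

            # The fix: copy the inner model's lists onto the wrapper so code
            # that reads wrapper.futr_exog_list works the same as for any
            # other NeuralForecast model.
            self.futr_exog_list = self.model.futr_exog_list
            self.hist_exog_list = self.model.hist_exog_list
            self.stat_exog_list = self.model.stat_exog_list
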
2 changes: 1 addition & 1 deletion environment.yml
@@ -16,4 +16,4 @@ dependencies:
- pip:
- nbdev
- black
- "ray[tune]>=2.2.0"
- "ray[tune]==2.6.3"
103 changes: 57 additions & 46 deletions nbs/common.base_auto.ipynb
@@ -427,6 +427,11 @@
" test_size=test_size)\n",
" self.results = results\n",
"\n",
" # Added attributes for compatibility with NeuralForecast core\n",
" self.futr_exog_list = self.model.futr_exog_list\n",
" self.hist_exog_list = self.model.hist_exog_list\n",
" self.stat_exog_list = self.model.stat_exog_list\n",
"\n",
" def predict(self, dataset, step_size=1, **data_kwargs):\n",
" \"\"\" BaseAuto.predict\n",
"\n",
@@ -515,6 +520,12 @@
"outputs": [],
"source": [
"#| hide\n",
"\n",
"#| hide\n",
"import os\n",
"os.environ[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n",
"os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
"\n",
"import optuna\n",
"import pandas as pd\n",
"from neuralforecast.models.mlp import MLP\n",
@@ -545,17 +556,17 @@
"metadata": {},
"outputs": [],
"source": [
"config = {\n",
" \"hidden_size\": tune.choice([512]),\n",
" \"num_layers\": tune.choice([3, 4]),\n",
" \"input_size\": 12,\n",
" \"max_steps\": 10,\n",
" \"val_check_steps\": 1\n",
"}\n",
"auto = BaseAuto(h=12, loss=MAE(), valid_loss=MSE(), cls_model=MLP, config=config, num_samples=2, cpus=1, gpus=0)\n",
"auto.fit(dataset=dataset)\n",
"y_hat = auto.predict(dataset=dataset)\n",
"assert mae(Y_test_df['y'].values, y_hat[:, 0]) < 200"
"# config = {\n",
"# \"hidden_size\": tune.choice([512]),\n",
"# \"num_layers\": tune.choice([3, 4]),\n",
"# \"input_size\": 12,\n",
"# \"max_steps\": 10,\n",
"# \"val_check_steps\": 5\n",
"# }\n",
"# auto = BaseAuto(h=12, loss=MAE(), valid_loss=MSE(), cls_model=MLP, config=config, num_samples=2) # cpus=1, gpus=0\n",
"# auto.fit(dataset=dataset)\n",
"# y_hat = auto.predict(dataset=dataset)\n",
"# assert mae(Y_test_df['y'].values, y_hat[:, 0]) < 200"
]
},
{
@@ -571,7 +582,7 @@
" \"num_layers\": trial.suggest_categorical('num_layers', [3, 4]),\n",
" \"input_size\": 12,\n",
" \"max_steps\": 10,\n",
" \"val_check_steps\": 1\n",
" \"val_check_steps\": 5\n",
" }"
]
},
@@ -596,10 +607,10 @@
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"Y_test_df['AutoMLP'] = y_hat\n",
"# #| hide\n",
"# Y_test_df['AutoMLP'] = y_hat\n",
"\n",
"pd.concat([Y_train_df, Y_test_df]).drop('unique_id', axis=1).set_index('ds').plot()"
"# pd.concat([Y_train_df, Y_test_df]).drop('unique_id', axis=1).set_index('ds').plot()"
]
},
{
@@ -609,37 +620,37 @@
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"# Unit tests to guarantee that losses are correctly instantiated\n",
"import pandas as pd\n",
"from neuralforecast.models.mlp import MLP\n",
"from neuralforecast.utils import AirPassengersDF as Y_df\n",
"from neuralforecast.tsdataset import TimeSeriesDataset\n",
"from neuralforecast.losses.pytorch import MAE, MSE\n",
"\n",
"Y_train_df = Y_df[Y_df.ds<='1959-12-31'] # 132 train\n",
"Y_test_df = Y_df[Y_df.ds>'1959-12-31'] # 12 test\n",
"\n",
"dataset, *_ = TimeSeriesDataset.from_df(Y_train_df)\n",
"config = {\n",
" \"hidden_size\": tune.choice([512]),\n",
" \"num_layers\": tune.choice([3, 4]),\n",
" \"input_size\": 12,\n",
" \"max_steps\": 1,\n",
" \"val_check_steps\": 1\n",
"}\n",
"\n",
"# Test instantiation\n",
"auto = BaseAuto(h=12, loss=MAE(), valid_loss=MSE(), \n",
" cls_model=MLP, config=config, num_samples=2, cpus=1, gpus=0)\n",
"test_eq(str(type(auto.loss)), \"<class 'neuralforecast.losses.pytorch.MAE'>\")\n",
"test_eq(str(type(auto.valid_loss)), \"<class 'neuralforecast.losses.pytorch.MSE'>\")\n",
"\n",
"# Test validation default\n",
"auto = BaseAuto(h=12, loss=MSE(), valid_loss=None,\n",
" cls_model=MLP, config=config, num_samples=2, cpus=1, gpus=0)\n",
"test_eq(str(type(auto.loss)), \"<class 'neuralforecast.losses.pytorch.MSE'>\")\n",
"test_eq(str(type(auto.valid_loss)), \"<class 'neuralforecast.losses.pytorch.MSE'>\")"
"# #| hide\n",
"# # Unit tests to guarantee that losses are correctly instantiated\n",
"# import pandas as pd\n",
"# from neuralforecast.models.mlp import MLP\n",
"# from neuralforecast.utils import AirPassengersDF as Y_df\n",
"# from neuralforecast.tsdataset import TimeSeriesDataset\n",
"# from neuralforecast.losses.pytorch import MAE, MSE\n",
"\n",
"# Y_train_df = Y_df[Y_df.ds<='1959-12-31'] # 132 train\n",
"# Y_test_df = Y_df[Y_df.ds>'1959-12-31'] # 12 test\n",
"\n",
"# dataset, *_ = TimeSeriesDataset.from_df(Y_train_df)\n",
"# config = {\n",
"# \"hidden_size\": tune.choice([512]),\n",
"# \"num_layers\": tune.choice([3, 4]),\n",
"# \"input_size\": 12,\n",
"# \"max_steps\": 1,\n",
"# \"val_check_steps\": 1\n",
"# }\n",
"\n",
"# # Test instantiation\n",
"# auto = BaseAuto(h=12, loss=MAE(), valid_loss=MSE(), \n",
"# cls_model=MLP, config=config, num_samples=2, cpus=1, gpus=0)\n",
"# test_eq(str(type(auto.loss)), \"<class 'neuralforecast.losses.pytorch.MAE'>\")\n",
"# test_eq(str(type(auto.valid_loss)), \"<class 'neuralforecast.losses.pytorch.MSE'>\")\n",
"\n",
"# # Test validation default\n",
"# auto = BaseAuto(h=12, loss=MSE(), valid_loss=None,\n",
"# cls_model=MLP, config=config, num_samples=2, cpus=1, gpus=0)\n",
"# test_eq(str(type(auto.loss)), \"<class 'neuralforecast.losses.pytorch.MSE'>\")\n",
"# test_eq(str(type(auto.valid_loss)), \"<class 'neuralforecast.losses.pytorch.MSE'>\")"
]
},
{
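
With the lists forwarded, a fitted Auto model can be inspected for its exogenous configuration just like a plain model. A small usage sketch; it assumes the public NeuralForecast and AutoMLP APIs and the AirPassengers helper used in the notebook tests above, and runtime depends on the default search space:

    from neuralforecast import NeuralForecast
    from neuralforecast.auto import AutoMLP
    from neuralforecast.losses.pytorch import MAE
    from neuralforecast.utils import AirPassengersDF

    nf = NeuralForecast(models=[AutoMLP(h=12, loss=MAE(), num_samples=2)], freq='M')
    nf.fit(AirPassengersDF)

    # Before this commit the Auto wrapper did not expose this attribute;
    # it now mirrors the tuned inner model.
    print(nf.models[0].futr_exog_list)  # expected: [] (no future exogenous features configured)
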
5 changes: 5 additions & 0 deletions nbs/models.hint.ipynb
@@ -315,6 +315,11 @@
" val_size=val_size,\n",
" test_size=test_size,\n",
" random_seed=random_seed)\n",
" \n",
" # Added attributes for compatibility with NeuralForecast core\n",
" self.futr_exog_list = self.model.futr_exog_list\n",
" self.hist_exog_list = self.model.hist_exog_list\n",
" self.stat_exog_list = self.model.stat_exog_list\n",
"\n",
" def predict(self, dataset, step_size=1, random_seed=None, **data_module_kwargs):\n",
" \"\"\" HINT.predict\n",
4 changes: 2 additions & 2 deletions nbs/models.ipynb
@@ -1084,8 +1084,8 @@
" \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n",
" \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n",
" \"max_steps\": tune.quniform(lower=500, upper=1500, q=100),\n",
" \"batch_size\": tune.qloguniform(lower=5, upper=9, base=2, q=1), #[32, 64, 128, 256]\n",
" \"windows_batch_size\": tune.qloguniform(lower=7, upper=10, base=2, q=1), #[128, 256, 512, 1024]\n",
" \"batch_size\": tune.choice([32, 64, 128, 256]),\n",
" \"windows_batch_size\": tune.choice([128, 256, 512, 1024]),\n",
" \"loss\": None,\n",
" \"random_seed\": tune.randint(lower=1, upper=20),\n",
" }\n",
8 changes: 2 additions & 6 deletions neuralforecast/auto.py
@@ -624,12 +624,8 @@ class AutoNHITS(BaseAuto):
"learning_rate": tune.loguniform(1e-4, 1e-1),
"scaler_type": tune.choice([None, "robust", "standard"]),
"max_steps": tune.quniform(lower=500, upper=1500, q=100),
"batch_size": tune.qloguniform(
lower=5, upper=9, base=2, q=1
), # [32, 64, 128, 256]
"windows_batch_size": tune.qloguniform(
lower=7, upper=10, base=2, q=1
), # [128, 256, 512, 1024]
"batch_size": tune.choice([32, 64, 128, 256]),
"windows_batch_size": tune.choice([128, 256, 512, 1024]),
"loss": None,
"random_seed": tune.randint(lower=1, upper=20),
}
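
The AutoNHITS default space (here and in nbs/models.ipynb above) swaps the quantized log-uniform samplers for explicit choices. In Ray Tune, lower and upper in qloguniform are value bounds rather than exponents, so enumerating the intended power-of-two batch sizes with tune.choice makes the grid unambiguous, which may be why the explicit list was preferred. A side-by-side sketch of the two specifications:

    from ray import tune

    # Old: quantized log-uniform draws; the inline comments documented the
    # intended values as [32, 64, 128, 256] and [128, 256, 512, 1024].
    old_space = {
        "batch_size": tune.qloguniform(lower=5, upper=9, base=2, q=1),
        "windows_batch_size": tune.qloguniform(lower=7, upper=10, base=2, q=1),
    }

    # New: every trial samples one of exactly these values.
    new_space = {
        "batch_size": tune.choice([32, 64, 128, 256]),
        "windows_batch_size": tune.choice([128, 256, 512, 1024]),
    }
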
5 changes: 5 additions & 0 deletions neuralforecast/common/_base_auto.py
@@ -392,6 +392,11 @@ def fit(self, dataset, val_size=0, test_size=0, random_seed=None):
)
self.results = results

# Added attributes for compatibility with NeuralForecast core
self.futr_exog_list = self.model.futr_exog_list
self.hist_exog_list = self.model.hist_exog_list
self.stat_exog_list = self.model.stat_exog_list

def predict(self, dataset, step_size=1, **data_kwargs):
"""BaseAuto.predict
5 changes: 5 additions & 0 deletions neuralforecast/models/hint.py
@@ -206,6 +206,11 @@ def fit(self, dataset, val_size=0, test_size=0, random_seed=None):
random_seed=random_seed,
)

# Added attributes for compatibility with NeuralForecast core
self.futr_exog_list = self.model.futr_exog_list
self.hist_exog_list = self.model.hist_exog_list
self.stat_exog_list = self.model.stat_exog_list

def predict(self, dataset, step_size=1, random_seed=None, **data_module_kwargs):
"""HINT.predict
8 changes: 4 additions & 4 deletions neuralforecast/models/nhits.py
@@ -3,7 +3,7 @@
# %% auto 0
__all__ = ['NHITS']

# %% ../../nbs/models.nhits.ipynb 5
# %% ../../nbs/models.nhits.ipynb 6
from typing import Tuple, Optional

import numpy as np
@@ -14,7 +14,7 @@
from ..losses.pytorch import MAE
from ..common._base_windows import BaseWindows

# %% ../../nbs/models.nhits.ipynb 8
# %% ../../nbs/models.nhits.ipynb 9
class _IdentityBasis(nn.Module):
def __init__(
self,
@@ -68,7 +68,7 @@ def forward(self, theta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
forecast = forecast.permute(0, 2, 1)
return backcast, forecast

# %% ../../nbs/models.nhits.ipynb 9
# %% ../../nbs/models.nhits.ipynb 10
ACTIVATIONS = ["ReLU", "Softplus", "Tanh", "SELU", "LeakyReLU", "PReLU", "Sigmoid"]

POOLING = ["MaxPool1d", "AvgPool1d"]
@@ -179,7 +179,7 @@ def forward(
backcast, forecast = self.basis(theta)
return backcast, forecast

# %% ../../nbs/models.nhits.ipynb 10
# %% ../../nbs/models.nhits.ipynb 11
class NHITS(BaseWindows):
"""NHITS
2 changes: 1 addition & 1 deletion settings.ini
@@ -15,7 +15,7 @@ language = English
custom_sidebar = True
license = apache2
status = 2
requirements = numpy>=1.21.6 pandas>=1.3.5 torch>=2.0.0 pytorch-lightning>=2.0.0 ray[tune]>=2.2.0 optuna utilsforecast>=0.0.6 numba
requirements = numpy>=1.21.6 pandas>=1.3.5 torch>=2.0.0 pytorch-lightning>=2.0.0 ray[tune]==2.6.3 optuna utilsforecast>=0.0.6 numba
dev_requirements = nbdev black mypy flake8 matplotlib hyperopt
nbs_path = nbs
doc_path = _docs