diff --git a/.github/workflows/build-docs.yaml b/.github/workflows/build-docs.yaml
new file mode 100644
index 000000000..e132568f7
--- /dev/null
+++ b/.github/workflows/build-docs.yaml
@@ -0,0 +1,51 @@
+name: "build-docs"
+on:
+ push:
+ branches: ["main"]
+ pull_request:
+ branches: ["main"]
+ workflow_dispatch:
+
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ build-docs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ submodules: 'recursive'
+ - uses: actions/setup-python@v4
+ with:
+ cache: "pip"
+ python-version: '3.10'
+ cache-dependency-path: settings.ini
+ - name: Build docs
+ run: |
+ set -ux
+ python -m pip install --upgrade pip
+ pip install -Uq nbdev
+ pip install -e ".[dev]"
+ mkdir nbs/_extensions
+ cp -r docs-scripts/mintlify/ nbs/_extensions/
+ python docs-scripts/update-quarto.py
+ nbdev_docs
+ - name: Apply final formats
+ run: bash ./docs-scripts/docs-final-formatting.bash
+ - name: Copy over necessary assets
+ run: |
+ cp nbs/mint.json _docs/mint.json
+ cp docs-scripts/imgs/* _docs/
+ - name: Deploy to Mintlify Docs
+ if: github.event_name == 'push'
+ uses: peaceiris/actions-gh-pages@v3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_branch: docs
+ publish_dir: ./_docs
+ # The following lines assign commit authorship to the official GH-Actions bot for deploys to `docs` branch.
+ # You can swap them out with your own user credentials.
+ user_name: github-actions[bot]
+ user_email: 41898282+github-actions[bot]@users.noreply.github.com
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..d785203a9
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "docs-scripts"]
+ path = docs-scripts
+ url = https://github.com/Nixtla/docs.git
+ branch = scripts
diff --git a/docs-scripts b/docs-scripts
new file mode 160000
index 000000000..d63d02696
--- /dev/null
+++ b/docs-scripts
@@ -0,0 +1 @@
+Subproject commit d63d02696ad23a3104636207152d2ac393291315
diff --git a/nbs/common.base_recurrent.ipynb b/nbs/common.base_recurrent.ipynb
index da17c378b..4a18ec099 100644
--- a/nbs/common.base_recurrent.ipynb
+++ b/nbs/common.base_recurrent.ipynb
@@ -691,11 +691,19 @@
"outputs": [],
"source": [
"#| hide\n",
- "# add h=0,1 unit test for _parse_windows \n",
"from neuralforecast.losses.pytorch import MAE\n",
"from neuralforecast.utils import AirPassengersDF\n",
- "from neuralforecast.tsdataset import TimeSeriesDataset, TimeSeriesDataModule\n",
- "\n",
+ "from neuralforecast.tsdataset import TimeSeriesDataset, TimeSeriesDataModule"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#| hide\n",
+ "# add h=0,1 unit test for _parse_windows \n",
"# Declare batch\n",
"AirPassengersDF['x'] = np.array(len(AirPassengersDF))\n",
"AirPassengersDF['x2'] = np.array(len(AirPassengersDF)) * 2\n",
@@ -729,13 +737,6 @@
"test_eq(set(temporal_data_cols), set(['y', 'x', 'x2']))\n",
"test_eq(windows['temporal'].shape, torch.Size([1,len(['y', 'x', 'x2', 'available_mask']),117,12+1]))"
]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
}
],
"metadata": {
diff --git a/nbs/examples/Automatic_Hyperparameter_Tuning.ipynb b/nbs/examples/Automatic_Hyperparameter_Tuning.ipynb
index e0d996b8d..225c63bd0 100644
--- a/nbs/examples/Automatic_Hyperparameter_Tuning.ipynb
+++ b/nbs/examples/Automatic_Hyperparameter_Tuning.ipynb
@@ -54,8 +54,7 @@
"outputs": [],
"source": [
"%%capture\n",
- "!pip install neuralforecast\n",
- "!pip install hyperopt"
+ "# !pip install neuralforecast hyperopt"
]
},
{
@@ -879,7 +878,6 @@
"
params_n_freq_downsample | \n",
" params_n_pool_kernel_size | \n",
" params_random_seed | \n",
- " user_attrs_ALL_PARAMS | \n",
" state | \n",
" \n",
" \n",
@@ -887,141 +885,131 @@
" \n",
" 0 | \n",
" 0 | \n",
- " 7.257883e+07 | \n",
- " 2023-10-03 11:20:55.218656 | \n",
- " 2023-10-03 11:21:03.084276 | \n",
- " 0 days 00:00:07.865620 | \n",
- " 0.063594 | \n",
- " [1, 1, 1] | \n",
+ " 2.964735e+01 | \n",
+ " 2023-10-23 19:13:30.251719 | \n",
+ " 2023-10-23 19:13:33.007086 | \n",
+ " 0 days 00:00:02.755367 | \n",
+ " 0.000074 | \n",
+ " [24, 12, 1] | \n",
" [2, 2, 2] | \n",
- " 6 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 2 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 1 | \n",
" 1 | \n",
- " 1.942069e+01 | \n",
- " 2023-10-03 11:21:03.084708 | \n",
- " 2023-10-03 11:21:11.257456 | \n",
- " 0 days 00:00:08.172748 | \n",
- " 0.000425 | \n",
- " [168, 24, 1] | \n",
+ " 2.790444e+03 | \n",
+ " 2023-10-23 19:13:33.007483 | \n",
+ " 2023-10-23 19:13:35.823089 | \n",
+ " 0 days 00:00:02.815606 | \n",
+ " 0.026500 | \n",
+ " [24, 12, 1] | \n",
" [2, 2, 2] | \n",
- " 6 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 10 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 2 | \n",
" 2 | \n",
- " 9.739689e+11 | \n",
- " 2023-10-03 11:21:11.258083 | \n",
- " 2023-10-03 11:21:19.065551 | \n",
- " 0 days 00:00:07.807468 | \n",
- " 0.075866 | \n",
+ " 2.193000e+01 | \n",
+ " 2023-10-23 19:13:35.823607 | \n",
+ " 2023-10-23 19:13:38.599414 | \n",
+ " 0 days 00:00:02.775807 | \n",
+ " 0.000337 | \n",
" [168, 24, 1] | \n",
" [2, 2, 2] | \n",
- " 1 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 7 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 3 | \n",
" 3 | \n",
- " 3.989237e+04 | \n",
- " 2023-10-03 11:21:19.065988 | \n",
- " 2023-10-03 11:21:27.578646 | \n",
- " 0 days 00:00:08.512658 | \n",
- " 0.020554 | \n",
- " [24, 12, 1] | \n",
- " [2, 2, 2] | \n",
- " 1 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 1.147799e+08 | \n",
+ " 2023-10-23 19:13:38.600149 | \n",
+ " 2023-10-23 19:13:41.440307 | \n",
+ " 0 days 00:00:02.840158 | \n",
+ " 0.059274 | \n",
+ " [1, 1, 1] | \n",
+ " [16, 8, 1] | \n",
+ " 5 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 4 | \n",
" 4 | \n",
- " 1.443079e+03 | \n",
- " 2023-10-03 11:21:27.579112 | \n",
- " 2023-10-03 11:21:35.763104 | \n",
- " 0 days 00:00:08.183992 | \n",
- " 0.027780 | \n",
- " [24, 12, 1] | \n",
+ " 2.140740e+01 | \n",
+ " 2023-10-23 19:13:41.440833 | \n",
+ " 2023-10-23 19:13:44.184860 | \n",
+ " 0 days 00:00:02.744027 | \n",
+ " 0.000840 | \n",
+ " [168, 24, 1] | \n",
" [16, 8, 1] | \n",
- " 7 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 5 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 5 | \n",
" 5 | \n",
- " 2.354673e+01 | \n",
- " 2023-10-03 11:21:35.763652 | \n",
- " 2023-10-03 11:21:44.838978 | \n",
- " 0 days 00:00:09.075326 | \n",
- " 0.000176 | \n",
- " [24, 12, 1] | \n",
- " [2, 2, 2] | \n",
- " 4 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 1.606544e+01 | \n",
+ " 2023-10-23 19:13:44.185291 | \n",
+ " 2023-10-23 19:13:46.945672 | \n",
+ " 0 days 00:00:02.760381 | \n",
+ " 0.005477 | \n",
+ " [1, 1, 1] | \n",
+ " [16, 8, 1] | \n",
+ " 8 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 6 | \n",
" 6 | \n",
- " 5.158909e+01 | \n",
- " 2023-10-03 11:21:44.839487 | \n",
- " 2023-10-03 11:21:55.577230 | \n",
- " 0 days 00:00:10.737743 | \n",
- " 0.000018 | \n",
- " [24, 12, 1] | \n",
- " [2, 2, 2] | \n",
- " 9 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 1.301640e+04 | \n",
+ " 2023-10-23 19:13:46.946108 | \n",
+ " 2023-10-23 19:13:49.805633 | \n",
+ " 0 days 00:00:02.859525 | \n",
+ " 0.056746 | \n",
+ " [1, 1, 1] | \n",
+ " [16, 8, 1] | \n",
+ " 3 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 7 | \n",
" 7 | \n",
- " 7.657581e+01 | \n",
- " 2023-10-03 11:21:55.578412 | \n",
- " 2023-10-03 11:22:05.263415 | \n",
- " 0 days 00:00:09.685003 | \n",
- " 0.017256 | \n",
- " [168, 24, 1] | \n",
+ " 4.972713e+01 | \n",
+ " 2023-10-23 19:13:49.806278 | \n",
+ " 2023-10-23 19:13:52.577180 | \n",
+ " 0 days 00:00:02.770902 | \n",
+ " 0.000021 | \n",
+ " [24, 12, 1] | \n",
" [2, 2, 2] | \n",
- " 8 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 9 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 8 | \n",
" 8 | \n",
- " 1.957761e+01 | \n",
- " 2023-10-03 11:22:05.263939 | \n",
- " 2023-10-03 11:22:14.527842 | \n",
- " 0 days 00:00:09.263903 | \n",
- " 0.004017 | \n",
- " [24, 12, 1] | \n",
+ " 2.138879e+01 | \n",
+ " 2023-10-23 19:13:52.577678 | \n",
+ " 2023-10-23 19:13:55.372792 | \n",
+ " 0 days 00:00:02.795114 | \n",
+ " 0.007136 | \n",
+ " [1, 1, 1] | \n",
" [2, 2, 2] | \n",
- " 7 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " 9 | \n",
" COMPLETE | \n",
"
\n",
" \n",
" 9 | \n",
" 9 | \n",
- " 3.355409e+01 | \n",
- " 2023-10-03 11:22:14.528438 | \n",
- " 2023-10-03 11:22:23.752345 | \n",
- " 0 days 00:00:09.223907 | \n",
- " 0.000065 | \n",
+ " 2.094145e+01 | \n",
+ " 2023-10-23 19:13:55.373149 | \n",
+ " 2023-10-23 19:13:58.125058 | \n",
+ " 0 days 00:00:02.751909 | \n",
+ " 0.004655 | \n",
" [1, 1, 1] | \n",
- " [16, 8, 1] | \n",
- " 7 | \n",
- " {'max_steps': 100, 'input_size': 24, 'learning... | \n",
+ " [2, 2, 2] | \n",
+ " 6 | \n",
" COMPLETE | \n",
"
\n",
" \n",
@@ -1030,52 +1018,40 @@
],
"text/plain": [
" number value datetime_start datetime_complete \\\n",
- "0 0 7.257883e+07 2023-10-03 11:20:55.218656 2023-10-03 11:21:03.084276 \n",
- "1 1 1.942069e+01 2023-10-03 11:21:03.084708 2023-10-03 11:21:11.257456 \n",
- "2 2 9.739689e+11 2023-10-03 11:21:11.258083 2023-10-03 11:21:19.065551 \n",
- "3 3 3.989237e+04 2023-10-03 11:21:19.065988 2023-10-03 11:21:27.578646 \n",
- "4 4 1.443079e+03 2023-10-03 11:21:27.579112 2023-10-03 11:21:35.763104 \n",
- "5 5 2.354673e+01 2023-10-03 11:21:35.763652 2023-10-03 11:21:44.838978 \n",
- "6 6 5.158909e+01 2023-10-03 11:21:44.839487 2023-10-03 11:21:55.577230 \n",
- "7 7 7.657581e+01 2023-10-03 11:21:55.578412 2023-10-03 11:22:05.263415 \n",
- "8 8 1.957761e+01 2023-10-03 11:22:05.263939 2023-10-03 11:22:14.527842 \n",
- "9 9 3.355409e+01 2023-10-03 11:22:14.528438 2023-10-03 11:22:23.752345 \n",
+ "0 0 2.964735e+01 2023-10-23 19:13:30.251719 2023-10-23 19:13:33.007086 \n",
+ "1 1 2.790444e+03 2023-10-23 19:13:33.007483 2023-10-23 19:13:35.823089 \n",
+ "2 2 2.193000e+01 2023-10-23 19:13:35.823607 2023-10-23 19:13:38.599414 \n",
+ "3 3 1.147799e+08 2023-10-23 19:13:38.600149 2023-10-23 19:13:41.440307 \n",
+ "4 4 2.140740e+01 2023-10-23 19:13:41.440833 2023-10-23 19:13:44.184860 \n",
+ "5 5 1.606544e+01 2023-10-23 19:13:44.185291 2023-10-23 19:13:46.945672 \n",
+ "6 6 1.301640e+04 2023-10-23 19:13:46.946108 2023-10-23 19:13:49.805633 \n",
+ "7 7 4.972713e+01 2023-10-23 19:13:49.806278 2023-10-23 19:13:52.577180 \n",
+ "8 8 2.138879e+01 2023-10-23 19:13:52.577678 2023-10-23 19:13:55.372792 \n",
+ "9 9 2.094145e+01 2023-10-23 19:13:55.373149 2023-10-23 19:13:58.125058 \n",
"\n",
" duration params_learning_rate params_n_freq_downsample \\\n",
- "0 0 days 00:00:07.865620 0.063594 [1, 1, 1] \n",
- "1 0 days 00:00:08.172748 0.000425 [168, 24, 1] \n",
- "2 0 days 00:00:07.807468 0.075866 [168, 24, 1] \n",
- "3 0 days 00:00:08.512658 0.020554 [24, 12, 1] \n",
- "4 0 days 00:00:08.183992 0.027780 [24, 12, 1] \n",
- "5 0 days 00:00:09.075326 0.000176 [24, 12, 1] \n",
- "6 0 days 00:00:10.737743 0.000018 [24, 12, 1] \n",
- "7 0 days 00:00:09.685003 0.017256 [168, 24, 1] \n",
- "8 0 days 00:00:09.263903 0.004017 [24, 12, 1] \n",
- "9 0 days 00:00:09.223907 0.000065 [1, 1, 1] \n",
- "\n",
- " params_n_pool_kernel_size params_random_seed \\\n",
- "0 [2, 2, 2] 6 \n",
- "1 [2, 2, 2] 6 \n",
- "2 [2, 2, 2] 1 \n",
- "3 [2, 2, 2] 1 \n",
- "4 [16, 8, 1] 7 \n",
- "5 [2, 2, 2] 4 \n",
- "6 [2, 2, 2] 9 \n",
- "7 [2, 2, 2] 8 \n",
- "8 [2, 2, 2] 7 \n",
- "9 [16, 8, 1] 7 \n",
+ "0 0 days 00:00:02.755367 0.000074 [24, 12, 1] \n",
+ "1 0 days 00:00:02.815606 0.026500 [24, 12, 1] \n",
+ "2 0 days 00:00:02.775807 0.000337 [168, 24, 1] \n",
+ "3 0 days 00:00:02.840158 0.059274 [1, 1, 1] \n",
+ "4 0 days 00:00:02.744027 0.000840 [168, 24, 1] \n",
+ "5 0 days 00:00:02.760381 0.005477 [1, 1, 1] \n",
+ "6 0 days 00:00:02.859525 0.056746 [1, 1, 1] \n",
+ "7 0 days 00:00:02.770902 0.000021 [24, 12, 1] \n",
+ "8 0 days 00:00:02.795114 0.007136 [1, 1, 1] \n",
+ "9 0 days 00:00:02.751909 0.004655 [1, 1, 1] \n",
"\n",
- " user_attrs_ALL_PARAMS state \n",
- "0 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "1 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "2 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "3 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "4 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "5 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "6 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "7 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "8 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE \n",
- "9 {'max_steps': 100, 'input_size': 24, 'learning... COMPLETE "
+ " params_n_pool_kernel_size params_random_seed state \n",
+ "0 [2, 2, 2] 2 COMPLETE \n",
+ "1 [2, 2, 2] 10 COMPLETE \n",
+ "2 [2, 2, 2] 7 COMPLETE \n",
+ "3 [16, 8, 1] 5 COMPLETE \n",
+ "4 [16, 8, 1] 5 COMPLETE \n",
+ "5 [16, 8, 1] 8 COMPLETE \n",
+ "6 [16, 8, 1] 3 COMPLETE \n",
+ "7 [2, 2, 2] 9 COMPLETE \n",
+ "8 [2, 2, 2] 9 COMPLETE \n",
+ "9 [2, 2, 2] 6 COMPLETE "
]
},
"execution_count": null,
@@ -1085,7 +1061,7 @@
],
"source": [
"results = nf.models[0].results.trials_dataframe()\n",
- "results"
+ "results.drop(columns='user_attrs_ALL_PARAMS')"
]
},
{
diff --git a/nbs/examples/HierarchicalNetworks.ipynb b/nbs/examples/HierarchicalNetworks.ipynb
index 17549bed5..9ab3e2ab9 100644
--- a/nbs/examples/HierarchicalNetworks.ipynb
+++ b/nbs/examples/HierarchicalNetworks.ipynb
@@ -114,7 +114,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Mathematically a hierarchical multivariate time series can be denoted by the vector $\\mathbf{y}_{[a,b],t}$ defined by the following aggregation constraint: \n",
+ "Mathematically a hierarchical multivariate time series can be denoted by the vector $\\mathbf{y}_{[a,b],t}$ defined by the following aggregation constraint:\n",
+ "\n",
"$$\n",
"\\mathbf{y}_{[a,b],t} = \\mathbf{S}_{[a,b][b]} \\mathbf{y}_{[b],t} \\quad \\Leftrightarrow \\quad \n",
"\\begin{bmatrix}\\mathbf{y}_{[a],t}\n",
@@ -332,7 +333,10 @@
"source": [
"To evaluate the coherent probabilistic predictions we use the scaled Continuous Ranked Probability Score (sCRPS), defined as follows:\n",
"\n",
- "$$\\mathrm{CRPS}(\\hat{F}_{[a,b],\\tau},\\mathbf{y}_{[a,b],\\tau}) = \\frac{2}{N_{a}+N_{b}} \\sum_{i} \\int^{1}_{0} \\mathrm{QL}(\\hat{F}_{i,\\tau}, y_{i,\\tau})_{q} dq$$\n",
+ "$$\n",
+ "\\mathrm{CRPS}(\\hat{F}_{[a,b],\\tau},\\mathbf{y}_{[a,b],\\tau}) = \n",
+ " \\frac{2}{N_{a}+N_{b}} \\sum_{i} \\int^{1}_{0} \\mathrm{QL}(\\hat{F}_{i,\\tau}, y_{i,\\tau})_{q} dq\n",
+ "$$\n",
"\n",
"$$\n",
"\\mathrm{sCRPS}(\\hat{F}_{[a,b\\,],\\tau},\\mathbf{y}_{[a,b\\,],\\tau}) = \n",
@@ -689,28 +693,10 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "- [Kin G. Olivares, David Luo, Cristian Challu, Stefania La Vattiata, Max Mergenthaler, Artur Dubrawski (2023). \"HINT: Hierarchical Mixture Networks For Coherent Probabilistic Forecasting\". International Conference on Machine Learning (ICML). Workshop on Structured Probabilistic Inference & Generative Modeling. Available at https://arxiv.org/abs/2305.07089.](https://arxiv.org/abs/2305.07089)
\n",
- "- [Kin G. Olivares, O. Nganba Meetei, Ruijun Ma, Rohan Reddy, Mengfei Cao, Lee Dicker (2023).\"Probabilistic Hierarchical Forecasting with Deep Poisson Mixtures\". International Journal Forecasting, accepted paper. URL https://arxiv.org/pdf/2110.13179.pdf.](https://arxiv.org/pdf/2110.13179.pdf)
\n",
+ "- [Kin G. Olivares, David Luo, Cristian Challu, Stefania La Vattiata, Max Mergenthaler, Artur Dubrawski (2023). \"HINT: Hierarchical Mixture Networks For Coherent Probabilistic Forecasting\". International Conference on Machine Learning (ICML). Workshop on Structured Probabilistic Inference & Generative Modeling. Available at https://arxiv.org/abs/2305.07089.](https://arxiv.org/abs/2305.07089)
\n",
+ "- [Kin G. Olivares, O. Nganba Meetei, Ruijun Ma, Rohan Reddy, Mengfei Cao, Lee Dicker (2023).\"Probabilistic Hierarchical Forecasting with Deep Poisson Mixtures\". International Journal Forecasting, accepted paper. URL https://arxiv.org/pdf/2110.13179.pdf.](https://arxiv.org/pdf/2110.13179.pdf)
\n",
"- [Kin G. Olivares, Federico Garza, David Luo, Cristian Challu, Max Mergenthaler, Souhaib Ben Taieb, Shanika Wickramasuriya, and Artur Dubrawski (2023). \"HierarchicalForecast: A reference framework for hierarchical forecasting\". Journal of Machine Learning Research, submitted. URL https://arxiv.org/abs/2207.03517](https://arxiv.org/abs/2207.03517)"
]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": []
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": []
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": []
}
],
"metadata": {
diff --git a/nbs/examples/PredictInsample.ipynb b/nbs/examples/PredictInsample.ipynb
index d87b932c1..b7ee8eb17 100644
--- a/nbs/examples/PredictInsample.ipynb
+++ b/nbs/examples/PredictInsample.ipynb
@@ -232,7 +232,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- ""
+ ""
]
},
{
diff --git a/nbs/mint.json b/nbs/mint.json
new file mode 100644
index 000000000..95609775e
--- /dev/null
+++ b/nbs/mint.json
@@ -0,0 +1,138 @@
+{
+ "$schema": "https://mintlify.com/schema.json",
+ "name": "Nixtla",
+ "logo": {
+ "light": "/light.png",
+ "dark": "/dark.png"
+ },
+ "favicon": "/favicon.svg",
+ "colors": {
+ "primary": "#0E0E0E",
+ "light": "#FAFAFA",
+ "dark": "#0E0E0E",
+ "anchors": {
+ "from": "#2AD0CA",
+ "to": "#0E00F8"
+ }
+ },
+ "topbarCtaButton": {
+ "type": "github",
+ "url": "https://github.com/Nixtla/neuralforecast"
+ },
+ "topAnchor": {
+ "name": "NeuralForecast",
+ "icon": "brain-circuit"
+ },
+ "navigation": [
+ {
+ "group": "",
+ "pages": ["index.html"]
+ },
+ {
+ "group": "Getting Started",
+ "pages": [
+ "examples/installation.html",
+ "examples/getting_started.html",
+ "examples/data_format.html",
+ "examples/exogenous_variables.html",
+ "examples/time_series_scaling.html",
+ "examples/automatic_hyperparameter_tuning.html",
+ "examples/predictinsample.html",
+ "examples/save_load_models.html",
+ "examples/getting_started_complete.html",
+ "examples/neuralforecast_map.html",
+ "examples/how_to_add_models.html"
+ ]
+ },
+ {
+ "group": "Tutorials",
+ "pages": [
+ "examples/signal_decomposition.html",
+ "examples/uncertaintyintervals.html",
+ "examples/longhorizon_probabilistic.html",
+ "examples/longhorizon_with_transformers.html",
+ "examples/intermittentdata.html",
+ "examples/robust_regression.html",
+ "examples/electricitypeakforecasting.html",
+ "examples/hierarchicalnetworks.html",
+ "examples/transfer_learning.html",
+ "examples/temporal_classifiers.html",
+ "examples/predictive_maintenance.html",
+ "examples/statsmlneuralmethods.html"
+ ]
+ },
+ {
+ "group": "",
+ "pages": ["examples/models_intro"]
+ },
+ {
+ "group": "API Reference",
+ "pages": [
+ "core.html",
+ "models.html",
+ "models.html",
+ {
+ "group": "Models' Documentation",
+ "pages": [
+ {
+ "group": "A. RNN-Based",
+ "pages": [
+ "models.rnn.html",
+ "models.gru.html",
+ "models.lstm.html",
+ "models.dilated_rnn.html",
+ "models.tcn.html",
+ "models.deepar.html"
+ ]
+ },
+ {
+ "group": "B. MLP-Based",
+ "pages": [
+ "models.mlp.html",
+ "models.nhits.html",
+ "models.nbeats.html",
+ "models.nbeatsx.html"
+ ]
+ },
+ {
+ "group": "C. Transformer-Based",
+ "pages": [
+ "models.tft.html",
+ "models.vanillatransformer.html",
+ "models.informer.html",
+ "models.autoformer.html",
+ "models.patchtst.html"
+ ]
+ },
+ {
+ "group": "D. CNN-Based",
+ "pages": ["models.timesnet.html"]
+ },
+ {
+ "group": "E. Multivariate",
+ "pages": ["models.hint.html", "models.stemgnn.html"]
+ }
+ ]
+ },
+ {
+ "group": "Train/Evaluation",
+ "pages": ["losses.pytorch.html", "losses.numpy.html"]
+ },
+ {
+ "group": "Common Components",
+ "pages": [
+ "common.base_auto.html",
+ "common.base_recurrent.html",
+ "common.base_windows.html",
+ "common.scalers.html",
+ "common.modules.html"
+ ]
+ },
+ {
+ "group": "Utils",
+ "pages": ["tsdataset.html", "utils.html"]
+ }
+ ]
+ }
+ ]
+}
diff --git a/nbs/models.hint.ipynb b/nbs/models.hint.ipynb
index 6a9a6730d..af885136b 100644
--- a/nbs/models.hint.ipynb
+++ b/nbs/models.hint.ipynb
@@ -100,7 +100,7 @@
"\n",
" **References:**
\n",
" - [Orcutt, G.H., Watts, H.W., & Edwards, J.B.(1968). \\\"Data aggregation and information loss\\\". The American \n",
- " Economic Review, 58 , 773{787)](http://www.jstor.org/stable/1815532). \n",
+ "    Economic Review, 58, 773-787](http://www.jstor.org/stable/1815532). \n",
" \"\"\"\n",
" n_series = len(S)\n",
" n_agg = n_series-S.shape[1]\n",
diff --git a/nbs/models.ipynb b/nbs/models.ipynb
index cd55676e1..0d143d00b 100644
--- a/nbs/models.ipynb
+++ b/nbs/models.ipynb
@@ -1687,7 +1687,7 @@
" \"input_size_multiplier\": [1, 2, 3],\n",
" \"h\": None,\n",
" \"hidden_size\": tune.choice([16, 128, 256]),\n",
- " \"n_head\": tune.choice([4, 16]),\n",
+ " \"n_heads\": tune.choice([4, 16]),\n",
" \"patch_len\": tune.choice([16, 24]),\n",
" \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n",
" \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n",
diff --git a/neuralforecast/auto.py b/neuralforecast/auto.py
index 8340ff215..58f8b8d8b 100644
--- a/neuralforecast/auto.py
+++ b/neuralforecast/auto.py
@@ -987,7 +987,7 @@ class AutoPatchTST(BaseAuto):
"input_size_multiplier": [1, 2, 3],
"h": None,
"hidden_size": tune.choice([16, 128, 256]),
- "n_head": tune.choice([4, 16]),
+ "n_heads": tune.choice([4, 16]),
"patch_len": tune.choice([16, 24]),
"learning_rate": tune.loguniform(1e-4, 1e-1),
"scaler_type": tune.choice([None, "robust", "standard"]),
diff --git a/neuralforecast/models/hint.py b/neuralforecast/models/hint.py
index 9e19a0a67..42457018f 100644
--- a/neuralforecast/models/hint.py
+++ b/neuralforecast/models/hint.py
@@ -24,7 +24,7 @@ def get_bottomup_P(S: np.ndarray):
**References:**
- [Orcutt, G.H., Watts, H.W., & Edwards, J.B.(1968). \"Data aggregation and information loss\". The American
- Economic Review, 58 , 773{787)](http://www.jstor.org/stable/1815532).
+      Economic Review, 58, 773-787](http://www.jstor.org/stable/1815532).
"""
n_series = len(S)
n_agg = n_series - S.shape[1]