From 4d7723345a5d4495f7f3adc26712d3020d52c5cb Mon Sep 17 00:00:00 2001
From: Olivier Sprangers
Date: Thu, 19 Dec 2024 11:06:07 +0100
Subject: [PATCH] update_readme_index_and_fix_ml_frameworks_example

---
 README.md                              |   9 +-
 nbs/examples/MLFrameworksExample.ipynb | 397 +------------------------
 nbs/index.ipynb                        |   7 +-
 3 files changed, 10 insertions(+), 403 deletions(-)

diff --git a/README.md b/README.md
index cfa4a00..db09914 100644
--- a/README.md
+++ b/README.md
@@ -81,7 +81,6 @@ from hierarchicalforecast.evaluation import evaluate
 from hierarchicalforecast.methods import BottomUp, TopDown, MiddleOut
 from utilsforecast.losses import mse
 
-
 # Load TourismSmall dataset
 Y_df, S, tags = HierarchicalData.load('./data', 'TourismSmall')
 Y_df['ds'] = pd.to_datetime(Y_df['ds'])
@@ -92,10 +91,9 @@ Y_test_df = Y_df.groupby('unique_id').tail(4)
 Y_train_df = Y_df.drop(Y_test_df.index)
 
 # Compute base auto-ARIMA predictions
-fcst = StatsForecast(df=Y_train_df,
-                     models=[AutoARIMA(season_length=4), Naive()],
-                     freq='Q', n_jobs=-1)
-Y_hat_df = fcst.forecast(h=4)
+fcst = StatsForecast(models=[AutoARIMA(season_length=4), Naive()],
+                     freq='QE', n_jobs=-1)
+Y_hat_df = fcst.forecast(df=Y_train_df, h=4)
 
 # Reconcile the base predictions
 reconcilers = [
@@ -116,7 +114,6 @@ Assumes you have a test dataframe.
 df = Y_rec_df.merge(Y_test_df, on=['unique_id', 'ds'])
 evaluation = evaluate(df = df,
                       tags = tags,
-                      train_df = Y_train_df,
                       metrics = [mse],
                       benchmark = "Naive")
 ```
diff --git a/nbs/examples/MLFrameworksExample.ipynb b/nbs/examples/MLFrameworksExample.ipynb
index 15cea8b..e8fd1e7 100644
--- a/nbs/examples/MLFrameworksExample.ipynb
+++ b/nbs/examples/MLFrameworksExample.ipynb
@@ -51,6 +51,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import os\n",
+    "os.environ[\"NIXTLA_ID_AS_COL\"] = \"1\"\n",
+    "\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "\n",
@@ -254,399 +257,7 @@
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:lightning_fabric.utilities.seed:Seed set to 1\n",
-      "INFO:pytorch_lightning.utilities.rank_zero:GPU available: False, used: False\n",
-      "INFO:pytorch_lightning.utilities.rank_zero:TPU available: False, using: 0 TPU cores\n",
-      "INFO:pytorch_lightning.utilities.rank_zero:HPU available: False, using: 0 HPUs\n",
-      "INFO:pytorch_lightning.callbacks.model_summary:\n",
-      " | Name | Type | Params | Mode \n",
-      "-------------------------------------------------------\n",
-      "0 | loss | GMM | 99 | train\n",
-      "1 | padder_train | ConstantPad1d | 0 | train\n",
-      "2 | scaler | TemporalNorm | 0 | train\n",
-      "3 | blocks | ModuleList | 2.8 M | train\n",
-      "-------------------------------------------------------\n",
-      "2.8 M Trainable params\n",
-      "999 Non-trainable params\n",
-      "2.8 M Total params\n",
-      "11.232 Total estimated model params size (MB)\n",
-      "31 Modules in train mode\n",
-      "0 Modules in eval mode\n"
-     ]
-    },
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "0476c56f8fb54718a911fcbd68086e1b",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "Sanity Checking: | | 0/? [00:00
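
For reference, below is a minimal sketch of the README quick start as it reads once this patch is applied. Only the `StatsForecast(...)`, `forecast(df=..., h=4)`, and `evaluate(...)` lines are taken from the hunks above; the imports, the reconciler list, and the `HierarchicalReconciliation.reconcile` call fall outside the hunks and are filled in here from the libraries' documented usage, so treat those parts as assumptions rather than as part of the change itself.

```python
# Sketch of the README "quick start" as it reads with this patch applied.
import pandas as pd
from datasetsforecast.hierarchical import HierarchicalData  # assumed import, not shown in the hunks
from statsforecast import StatsForecast
from statsforecast.models import AutoARIMA, Naive
from hierarchicalforecast.core import HierarchicalReconciliation
from hierarchicalforecast.evaluation import evaluate
from hierarchicalforecast.methods import BottomUp, TopDown, MiddleOut
from utilsforecast.losses import mse

# Load the TourismSmall dataset (unchanged README context)
Y_df, S, tags = HierarchicalData.load('./data', 'TourismSmall')
Y_df['ds'] = pd.to_datetime(Y_df['ds'])

# Hold out the last 4 quarters of every series for testing
Y_test_df = Y_df.groupby('unique_id').tail(4)
Y_train_df = Y_df.drop(Y_test_df.index)

# After the patch: the training frame goes to forecast() instead of the
# StatsForecast constructor, and the quarterly frequency alias is 'QE'
fcst = StatsForecast(models=[AutoARIMA(season_length=4), Naive()],
                     freq='QE', n_jobs=-1)
Y_hat_df = fcst.forecast(df=Y_train_df, h=4)

# Reconcile the base predictions. The reconciler list and the reconcile() call
# are outside the hunks above; this is an assumed, typical configuration.
reconcilers = [
    BottomUp(),
    TopDown(method='forecast_proportions'),
    MiddleOut(middle_level='Country/Purpose/State',
              top_down_method='forecast_proportions'),
]
hrec = HierarchicalReconciliation(reconcilers=reconcilers)
Y_rec_df = hrec.reconcile(Y_hat_df=Y_hat_df, Y_df=Y_train_df, S=S, tags=tags)

# After the patch: evaluate() no longer receives train_df
df = Y_rec_df.merge(Y_test_df, on=['unique_id', 'ds'])
evaluation = evaluate(df=df, tags=tags, metrics=[mse], benchmark='Naive')
```

In short, the README change moves the training frame from the `StatsForecast` constructor into `forecast()`, switches the quarterly frequency alias from `'Q'` to `'QE'`, and drops the `train_df` argument from `evaluate`. The notebook hunk additionally sets `NIXTLA_ID_AS_COL=1` before the imports, which, to the best of my understanding, makes the Nixtla libraries return `unique_id` as a regular column rather than as the index.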