[FEAT] - Add iTransformer to neuralforecast (#944)
* Add iTransformer and AutoiTransformer to nf

* Add docstring to iTransformer

* Fix config for auto version

* Use Multivariate windows and fix encoder, decoder and output shapes

* Fix AutoiTransformer and correct error when n_series is 1

* Add n_series parameter for AutoiTransformer tests

* Add n_series parameters to test AutoiTransformer

* Remove windows_batch_size arg

* Clean notebook and add another missing n_series param

* Fix conflict in model/__init__

* Run and clean itransformer notebook

* Run and clean core and models notebooks

* Place imports in separate cells
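Taken together, these commits add the iTransformer model and an AutoiTransformer wrapper for hyperparameter search. For orientation, a minimal usage sketch of the new model; this is not part of the diff and assumes the public NeuralForecast fit/predict API plus the two-series AirPassengersPanel example from neuralforecast.utils, with illustrative hyperparameters:

# Minimal sketch, not from the commit: fit iTransformer on a two-series panel
# and forecast 12 steps ahead.
from neuralforecast import NeuralForecast
from neuralforecast.models import iTransformer
from neuralforecast.utils import AirPassengersPanel

model = iTransformer(
    h=12,           # forecast horizon
    input_size=24,  # lookback window
    n_series=2,     # must match the number of series in the panel
    max_steps=100,  # illustrative; kept small for a smoke test
)
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersPanel)
forecasts = nf.predict()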
marcopeix authored Apr 5, 2024
1 parent c1f903b commit 6b0bca5
Showing 9 changed files with 1,749 additions and 11 deletions.
3 changes: 2 additions & 1 deletion nbs/core.ipynb
@@ -87,7 +87,7 @@
" TFT, VanillaTransformer,\n",
" Informer, Autoformer, FEDformer,\n",
" StemGNN, PatchTST, TimesNet, TimeLLM, TSMixer, TSMixerx,\n",
" MLPMultivariate\n",
" MLPMultivariate, iTransformer\n",
")"
]
},
@@ -228,6 +228,7 @@
" 'tsmixer': TSMixer, 'autotsmixer': TSMixer,\n",
" 'tsmixerx': TSMixerx, 'autotsmixerx': TSMixerx,\n",
" 'mlpmultivariate': MLPMultivariate, 'automlpmultivariate': MLPMultivariate,\n",
" 'itransformer': iTransformer, 'autoitransformer': iTransformer\n",
"}"
]
},
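The hunk above registers the new model under its lowercase aliases in the name-to-class mapping that core.ipynb uses to resolve model names back to classes (for example when reconstructing saved models). A trimmed, hypothetical sketch of that lookup pattern; the real dictionary covers every model in the library:

# Hypothetical stand-in for the registry extended above.
from neuralforecast.models import MLPMultivariate, iTransformer

MODEL_REGISTRY = {
    'mlpmultivariate': MLPMultivariate, 'automlpmultivariate': MLPMultivariate,
    'itransformer': iTransformer, 'autoitransformer': iTransformer,
}

def resolve_model(name: str):
    # Lowercasing makes 'iTransformer' and 'AutoiTransformer' both resolve,
    # and Auto* aliases map to the underlying base model class.
    return MODEL_REGISTRY[name.lower()]

assert resolve_model('AutoiTransformer') is iTransformer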
Binary file added nbs/imgs_models/iTransformer.png
152 changes: 152 additions & 0 deletions nbs/models.ipynb
@@ -60,6 +60,7 @@
"from neuralforecast.models.fedformer import FEDformer\n",
"from neuralforecast.models.patchtst import PatchTST\n",
"from neuralforecast.models.timesnet import TimesNet\n",
"from neuralforecast.models.itransformer import iTransformer\n",
"\n",
"from neuralforecast.models.stemgnn import StemGNN\n",
"from neuralforecast.models.hint import HINT\n",
@@ -2706,6 +2707,157 @@
"model.fit(dataset=dataset)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a61c3be9",
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"class AutoiTransformer(BaseAuto):\n",
"\n",
" default_config = {\n",
" \"input_size_multiplier\": [1, 2, 3, 4, 5],\n",
" \"h\": None,\n",
" \"n_series\": None,\n",
" \"hidden_size\": tune.choice([64, 128, 256]),\n",
" \"n_heads\": tune.choice([4, 8]),\n",
" \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n",
" \"scaler_type\": tune.choice([None, 'robust', 'standard']),\n",
" \"max_steps\": tune.choice([500, 1000, 2000]),\n",
" \"batch_size\": tune.choice([32, 64, 128, 256]),\n",
" \"loss\": None,\n",
" \"random_seed\": tune.randint(1, 20),\n",
" }\n",
"\n",
" def __init__(self,\n",
" h,\n",
" n_series,\n",
" loss=MAE(),\n",
" valid_loss=None,\n",
" config=None, \n",
" search_alg=BasicVariantGenerator(random_state=1),\n",
" num_samples=10,\n",
" refit_with_val=False,\n",
" cpus=cpu_count(),\n",
" gpus=torch.cuda.device_count(),\n",
" verbose=False,\n",
" alias=None,\n",
" backend='ray',\n",
" callbacks=None):\n",
" \n",
" # Define search space, input/output sizes\n",
" if config is None:\n",
" config = self.get_default_config(h=h, backend=backend, n_series=n_series) \n",
"\n",
" # Always use n_series from parameters, raise exception with Optuna because we can't enforce it\n",
" if backend == 'ray':\n",
" config['n_series'] = n_series\n",
" elif backend == 'optuna':\n",
" mock_trial = MockTrial()\n",
" if ('n_series' in config(mock_trial) and config(mock_trial)['n_series'] != n_series) or ('n_series' not in config(mock_trial)):\n",
" raise Exception(f\"config needs 'n_series': {n_series}\") \n",
"\n",
" super(AutoiTransformer, self).__init__(\n",
" cls_model=iTransformer, \n",
" h=h,\n",
" loss=loss,\n",
" valid_loss=valid_loss,\n",
" config=config,\n",
" search_alg=search_alg,\n",
" num_samples=num_samples, \n",
" refit_with_val=refit_with_val,\n",
" cpus=cpus,\n",
" gpus=gpus,\n",
" verbose=verbose,\n",
" alias=alias,\n",
" backend=backend,\n",
" callbacks=callbacks, \n",
" )\n",
"\n",
" @classmethod\n",
" def get_default_config(cls, h, backend, n_series):\n",
" config = cls.default_config.copy() \n",
" config['input_size'] = tune.choice([h * x \\\n",
" for x in config[\"input_size_multiplier\"]])\n",
"\n",
" # Rolling windows with step_size=1 or step_size=h\n",
" # See `BaseWindows` and `BaseRNN`'s create_windows\n",
" config['step_size'] = tune.choice([1, h])\n",
" del config[\"input_size_multiplier\"]\n",
" if backend == 'optuna':\n",
" # Always use n_series from parameters\n",
" config['n_series'] = n_series\n",
" config = cls._ray_config_to_optuna(config) \n",
"\n",
" return config "
]
},
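With the Ray backend, the constructor above silently pins config['n_series'] to the constructor argument. With Optuna, the config is a function of a trial and cannot be patched in place, so a mismatch or omission raises instead. A small illustration of that guard (hypothetical config function; assumes AutoiTransformer and its notebook dependencies are in scope):

# An Optuna-style config that omits 'n_series' trips the guard in __init__.
def bad_config(trial):
    return {'input_size': 12, 'max_steps': 1}  # no 'n_series' key

try:
    AutoiTransformer(h=12, n_series=2, config=bad_config, backend='optuna')
except Exception as e:
    print(e)  # -> config needs 'n_series': 2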
{
"cell_type": "code",
"execution_count": null,
"id": "8f416fa0",
"metadata": {},
"outputs": [],
"source": [
"show_doc(AutoiTransformer, title_level=3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ffd40db",
"metadata": {},
"outputs": [],
"source": [
"%%capture\n",
"# Use your own config or AutoiTransformer.default_config\n",
"config = dict(max_steps=1, val_check_steps=1, input_size=12, hidden_size=16)\n",
"model = AutoiTransformer(h=12, n_series=1, config=config, num_samples=1, cpus=1)\n",
"\n",
"# Fit and predict\n",
"model.fit(dataset=dataset)\n",
"y_hat = model.predict(dataset=dataset)\n",
"\n",
"# Optuna\n",
"model = AutoiTransformer(h=12, n_series=1, config=None, backend='optuna')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7a2052de",
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"# Check Optuna\n",
"assert model.config(MockTrial())['h'] == 12\n",
"\n",
"# Unit test to test that Auto* model contains all required arguments from BaseAuto\n",
"test_args(AutoiTransformer, exclude_args=['cls_model']) \n",
"\n",
"# Unit test for situation: Optuna with updated default config\n",
"my_config = AutoiTransformer.get_default_config(h=12, n_series=1, backend='optuna')\n",
"def my_config_new(trial):\n",
" config = {**my_config(trial)}\n",
" config.update({'max_steps': 1, 'val_check_steps': 1, 'input_size': 12, 'hidden_size': 16})\n",
" return config\n",
"\n",
"model = AutoiTransformer(h=12, n_series=1, config=my_config_new, backend='optuna', num_samples=1, cpus=1)\n",
"model.fit(dataset=dataset)\n",
"\n",
"# Unit test for situation: Ray with updated default config\n",
"my_config = AutoiTransformer.get_default_config(h=12, n_series=1, backend='ray')\n",
"my_config['max_steps'] = 1\n",
"my_config['val_check_steps'] = 1\n",
"my_config['input_size'] = 12\n",
"my_config['hidden_size'] = 16\n",
"model = AutoiTransformer(h=12, n_series=1, config=my_config, backend='ray', num_samples=1, cpus=1)\n",
"model.fit(dataset=dataset)"
]
},
{
"attachments": {},
"cell_type": "markdown",