[FEAT] - Add SOFTS model #1024

Merged 1 commit on Jun 13, 2024
4 changes: 2 additions & 2 deletions nbs/core.ipynb
@@ -91,7 +91,7 @@
" Informer, Autoformer, FEDformer,\n",
" StemGNN, PatchTST, TimesNet, TimeLLM, TSMixer, TSMixerx,\n",
" MLPMultivariate, iTransformer,\n",
" BiTCN, TiDE, DeepNPTS,\n",
" BiTCN, TiDE, DeepNPTS, SOFTS\n",
")"
]
},
@@ -240,7 +240,7 @@
" 'bitcn': BiTCN, 'autobitcn': BiTCN,\n",
" 'tide': TiDE, 'autotide': TiDE,\n",
" 'deepnpts': DeepNPTS, 'autodeepnpts': DeepNPTS,\n",
"\n",
" 'softs': SOFTS, 'autosofts': SOFTS\n",
"}"
]
},
Binary file added nbs/imgs_models/softs_architecture.png
429 changes: 429 additions & 0 deletions nbs/models.ipynb

Large diffs are not rendered by default.

1,019 changes: 1,019 additions & 0 deletions nbs/models.softs.ipynb

Large diffs are not rendered by default.

24 changes: 24 additions & 0 deletions neuralforecast/_modidx.py
@@ -96,6 +96,10 @@
'neuralforecast.auto.AutoRNN.__init__': ('models.html#autornn.__init__', 'neuralforecast/auto.py'),
'neuralforecast.auto.AutoRNN.get_default_config': ( 'models.html#autornn.get_default_config',
'neuralforecast/auto.py'),
'neuralforecast.auto.AutoSOFTS': ('models.html#autosofts', 'neuralforecast/auto.py'),
'neuralforecast.auto.AutoSOFTS.__init__': ('models.html#autosofts.__init__', 'neuralforecast/auto.py'),
'neuralforecast.auto.AutoSOFTS.get_default_config': ( 'models.html#autosofts.get_default_config',
'neuralforecast/auto.py'),
'neuralforecast.auto.AutoStemGNN': ('models.html#autostemgnn', 'neuralforecast/auto.py'),
'neuralforecast.auto.AutoStemGNN.__init__': ( 'models.html#autostemgnn.__init__',
'neuralforecast/auto.py'),
@@ -960,6 +964,26 @@
'neuralforecast/models/rnn.py'),
'neuralforecast.models.rnn.RNN.forward': ( 'models.rnn.html#rnn.forward',
'neuralforecast/models/rnn.py')},
'neuralforecast.models.softs': { 'neuralforecast.models.softs.DataEmbedding_inverted': ( 'models.softs.html#dataembedding_inverted',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.DataEmbedding_inverted.__init__': ( 'models.softs.html#dataembedding_inverted.__init__',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.DataEmbedding_inverted.forward': ( 'models.softs.html#dataembedding_inverted.forward',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.SOFTS': ( 'models.softs.html#softs',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.SOFTS.__init__': ( 'models.softs.html#softs.__init__',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.SOFTS.forecast': ( 'models.softs.html#softs.forecast',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.SOFTS.forward': ( 'models.softs.html#softs.forward',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.STAD': ( 'models.softs.html#stad',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.STAD.__init__': ( 'models.softs.html#stad.__init__',
'neuralforecast/models/softs.py'),
'neuralforecast.models.softs.STAD.forward': ( 'models.softs.html#stad.forward',
'neuralforecast/models/softs.py')},
'neuralforecast.models.stemgnn': { 'neuralforecast.models.stemgnn.GLU': ( 'models.stemgnn.html#glu',
'neuralforecast/models/stemgnn.py'),
'neuralforecast.models.stemgnn.GLU.__init__': ( 'models.stemgnn.html#glu.__init__',
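For orientation: the index entries above list the three building blocks added in neuralforecast/models/softs.py, namely DataEmbedding_inverted, the STAD module (a star-shaped aggregate-and-dispatch mechanism), and the SOFTS model itself. Below is a rough, hedged sketch of the STAD idea only (aggregate per-series representations into a shared core, then dispatch that core back to every series); the class name STADSketch, the mean pooling, and the single fusion layer are illustrative and differ from the implementation in this PR.

import torch
import torch.nn as nn

class STADSketch(nn.Module):
    """Illustrative only: pool series embeddings into a core and fuse it back."""

    def __init__(self, hidden_size: int, d_core: int):
        super().__init__()
        self.to_core = nn.Linear(hidden_size, d_core)
        self.fuse = nn.Linear(hidden_size + d_core, hidden_size)

    def forward(self, x):
        # x: [batch, n_series, hidden_size]
        core = self.to_core(x).mean(dim=1, keepdim=True)  # aggregate across all series
        core = core.expand(-1, x.shape[1], -1)            # dispatch the core to every series
        return self.fuse(torch.cat([x, core], dim=-1))    # fuse core with each series embedding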
88 changes: 87 additions & 1 deletion neuralforecast/auto.py
@@ -5,7 +5,7 @@
'AutoNBEATSx', 'AutoNHITS', 'AutoDLinear', 'AutoNLinear', 'AutoTiDE', 'AutoDeepNPTS', 'AutoTFT',
'AutoVanillaTransformer', 'AutoInformer', 'AutoAutoformer', 'AutoFEDformer', 'AutoPatchTST',
'AutoiTransformer', 'AutoTimesNet', 'AutoStemGNN', 'AutoHINT', 'AutoTSMixer', 'AutoTSMixerx',
'AutoMLPMultivariate']
'AutoMLPMultivariate', 'AutoSOFTS']

# %% ../nbs/models.ipynb 2
from os import cpu_count
@@ -48,6 +48,7 @@
from .models.tsmixer import TSMixer
from .models.tsmixerx import TSMixerx
from .models.mlpmultivariate import MLPMultivariate
from .models.softs import SOFTS

from .losses.pytorch import MAE, MQLoss, DistributionLoss

@@ -2079,3 +2080,88 @@ def get_default_config(cls, h, backend, n_series):
config = cls._ray_config_to_optuna(config)

return config

# %% ../nbs/models.ipynb 130
class AutoSOFTS(BaseAuto):

default_config = {
"input_size_multiplier": [1, 2, 3, 4, 5],
"h": None,
"n_series": None,
"hidden_size": tune.choice([64, 128, 256, 512]),
"d_core": tune.choice([64, 128, 256, 512]),
"learning_rate": tune.loguniform(1e-4, 1e-1),
"scaler_type": tune.choice([None, "robust", "standard", "identity"]),
"max_steps": tune.choice([500, 1000, 2000]),
"batch_size": tune.choice([32, 64, 128, 256]),
"loss": None,
"random_seed": tune.randint(1, 20),
}

def __init__(
self,
h,
n_series,
loss=MAE(),
valid_loss=None,
config=None,
search_alg=BasicVariantGenerator(random_state=1),
num_samples=10,
refit_with_val=False,
cpus=cpu_count(),
gpus=torch.cuda.device_count(),
verbose=False,
alias=None,
backend="ray",
callbacks=None,
):

# Define search space, input/output sizes
if config is None:
config = self.get_default_config(h=h, backend=backend, n_series=n_series)

# Always use n_series from parameters, raise exception with Optuna because we can't enforce it
if backend == "ray":
config["n_series"] = n_series
elif backend == "optuna":
mock_trial = MockTrial()
if (
"n_series" in config(mock_trial)
and config(mock_trial)["n_series"] != n_series
) or ("n_series" not in config(mock_trial)):
raise Exception(f"config needs 'n_series': {n_series}")

super(AutoSOFTS, self).__init__(
cls_model=SOFTS,
h=h,
loss=loss,
valid_loss=valid_loss,
config=config,
search_alg=search_alg,
num_samples=num_samples,
refit_with_val=refit_with_val,
cpus=cpus,
gpus=gpus,
verbose=verbose,
alias=alias,
backend=backend,
callbacks=callbacks,
)

@classmethod
def get_default_config(cls, h, backend, n_series):
config = cls.default_config.copy()
config["input_size"] = tune.choice(
[h * x for x in config["input_size_multiplier"]]
)

# Rolling windows with step_size=1 or step_size=h
# See `BaseWindows` and `BaseRNN`'s create_windows
config["step_size"] = tune.choice([1, h])
del config["input_size_multiplier"]
if backend == "optuna":
# Always use n_series from parameters
config["n_series"] = n_series
config = cls._ray_config_to_optuna(config)

return config
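A minimal usage sketch of the new AutoSOFTS class (not part of this diff), assuming the standard NeuralForecast fit/predict workflow and the AirPassengersPanel example dataset; the dataframe name Y_df, the horizon, and the trial budget are illustrative only.

from neuralforecast import NeuralForecast
from neuralforecast.auto import AutoSOFTS
from neuralforecast.utils import AirPassengersPanel

# SOFTS is a multivariate model, so n_series must match the number of unique series ids.
Y_df = AirPassengersPanel[["unique_id", "ds", "y"]]
n_series = Y_df["unique_id"].nunique()

# Tune over the default search space defined above with a small trial budget.
model = AutoSOFTS(h=12, n_series=n_series, num_samples=5, backend="ray")
nf = NeuralForecast(models=[model], freq="M")
nf.fit(df=Y_df)
forecasts = nf.predict()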
3 changes: 3 additions & 0 deletions neuralforecast/core.py
@@ -59,6 +59,7 @@
BiTCN,
TiDE,
DeepNPTS,
SOFTS,
)

# %% ../nbs/core.ipynb 5
@@ -176,6 +177,8 @@ def _insample_times(
"autotide": TiDE,
"deepnpts": DeepNPTS,
"autodeepnpts": DeepNPTS,
"softs": SOFTS,
"autosofts": SOFTS,
}

# %% ../nbs/core.ipynb 8
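A hedged sketch of what the new 'softs' / 'autosofts' entries enable: resolving the class by its lowercase name when a saved pipeline is reloaded. The round trip below assumes the existing NeuralForecast.save / NeuralForecast.load API; the path and hyperparameters are illustrative, and Y_df is the same dataframe as in the AutoSOFTS sketch above.

from neuralforecast import NeuralForecast
from neuralforecast.models import SOFTS

# Train a SOFTS model directly (no hyperparameter search).
nf = NeuralForecast(models=[SOFTS(h=12, input_size=24, n_series=2)], freq="M")
nf.fit(df=Y_df)

# Saving writes one checkpoint per model; on load, the dictionary extended above
# maps the lowercase model name back to the SOFTS class.
nf.save(path="./checkpoints", overwrite=True)
nf2 = NeuralForecast.load(path="./checkpoints")
preds = nf2.predict()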
3 changes: 2 additions & 1 deletion neuralforecast/models/__init__.py
@@ -2,7 +2,7 @@
'MLP', 'NHITS', 'NBEATS', 'NBEATSx', 'DLinear', 'NLinear',
'TFT', 'VanillaTransformer', 'Informer', 'Autoformer', 'PatchTST', 'FEDformer',
'StemGNN', 'HINT', 'TimesNet', 'TimeLLM', 'TSMixer', 'TSMixerx', 'MLPMultivariate',
'iTransformer', 'BiTCN', 'TiDE', 'DeepNPTS'
'iTransformer', 'BiTCN', 'TiDE', 'DeepNPTS', 'SOFTS'
]

from .rnn import RNN
@@ -34,3 +34,4 @@
from .bitcn import BiTCN
from .tide import TiDE
from .deepnpts import DeepNPTS
from .softs import SOFTS