
Commit e80464b
feat: update the hint type
LongxingTan committed Sep 1, 2023
1 parent 75f6d07 commit e80464b
Showing 25 changed files with 126 additions and 96 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -53,7 +53,7 @@ documentation = "https://time-series-prediction.readthedocs.io"
 homepage = "https://time-series-prediction.readthedocs.io"

 [tool.poetry.dependencies]
-python = ">=3.7.1,<3.11"
+python = ">=3.7.1,<3.12"

 optuna = "^2.3.0"
 pandas = "^1.2.0"
16 changes: 11 additions & 5 deletions tfts/datasets/get_data.py
@@ -2,6 +2,7 @@

 import logging
 import random
+from typing import List, Optional, Tuple, Union

 import numpy as np
 import pandas as pd
@@ -11,7 +12,9 @@
 )


-def get_data(name: str = "sine", train_length: int = 24, predict_length: int = 8, test_size: float = 0.1):
+def get_data(
+    name: str = "sine", train_length: int = 24, predict_length: int = 8, test_size: float = 0.1
+) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[Tuple[np.ndarray, np.ndarray]], None]:
     assert (test_size >= 0) & (test_size <= 1), "test_size is the ratio of test dataset"
     if name == "sine":
         return get_sine(train_length, predict_length, test_size=test_size)
@@ -23,7 +26,9 @@ def get_data(name: str = "sine", train_length: int = 24, predict_length: int = 8
         raise ValueError("unsupported data of {} yet, try 'sine', 'airpassengers'".format(name))


-def get_sine(train_sequence_length: int = 24, predict_sequence_length: int = 8, test_size: float = 0.2, n_examples=100):
+def get_sine(
+    train_sequence_length: int = 24, predict_sequence_length: int = 8, test_size: float = 0.2, n_examples: int = 100
+) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[Tuple[np.ndarray, np.ndarray]]]:
     """
     Generate synthetic sine wave data.
@@ -36,8 +41,8 @@ def get_sine(train_sequence_length: int = 24, predict_sequence_length: int = 8,
     Returns:
         (tuple): Two tuples of numpy arrays containing training and validation data.
     """
-    x = []
-    y = []
+    x: List[np.ndarray] = []
+    y: List[np.ndarray] = []
     for _ in range(n_examples):
         rand = random.random() * 2 * np.pi
         sig1 = np.sin(np.linspace(rand, 3.0 * np.pi + rand, train_sequence_length + predict_sequence_length))
@@ -86,7 +91,8 @@ def get_air_passengers(train_sequence_length: int = 24, predict_sequence_length:
     v = df.iloc[:, 1:2].values
     v = (v - np.max(v)) / (np.max(v) - np.min(v))  # MinMaxScaler

-    x, y = [], []
+    x: List[np.ndarray] = []
+    y: List[np.ndarray] = []
     for seq in range(1, train_sequence_length + 1):
         x_roll = np.roll(v, seq, axis=0)
         x.append(x_roll)
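
For orientation, a minimal sketch of calling the newly annotated function (the import path and the nested-tuple unpacking follow the file path and the new Union hint above; both are assumptions, not part of the diff):

    from tfts.datasets.get_data import get_data

    # test_size > 0 selects the nested-tuple branch of the new return hint
    (x_train, y_train), (x_valid, y_valid) = get_data("sine", train_length=24, predict_length=8, test_size=0.1)
    print(x_train.shape, y_train.shape)  # array shapes depend on n_examples and test_size
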
20 changes: 11 additions & 9 deletions tfts/layers/attention_layer.py
@@ -15,7 +15,7 @@
 class FullAttention(tf.keras.layers.Layer):
     """Multi-head attention layer"""

-    def __init__(self, hidden_size: int, num_heads: int, attention_dropout: float = 0.0):
+    def __init__(self, hidden_size: int, num_heads: int, attention_dropout: float = 0.0) -> None:
         """Initialize the layer.
         Parameters:
@@ -36,7 +36,7 @@ def __init__(self, hidden_size: int, num_heads: int, attention_dropout: float =
         self.num_heads = num_heads
         self.attention_dropout = attention_dropout

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]) -> None:
         self.dense_q = Dense(self.hidden_size, use_bias=False)
         self.dense_k = Dense(self.hidden_size, use_bias=False)
         self.dense_v = Dense(self.hidden_size, use_bias=False)
@@ -94,14 +94,16 @@ def get_config(self):


 class SelfAttention(tf.keras.layers.Layer):
-    def __init__(self, hidden_size: int, num_heads: int, attention_dropout: float = 0.0, **kwargs):
+    def __init__(
+        self, hidden_size: int, num_heads: int, attention_dropout: float = 0.0, **kwargs: Dict[str, Any]
+    ) -> None:
         super(SelfAttention, self).__init__()
         self.attention = FullAttention(hidden_size, num_heads, attention_dropout=attention_dropout)

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]) -> None:
         super(SelfAttention, self).build(input_shape)

-    def call(self, x, mask=None):
+    def call(self, x: tf.Tensor, mask: Optional[tf.Tensor] = None):
         """_summary_
         Parameters
@@ -132,7 +134,7 @@ def __init__(self, hidden_size: int = 128, num_heads: int = 1, attention_dropout
         self.factor = 5
         self.scale = None

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]) -> None:
         self.dense_q = Dense(self.hidden_size, use_bias=False)
         self.dense_k = Dense(self.hidden_size, use_bias=False)
         self.dense_v = Dense(self.hidden_size, use_bias=False)
@@ -235,7 +237,7 @@ class SparseAttention(tf.keras.layers.Layer):
     def __init__(self, hidden_size: int, num_heads: int, attention_dropout: float = 0.0, **kwargs):
         super().__init__()

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         super().build(input_shape)

     def call(self, x, mask=None):
@@ -256,10 +258,10 @@ def get_config(self):


 class FastAttention(tf.keras.layers.Layer):
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs) -> None:
         super().__init__()

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]) -> None:
         super().build(input_shape)

     def call(self, x, mask=None):
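
As a sanity check on the new build hint: Keras passes the input shape whose leading (batch) entry can be None under symbolic inputs, hence Tuple[Optional[int], ...]. A hedged usage sketch (the (q, k, v) call signature and shapes here are assumptions):

    import tensorflow as tf

    from tfts.layers.attention_layer import FullAttention

    attn = FullAttention(hidden_size=64, num_heads=4, attention_dropout=0.1)
    q = k = v = tf.random.normal([2, 16, 64])  # (batch, seq_len, hidden)
    out = attn(q, k, v)  # first call triggers build(); eagerly the shape is concrete, symbolically the batch dim is None
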
6 changes: 4 additions & 2 deletions tfts/layers/autoformer_layer.py
@@ -3,6 +3,7 @@
 """Layer for :py:class:`~tfts.models.autoformer`"""

 import math
+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union

 import tensorflow as tf
 from tensorflow.keras.layers import AveragePooling1D, Conv1D, Dense, Dropout
@@ -14,7 +15,7 @@ def __init__(self, kernel_size: int) -> None:
         self.kernel_size = kernel_size
         self.moving_avg = AveragePooling1D(pool_size=kernel_size, strides=1, padding="same")

-    def call(self, x):
+    def call(self, x: tf.Tensor):
         """
         Perform time-series decomposition on the input tensor.
@@ -51,12 +52,13 @@ def __init__(self, d_model: int, num_heads: int, attention_dropout: float = 0.0)
         self.depth = d_model // num_heads
         self.attention_dropout = attention_dropout

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.wq = Dense(self.d_model, name="q")
         self.wk = Dense(self.d_model, name="k")
         self.wv = Dense(self.d_model, name="v")
         self.drop = Dropout(self.attention_dropout)
         self.dense = Dense(self.d_model, name="project")
+        super().build(input_shape)

     def time_delay_agg(self, q, k, v):  # TODO: v not used in process
         """Compute time-delayed autocorrelation between queries and keys.
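
The call above performs a classic moving-average decomposition; the same idea in a self-contained sketch (not the repo's exact class, pool size assumed):

    import tensorflow as tf
    from tensorflow.keras.layers import AveragePooling1D

    x = tf.random.normal([2, 24, 1])  # (batch, time, features)
    moving_avg = AveragePooling1D(pool_size=5, strides=1, padding="same")
    trend = moving_avg(x)     # smoothed trend component
    seasonal = x - trend      # seasonal residual
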
6 changes: 4 additions & 2 deletions tfts/layers/cnn_layer.py
@@ -2,6 +2,8 @@
 # @author: Longxing Tan, tanlongxing888@163.com
 """Layer for :py:class:`~tfts.models.wavenet`"""

+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
+
 import tensorflow as tf
 from tensorflow.keras import activations, constraints, initializers, regularizers

@@ -31,7 +33,7 @@ def __init__(
         self.causal = causal
         self.kernel_initializer = initializers.get(kernel_initializer)

-    def build(self, input_shape):  # Create the weights
+    def build(self, input_shape: Tuple[Optional[int], ...]) -> None:
         self.conv = tf.keras.layers.Conv1D(
             kernel_size=self.kernel_size,
             kernel_initializer=self.kernel_initializer,
@@ -83,7 +85,7 @@ def __init__(self):
         self.temporal_conv = ConvTemp()
         self.att = SelfAttention()

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         super(ConvAttTemp, self).build(input_shape)

     def call(self, inputs):
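
ConvTemp's causal flag presumably selects Conv1D's causal padding; the behavioral difference in a self-contained sketch:

    import tensorflow as tf

    x = tf.random.normal([1, 24, 4])
    causal = tf.keras.layers.Conv1D(8, kernel_size=3, padding="causal", dilation_rate=2)
    same = tf.keras.layers.Conv1D(8, kernel_size=3, padding="same", dilation_rate=2)
    # Both preserve length 24; "causal" left-pads so step t never sees steps > t.
    print(causal(x).shape, same(x).shape)  # (1, 24, 8) (1, 24, 8)
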
4 changes: 3 additions & 1 deletion tfts/layers/deepar_layer.py
@@ -2,6 +2,8 @@
 # @author: Longxing Tan, tanlongxing888@163.com
 """Layer for :py:class:`~tfts.models.deepar`"""

+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
+
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.layers import Conv1D, Dense, Dropout
@@ -12,7 +14,7 @@ def __init__(self, units: int):
         self.units = units
         super(GaussianLayer, self).__init__()

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         in_channels = input_shape[2]
         self.weight1 = self.add_weight(
             name="gauss_w1", shape=(in_channels, self.units), initializer=tf.keras.initializers.GlorotNormal()
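
GaussianLayer.build indexes input_shape[2] (the channel count), which is why the hint is a full rank tuple rather than a bare int. A hedged usage sketch (the (mean, std) return pair is the DeepAR convention, assumed here):

    import tensorflow as tf

    from tfts.layers.deepar_layer import GaussianLayer

    layer = GaussianLayer(units=1)
    x = tf.random.normal([4, 24, 8])  # (batch, time, channels); build() reads input_shape[2] -> 8
    mu, sigma = layer(x)              # assumed per-step mean and standard deviation
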
4 changes: 2 additions & 2 deletions tfts/layers/dense_layer.py
@@ -31,7 +31,7 @@ def __init__(
         self.use_bias = use_bias
         self.bias_initializer = bias_initializer

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         inputs_units = int(input_shape[-1])  # input.get_shape().as_list()[-1]
         self.kernel = self.add_weight(
             "kernel",
@@ -81,7 +81,7 @@ def __init__(self, hidden_size: int, filter_size: int, relu_dropout: float = 0.0
         self.filter_size = filter_size
         self.relu_dropout = relu_dropout

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.filter_dense_layer = Dense(self.filter_size, use_bias=True, activation="relu")
         self.output_dense_layer = Dense(self.hidden_size, use_bias=True)
         self.drop = Dropout(self.relu_dropout)
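
The build above wires the standard position-wise feed-forward: expand to filter_size with ReLU, apply dropout, project back to hidden_size. A self-contained sketch of that pattern (sizes assumed):

    import tensorflow as tf
    from tensorflow.keras.layers import Dense, Dropout

    hidden_size, filter_size = 64, 256
    filter_dense = Dense(filter_size, use_bias=True, activation="relu")
    output_dense = Dense(hidden_size, use_bias=True)
    drop = Dropout(0.1)

    x = tf.random.normal([2, 16, hidden_size])
    y = output_dense(drop(filter_dense(x)))  # shape preserved: (2, 16, 64)
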
14 changes: 8 additions & 6 deletions tfts/layers/embed_layer.py
@@ -2,6 +2,8 @@
 # @author: Longxing Tan, tanlongxing888@163.com
 """Layer for :py:class:`~tfts.models.transformer`"""

+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
+
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.layers import GRU, LSTM, Conv1D, Dense, Dropout, Embedding, LayerNormalization, SpatialDropout1D
@@ -25,7 +27,7 @@ def __init__(self, embed_size: int):
         super(TokenEmbedding, self).__init__()
         self.embed_size = embed_size

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.token_weights = self.add_weight(
             name="token_weights",
             shape=[input_shape[-1], self.embed_size],
@@ -57,7 +59,7 @@ def __init__(self, embed_size: int) -> None:
         super().__init__()
         self.embed_size = embed_size

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.rnn = GRU(self.embed_size, return_sequences=True, return_state=True)
         super().build(input_shape)

@@ -89,7 +91,7 @@ def __init__(self, max_len: int = 5000):
         super(PositionalEmbedding, self).__init__()
         self.max_len = max_len

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         super(PositionalEmbedding, self).build(input_shape)

     def call(self, x, masking=True):
@@ -131,7 +133,7 @@ def __init__(self, max_len: int = 5000):
         super(PositionalEncoding, self).__init__()
         self.max_len = max_len

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         super(PositionalEncoding, self).build(input_shape)

     def call(self, x, masking=True):
@@ -177,7 +179,7 @@ class FixedEmbedding(tf.keras.layers.Layer):
     def __init__(self) -> None:
         super().__init__()

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.embed = tf.keras.layers.Embedding(input_dim=input_shape[1], output_dim=input_shape[2])
         super().build(input_shape)

@@ -234,7 +236,7 @@ def __init__(self, embed_size: int, dropout: float = 0.0):
         self.positional_embedding = PositionalEncoding()
         self.dropout = Dropout(dropout)

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         super(DataEmbedding, self).build(input_shape)

     def call(self, x):
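
Why Tuple[Optional[int], ...] fits build across all of these layers: Keras accepts a plain shape tuple, and only the batch entry is usually unknown. A quick check against a stock layer:

    import tensorflow as tf

    layer = tf.keras.layers.Dense(8)
    layer.build((None, 24, 4))  # a plain Tuple[Optional[int], ...] is accepted directly
    print(layer.kernel.shape)   # (4, 8): Dense only needs the last entry
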
2 changes: 2 additions & 0 deletions tfts/layers/mask_layer.py
@@ -2,6 +2,8 @@
 # @author: Longxing Tan, tanlongxing888@163.com
 """Layer for :py:class:`~tfts.models.transformer`"""

+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
+
 import tensorflow as tf
 from tensorflow.keras import activations, constraints, initializers, regularizers

9 changes: 6 additions & 3 deletions tfts/layers/nbeats_layer.py
@@ -2,6 +2,8 @@
 # @author: Longxing Tan, tanlongxing888@163.com
 """Layer for :py:class:`~tfts.models.nbeats`"""

+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
+
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.layers import Activation, Dense
@@ -19,7 +21,7 @@ def __init__(
         self.hidden_size = hidden_size
         self.n_block_layers = n_block_layers

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.layers = [Dense(self.hidden_size, activation="relu") for _ in range(self.n_block_layers)]
         self.theta = Dense(self.train_sequence_length + self.predict_sequence_length, use_bias=False, activation=None)
         super(GenericBlock, self).build(input_shape)
@@ -77,9 +79,10 @@ def __init__(
             axis=0,
         )

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.layers = [Dense(self.hidden_size, activation="relu") for _ in range(self.n_bloack_layers)]
         self.theta = Dense(2 * self.polynomial_size, use_bias=False, activation=None)
+        super().build(input_shape)

     def call(self, inputs):
         """_summary_
@@ -139,7 +142,7 @@ def __init__(self, train_sequence_length, predict_sequence_length, hidden_size,
         self.forecast_cos_template = tf.transpose(tf.cos(self.forecast_grid))
         self.forecast_sin_template = tf.transpose(tf.sin(self.forecast_grid))

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.layers = [Dense(self.hidden_size, activation="relu") for _ in range(self.n_block_layers)]
         self.theta = Dense(self.theta_size, use_bias=False, activation=None)

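
GenericBlock's theta layer spans train_sequence_length + predict_sequence_length outputs, which N-BEATS then splits into backcast and forecast. That split in a self-contained sketch (layer sizes assumed):

    import tensorflow as tf
    from tensorflow.keras.layers import Dense

    train_len, predict_len, hidden = 24, 8, 32
    fc_stack = [Dense(hidden, activation="relu") for _ in range(4)]
    theta_layer = Dense(train_len + predict_len, use_bias=False, activation=None)

    x = tf.random.normal([2, train_len])
    for layer in fc_stack:
        x = layer(x)
    theta = theta_layer(x)
    backcast, forecast = theta[:, :train_len], theta[:, train_len:]  # (2, 24) and (2, 8)
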
8 changes: 5 additions & 3 deletions tfts/layers/unet_layer.py
@@ -2,6 +2,8 @@
 # @author: Longxing Tan, tanlongxing888@163.com
 """Layer for :py:class:`~tfts.models.unet`"""

+from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
+
 import tensorflow as tf
 from tensorflow.keras.layers import Activation, Add, BatchNormalization, Conv1D, Dense, GlobalAveragePooling1D, Multiply

@@ -14,7 +16,7 @@ def __init__(self, units: int, kernel_size: int, strides: int, dilation: int):
         self.strides = strides
         self.dilation = dilation

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.conv1 = Conv1D(
             self.units, kernel_size=self.kernel_size, strides=self.strides, dilation_rate=self.dilation, padding="same"
         )
@@ -53,7 +55,7 @@ def __init__(self, units):
         super(SeBlock, self).__init__()
         self.units = units

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         self.pool = GlobalAveragePooling1D()
         self.fc1 = Dense(self.units // 8, activation="relu")
         self.fc2 = Dense(self.units, activation="sigmoid")
@@ -98,7 +100,7 @@ def __init__(self, units, kernel_size, strides, dilation, use_se):
         self.se_block = SeBlock(units=units)
         self.use_se = use_se

-    def build(self, input_shape):
+    def build(self, input_shape: Tuple[Optional[int], ...]):
         super(ReBlock, self).build(input_shape)

     def call(self, x):
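
SeBlock is a squeeze-and-excitation gate: global-average-pool over time, bottleneck to units // 8, expand back through a sigmoid, then reweight the input channels. A self-contained sketch (the final multiply is assumed to happen in the repo's call()):

    import tensorflow as tf
    from tensorflow.keras.layers import Dense, GlobalAveragePooling1D

    units = 64
    x = tf.random.normal([2, 24, units])         # (batch, time, channels)
    s = GlobalAveragePooling1D()(x)              # squeeze: (2, 64)
    s = Dense(units // 8, activation="relu")(s)  # bottleneck
    s = Dense(units, activation="sigmoid")(s)    # per-channel gate in (0, 1)
    y = x * s[:, None, :]                        # excite: reweighted (2, 24, 64)
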
4 changes: 2 additions & 2 deletions tfts/models/auto_config.py
@@ -18,7 +18,7 @@
 class AutoConfig(object):
     """AutoConfig for model"""

-    def __init__(self, use_model: str):
+    def __init__(self, use_model: str) -> None:
         if use_model.lower() == "seq2seq":
             self.params = seq2seq_params
         elif use_model.lower() == "rnn":
@@ -49,7 +49,7 @@ def __init__(self, use_model: str):
     def get_config(self):
         return self.params

-    def print_config(self):
+    def print_config(self) -> None:
         print(self.params)

     def save_config(self):
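
A hedged sketch of the AutoConfig surface touched here (the root-level import mirrors the project README; treat it as an assumption):

    from tfts import AutoConfig

    config = AutoConfig("rnn")    # selects rnn_params internally
    params = config.get_config()  # returns the raw params dict
    config.print_config()         # the newly annotated -> None helper
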
