Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
130 changes: 130 additions & 0 deletions examples/metaLearing_exp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
import sys
import os
sys.path.insert(0, os.getcwd())

def main() -> None:
    """Run an end-to-end meta-learning forecasting demo.

    Steps performed below:
      1. Generate a synthetic ETS dataset and standardize it.
      2. Create a ``MetaLearning`` pipeline (model *selection*, MSE loss).
      3. Extract meta-features of the time series with three deep-learning
         auto-encoders (MLP, LSTM and TCN encoder/decoder pairs) and
         statistical features with TsFresh.
      4. Add two base forecasters (KNN reduction and a naive forecaster).
      5. Fit a RandomForest meta-learner, then fit and predict.
    """

    ##########
    # dataset
    ##########

    LEN_TS = 39  # time series length
    FH = 7       # forecasting horizon

    # create synthetic data: 512 series from each of two ETS families
    from metats.datasets import ETSDataset
    ets_generator = ETSDataset({'A,N,N': 512,
                                'M,M,M': 512}, length=LEN_TS, freq=4)
    data, labels = ets_generator.load(return_family=True)
    # 1 for the 'A,N,N' family, 0 otherwise
    # NOTE(review): `colors` is computed but never used below — presumably
    # left over from a plotting step; confirm before removing.
    colors = list(map(lambda x: (x=='A,N,N')*1, labels))

    # scaling data (per-series standardization via the transpose trick)
    from sklearn.preprocessing import StandardScaler
    scaled_data = StandardScaler().fit_transform(data.T)
    data = scaled_data.T[:, :, None]  # size: batch_dim x series_length x series_dim
    print('data shape is:',data.shape)

    # create the MetaLearning pipeline
    from metats.pipeline import MetaLearning
    pipeline = MetaLearning(method='selection', loss='mse')

    ##########
    # MLP
    ##########

    from metats.features.unsupervised import DeepAutoEncoder
    from metats.features.deep import AutoEncoder, MLPEncoder, MLPDecoder

    # encoder/decoder see only the first LEN_TS-FH points; the last FH
    # points are held out as the forecasting target
    enc = MLPEncoder(input_size=data.shape[2], input_length=LEN_TS-FH, latent_size=8, hidden_layers=(16,))
    dec = MLPDecoder(input_size=data.shape[2], input_length=LEN_TS-FH, latent_size=8, hidden_layers=(16,))
    ae = AutoEncoder(encoder=enc, decoder=dec)
    ae_features = DeepAutoEncoder(auto_encoder=ae, epochs=100, verbose=False)

    pipeline.add_feature(ae_features)

    ##########
    # LSTM
    ##########

    from metats.features.unsupervised import DeepAutoEncoder
    from metats.features.deep import AutoEncoder, LSTMDecoder, LSTMEncoder

    H = 5   # hidden size
    l = 3   # latent size
    NL= 2   # number of layers

    enc = LSTMEncoder(input_size=data.shape[2], latent_size=l,
                      hidden_size=H, num_layers=NL, directions=1)
    dec = LSTMDecoder(output_length=LEN_TS-FH, output_size=data.shape[2],
                      latent_size=l, hidden_size=H, num_layers=NL, directions=1)
    ae = AutoEncoder(encoder=enc, decoder=dec)
    ae_features = DeepAutoEncoder(auto_encoder=ae, epochs=100, verbose=False)
    pipeline.add_feature(ae_features)

    ##########
    # TCN
    ##########

    from metats.features.unsupervised import DeepAutoEncoder
    from metats.features.deep import AutoEncoder, Encoder_Decoder_TCN

    # construct model; Encoder_Decoder_TCN builds a matched encoder/decoder pair
    EDTCN = Encoder_Decoder_TCN(input_size=data.shape[2], input_length=LEN_TS-FH,
                                hidden_layers=(4,1), kernel_size =4, dilation=2)
    enc = EDTCN.encoder
    dec = EDTCN.decoder
    ae = AutoEncoder(encoder=enc, decoder=dec)
    ae_features = DeepAutoEncoder(auto_encoder=ae, epochs=100, verbose=True)

    pipeline.add_feature(ae_features)

    ##########
    # TsFresh
    ##########

    from metats.features.statistical import TsFresh

    # adding TsFresh as statistical features extractor
    stat_features = TsFresh()
    pipeline.add_feature(stat_features)

    ##########
    # Forecasters
    ##########

    from sktime.forecasting.naive import NaiveForecaster
    from sktime.forecasting.compose import make_reduction
    from sklearn.neighbors import KNeighborsRegressor

    # creating two base-forecasters: a 1-NN reduction forecaster and a naive one
    regressor = KNeighborsRegressor(n_neighbors=1)
    forecaster1 = make_reduction(regressor, window_length=15, strategy="recursive")
    forecaster2 = NaiveForecaster()

    # adding base-forecasters to the pipeline
    pipeline.add_forecaster(forecaster1)
    pipeline.add_forecaster(forecaster2)

    ##########
    # MetaLearner
    ##########

    from sklearn.ensemble import RandomForestClassifier
    # adopt a random forest as meta-learner:
    # the meta-learner maps extracted features to
    # each base-forecaster's weights
    pipeline.add_metalearner(RandomForestClassifier())
    pipeline.fit(data, fh=FH)
    predict = pipeline.predict(data, fh=FH)
    print(predict.shape)

if __name__ == "__main__":
    main()
12 changes: 12 additions & 0 deletions examples/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
`metaLearing_exp.py` is a simple example that shows how to integrate three deep-learning feature-extraction methods and one statistical feature extractor to perform model selection between two base-forecasters.

You can run:
``` bash
python examples/metaLearing_exp.py
```
---
**NOTE:**

Good accuracy usually requires careful hyper-parameter selection for the neural-network architectures, among other settings.

---
84 changes: 55 additions & 29 deletions metats/features/deep.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,8 +89,8 @@ def forward(self, latent):
h0, c0 = self.get_initial(bsize)

out, _ = self.lstm(lstm_in, (h0, c0))
Y = 0.5 * (out[:, :, :self.output_size] + out[:, :, self.output_size:])
Y = Y.permute(1, 0, 2)
# Y = 0.5 * (out[:, :, :self.output_size] + out[:, :, self.output_size:])
Y = out.permute(1, 0, 2)
return Y


Expand Down Expand Up @@ -271,7 +271,7 @@ def forward(self, latent):
return Y


class Encoder_Decoder_TCN(nn.Module):
class Encoder_Decoder_TCN():
"""
A general class for Encoder decoder with
dilated Temporal Convolutional Networks (TCN).
Expand All @@ -296,37 +296,63 @@ def __init__(self, input_size, input_length, hidden_layers=(128,64),
raise ValueError(f"'Time series length' has to be divisible by number of "\
f"'hidden_layers', but {input_length} is not divisible by {2**len(hidden_layers)}!")

super().__init__()
depth = len(hidden_layers)
if activation == None:
activation = nn.Tanh


## Encoder:
model = []
for i in range(depth):
dilation_size = 2 ** i
in_channels = input_size if i == 0 else hidden_layers[i-1]
model.append(nn.Conv1d(in_channels, hidden_layers[i],
kernel_size, padding='same', dilation=dilation_size))
model.append(nn.Dropout(dropout))
model.append(activation())
model.append(nn.MaxPool1d(2))
self.encoder = nn.Sequential(*model)
self.encoder.latent_size = hidden_layers[-1]

## Decoder:
model = []
for i in range(depth-1,-1,-1):
dilation_size = 2 ** i
out_channels = input_size if i == 0 else hidden_layers[i-1]
model.append(nn.Upsample(scale_factor=2))
model.append(nn.Conv1d(hidden_layers[i], out_channels,
kernel_size=kernel_size, padding='same', dilation=dilation_size))
model.append(nn.Dropout(dropout))
model.append(activation())

self.decoder = nn.Sequential(*model)
class Encoder(nn.Module):
def __init__(self):
super().__init__()
model = []
for i in range(depth):
dilation_size = 2 ** i
in_channels = input_size if i == 0 else hidden_layers[i-1]
model.append(nn.Conv1d(in_channels, hidden_layers[i],
kernel_size, padding='same', dilation=dilation_size))
model.append(nn.Dropout(dropout))
model.append(activation())
model.append(nn.MaxPool1d(2))
self._encoder = nn.Sequential(*model)
self.latent_size = self._encoder_dim()

def _encoder_dim(self):
"""gets endoder laten dimension

Returns:
int: size of the latant dimension
"""
x=torch.randn(1, input_size, input_length)
encode = self._encoder(x).reshape(1,-1)
return encode.shape[1]

def forward(self,x):
x = x.permute(0, 2, 1)
y = self._encoder(x)
return torch.squeeze(y,dim=1)

## Decoder:
class Decoder(nn.Module):
def __init__(self):
super().__init__()
model = []
for i in range(depth-1,-1,-1):
dilation_size = 2 ** i
out_channels = input_size if i == 0 else hidden_layers[i-1]
model.append(nn.Upsample(scale_factor=2))
model.append(nn.Conv1d(hidden_layers[i], out_channels,
kernel_size=kernel_size, padding='same', dilation=dilation_size))
model.append(nn.Dropout(dropout))
model.append(activation())
self._decoder = nn.Sequential(*model)

def forward(self,x):
x = torch.unsqueeze(x,dim=1)
y = self._decoder(x)
return y

self.encoder = Encoder()
self.decoder = Decoder()

class AutoEncoder(nn.Module):
"""
Expand Down