import torch
import torch.nn as nn
import torch.nn.functional as F
import neuralforecast.losses.pytorch as losses
from typing import Optional
from neuralforecast.common._base_windows import BaseWindows
from neuralforecast.losses.pytorch import MAE
DeepNPTS

The Deep Non-Parametric Time Series Forecaster (DeepNPTS) is a non-parametric baseline model for time-series forecasting. The model generates predictions by sampling from the empirical distribution according to a tunable strategy, which is learned by exploiting the information across multiple related time series. The model provides a strong and simple baseline for time-series forecasting.

Losses

This implementation differs from the original work in that a weighted sum of the empirical distribution is returned as the forecast. Therefore, it only supports point losses.
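To make the forecast rule concrete, here is a minimal, self-contained sketch with toy tensors (illustrative only, not the model itself): the network produces one raw score per (insample step, horizon) pair, a softmax over the insample dimension turns the scores into weights, and the forecast is the weighted sum of the observed values.

import torch
import torch.nn.functional as F

B, L, h = 1, 4, 2                                            # batch, insample length, horizon
insample_y = torch.tensor([[1., 2., 3., 4.]]).unsqueeze(-1)  # [B, L, 1]
scores = torch.randn(B, L, h)                                # in the real model, the MLP output
weights = F.softmax(scores, dim=1)                           # weights over the L insample values
forecast = (weights * insample_y).sum(dim=1)                 # [B, h], weighted sum of past values
print(forecast)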
import logging
import warnings
from fastcore.test import test_eq
from nbdev.showdoc import show_doc

logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)
warnings.filterwarnings("ignore")

class DeepNPTS(BaseWindows):
""" DeepNPTS
Deep Non-Parametric Time Series Forecaster (`DeepNPTS`) is a baseline model for time-series forecasting. This model generates predictions by (weighted) sampling from the empirical distribution according to a learnable strategy. The strategy is learned by exploiting the information across multiple related time series.
**Parameters:**<br>
`h`: int, Forecast horizon. <br>
`input_size`: int, autoregressive inputs size, y=[1,2,3,4] input_size=2 -> y_[t-2:t]=[3,4].<br>
`hidden_size`: int=32, hidden size of dense layers.<br>
`batch_norm`: bool=True, if True, applies Batch Normalization after each dense layer in the network.<br>
`dropout`: float=0.1, dropout.<br>
`n_layers`: int=2, number of dense layers.<br>
`stat_exog_list`: str list, static exogenous columns.<br>
`hist_exog_list`: str list, historic exogenous columns.<br>
`futr_exog_list`: str list, future exogenous columns.<br>
`exclude_insample_y`: bool=False, the model skips the autoregressive features y[t-input_size:t] if True.<br>
`loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>
`valid_loss`: PyTorch module=`loss`, instantiated valid loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>
`max_steps`: int=1000, maximum number of training steps.<br>
`learning_rate`: float=1e-3, Learning rate between (0, 1).<br>
`num_lr_decays`: int=3, Number of learning rate decays, evenly distributed across max_steps.<br>
`early_stop_patience_steps`: int=-1, Number of validation iterations before early stopping.<br>
`val_check_steps`: int=100, Number of training steps between every validation loss check.<br>
`batch_size`: int=32, number of different series in each batch.<br>
`valid_batch_size`: int=None, number of different series in each validation and test batch, if None uses batch_size.<br>
`windows_batch_size`: int=1024, number of windows to sample in each training batch.<br>
`inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>
`start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>
`step_size`: int=1, step size between each window of temporal data.<br>
`scaler_type`: str='standard', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>
`random_seed`: int, random_seed for pytorch initializer and numpy generators.<br>
`num_workers_loader`: int=0, workers to be used by `TimeSeriesDataLoader`.<br>
`drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br>
`alias`: str, optional, Custom name of the model.<br>
`optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).<br>
`optimizer_kwargs`: dict, optional, parameters used by the user specified `optimizer`.<br>
`lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).<br>
`lr_scheduler_kwargs`: dict, optional, parameters used by the user specified `lr_scheduler`.<br>
`**trainer_kwargs`: int, keyword trainer arguments inherited from [PyTorch Lightning's trainer](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.trainer.trainer.Trainer.html?highlight=trainer).<br>
**References**<br>
- [Rangapuram, Syama Sundar, Jan Gasthaus, Lorenzo Stella, Valentin Flunkert, David Salinas, Yuyang Wang, and Tim Januschowski (2023). "Deep Non-Parametric Time Series Forecaster". arXiv.](https://arxiv.org/abs/2312.14657)<br>
"""
# Class attributes
SAMPLING_TYPE = 'windows'
EXOGENOUS_FUTR = True
EXOGENOUS_HIST = True
EXOGENOUS_STAT = True
def __init__(self,
h,
input_size: int = -1,
hidden_size: int = 32,
batch_norm: bool = True,
dropout: float = 0.1,
n_layers: int = 2,
futr_exog_list = None,
hist_exog_list = None,
stat_exog_list = None,
exclude_insample_y = False,
loss = MAE(),
valid_loss = MAE(),
max_steps: int = 1000,
learning_rate: float = 1e-3,
num_lr_decays: int = 3,
early_stop_patience_steps: int =-1,
val_check_steps: int = 100,
batch_size: int = 32,
valid_batch_size: Optional[int] = None,
windows_batch_size: int = 1024,
inference_windows_batch_size: int = 1024,
start_padding_enabled = False,
step_size: int = 1,
scaler_type: str = 'standard',
random_seed: int = 1,
num_workers_loader = 0,
drop_last_loader = False,
optimizer = None,
optimizer_kwargs = None,
lr_scheduler = None,
lr_scheduler_kwargs = None,
**trainer_kwargs):
if exclude_insample_y:
raise Exception('DeepNPTS does not support excluding the autoregressive inputs y.')
if not isinstance(loss, losses.BasePointLoss):
raise Exception('DeepNPTS only supports point loss functions (MAE, MSE, etc) as loss function.')
if not isinstance(valid_loss, losses.BasePointLoss):
raise Exception('DeepNPTS only supports point loss functions (MAE, MSE, etc) as valid loss function.')
# Inherit BaseWindows class
super(DeepNPTS, self).__init__(h=h,
input_size=input_size,
futr_exog_list=futr_exog_list,
hist_exog_list=hist_exog_list,
stat_exog_list=stat_exog_list,
exclude_insample_y = exclude_insample_y,
loss=loss,
valid_loss=valid_loss,
max_steps=max_steps,
learning_rate=learning_rate,
num_lr_decays=num_lr_decays,
early_stop_patience_steps=early_stop_patience_steps,
val_check_steps=val_check_steps,
batch_size=batch_size,
windows_batch_size=windows_batch_size,
valid_batch_size=valid_batch_size,
inference_windows_batch_size=inference_windows_batch_size,
start_padding_enabled=start_padding_enabled,
step_size=step_size,
scaler_type=scaler_type,
num_workers_loader=num_workers_loader,
drop_last_loader=drop_last_loader,
random_seed=random_seed,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
lr_scheduler=lr_scheduler,
lr_scheduler_kwargs=lr_scheduler_kwargs,
**trainer_kwargs)
self.h = h
self.hidden_size = hidden_size
self.dropout = dropout
input_dim = input_size * (1 + self.futr_exog_size + self.hist_exog_size) + self.stat_exog_size + self.h * self.futr_exog_size
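# Worked shape example (illustrative comment, not in the original source): with
# input_size=24, h=12, futr_exog_size=1, hist_exog_size=0 and stat_exog_size=1,
# input_dim = 24 * (1 + 1 + 0) + 1 + 12 * 1 = 61, and the MLP below maps
# [B, 61] -> [B, 24 * 12], i.e. one raw score per (insample step, horizon) pair.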
# Create DeepNPTSNetwork
modules = []
for i in range(n_layers):
modules.append(nn.Linear(input_dim if i == 0 else hidden_size, hidden_size))
modules.append(nn.ReLU())
if batch_norm:
modules.append(nn.BatchNorm1d(hidden_size))
if dropout > 0.0:
modules.append(nn.Dropout(dropout))
modules.append(nn.Linear(hidden_size, input_size * self.h))
self.deepnptsnetwork = nn.Sequential(*modules)
def forward(self, windows_batch):
# Parse windows_batch
x = windows_batch['insample_y'].unsqueeze(-1) # [B, L, 1]
hist_exog = windows_batch['hist_exog'] # [B, L, X]
futr_exog = windows_batch['futr_exog'] # [B, L + h, F]
stat_exog = windows_batch['stat_exog'] # [B, S]
batch_size, seq_len = x.shape[:2] # B = batch_size, L = seq_len
insample_y = windows_batch['insample_y'].unsqueeze(-1) # [B, L, 1], kept for the weighted sum
# Concatenate x_t with future exogenous inputs available over the insample window
if self.futr_exog_size > 0:
x = torch.cat((x, futr_exog[:, :seq_len]), dim=2) # [B, L, 1] + [B, L, F] -> [B, L, 1 + F]
# Concatenate x_t with historic exogenous variables
if self.hist_exog_size > 0:
x = torch.cat((x, hist_exog), dim=2) # [B, L, 1 + F] + [B, L, X] -> [B, L, 1 + F + X]
x = x.reshape(batch_size, -1) # [B, L, 1 + F + X] -> [B, L * (1 + F + X)]
# Concatenate x with static exogenous variables
if self.stat_exog_size > 0:
x = torch.cat((x, stat_exog), dim=1) # [B, L * (1 + F + X)] + [B, S] -> [B, L * (1 + F + X) + S]
# Concatenate x with future exogenous variables over the horizon
if self.futr_exog_size > 0:
futr_exog = futr_exog[:, seq_len:] # [B, L + h, F] -> [B, h, F]
futr_exog = futr_exog.reshape(batch_size, -1) # [B, L + h, F] -> [B, h * F]
x = torch.cat((x, futr_exog), dim=1) # [B, L * (1 + F + X) + S] + [B, h * F] -> [B, L * (1 + F + X) + S + h * F]
# Run through DeepNPTSNetwork
weights = self.deepnptsnetwork(x) # [B, L * (1 + F + X) + S + h * F] -> [B, L * h]
# Apply softmax to turn the scores into weights over the insample values
weights = weights.reshape(batch_size, seq_len, -1) # [B, L * h] -> [B, L, h]
x = F.softmax(weights, dim=1) * insample_y # [B, L, h] * [B, L, 1] = [B, L, h]
output = torch.sum(x, dim=1).unsqueeze(-1) # [B, L, h] -> [B, h, 1]
forecast = self.loss.domain_map(output) # [B, h, 1] -> [B, h]
return forecast

show_doc(DeepNPTS, title_level=3)

show_doc(DeepNPTS.fit, name='DeepNPTS.fit', title_level=3)

show_doc(DeepNPTS.predict, name='DeepNPTS.predict', title_level=3)

Usage Example
import pandas as pd
import matplotlib.pyplot as plt
from neuralforecast import NeuralForecast
from neuralforecast.models import DeepNPTS
from neuralforecast.utils import AirPassengersPanel, AirPassengersStatic
Y_train_df = AirPassengersPanel[AirPassengersPanel.ds<AirPassengersPanel['ds'].values[-12]] # 132 train
Y_test_df = AirPassengersPanel[AirPassengersPanel.ds>=AirPassengersPanel['ds'].values[-12]].reset_index(drop=True) # 12 test
nf = NeuralForecast(
models=[DeepNPTS(h=12,
input_size=24,
stat_exog_list=['airline1'],
futr_exog_list=['trend'],
max_steps=1000,
val_check_steps=10,
early_stop_patience_steps=3,
scaler_type='robust',
enable_progress_bar=True),
],
freq='M'
)
nf.fit(df=Y_train_df, static_df=AirPassengersStatic, val_size=12)
Y_hat_df = nf.predict(futr_df=Y_test_df)
# Plot predictions
Y_hat_df = Y_hat_df.reset_index(drop=False).drop(columns=['unique_id','ds'])
plot_df = pd.concat([Y_test_df, Y_hat_df], axis=1)
plot_df = pd.concat([Y_train_df, plot_df])
plot_df = plot_df[plot_df.unique_id=='Airline1'].drop('unique_id', axis=1)
plt.plot(plot_df['ds'], plot_df['y'], c='black', label='True')
plt.plot(plot_df['ds'], plot_df['DeepNPTS'], c='red', label='mean')
plt.grid()
plt.show()
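As a quick sanity check, the point forecasts can be scored against the held-out test set. A minimal sketch, assuming the `plot_df` built above, where the last 12 rows of the Airline1 slice are the test period:

import numpy as np

# MAE of the point forecast over the 12 held-out test months of Airline1.
test_slice = plot_df.tail(12)
mae = np.mean(np.abs(test_slice['y'].values - test_slice['DeepNPTS'].values))
print(f'Test MAE: {mae:.2f}')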