-- Dataset: sin() of angles 1..1001 degrees. `input` is x(t), `target` is
-- x(t+1), so the network learns one-step-ahead prediction of the sine wave.
data = torch.range(1, 1001):apply(function(deg) return math.sin(math.rad(deg)) end)
input = data:narrow(1, 1, 1000)
target = data:narrow(1, 2, 1000)
-- Fill one mini-batch: column k holds a seq_length-long window of the wave,
-- starting at the shuffled offset for that batch entry.
for k = 1, batch_size do
  local offset = shuffle[k]
  inputs:narrow(2, k, 1):copy(input:narrow(1, offset, seq_length))
  targets:narrow(2, k, 1):copy(target:narrow(1, offset, seq_length))
end
-- Two-layer LSTM regressor (no_param -> 100 -> 100 -> no_param, tanh output),
-- wrapped in nn.Sequencer so one forward() call consumes a whole sequence.
local stack = nn.Sequential()
stack:add(nn.FastLSTM(no_param, 100))
stack:add(nn.FastLSTM(100, 100))
stack:add(nn.Linear(100, no_param))
stack:add(nn.Tanh())
lstm = nn.Sequencer(stack)
-- MSE loss applied at every time step of the sequence.
criterion = nn.SequencerCriterion(nn.MSECriterion())
Epoch: 489, error: 6.2851066044103e-05
Epoch: 490, error: 5.619043210411e-05
Epoch: 491, error: 4.9503041066306e-05
Epoch: 492, error: 9.3122510279393e-05
Epoch: 493, error: 5.871072009701e-05
Epoch: 494, error: 5.4363651393891e-05
Epoch: 495, error: 3.6511985437243e-05
Epoch: 496, error: 4.3361181429568e-05
Epoch: 497, error: 4.2539467036372e-05
Epoch: 498, error: 6.5783935830644e-05
Epoch: 499, error: 3.9669659029783e-05
Epoch: 500, error: 2.1134164055749e-05
lstm:forget() -- set hidden state to zero
local x = torch.Tensor(1, 1, 1):zero()
-- Warm-up: teacher-force `warmup` ground-truth points so the LSTM builds up
-- a meaningful hidden state before free-running generation.
local last_pred
for i = 1, warmup do
  x[1][1][1] = data[start + i - 1] -- start
  print(x[1][1][1])
  last_pred = lstm:forward(x)
end
-- BUG FIX: seed free-running generation with the prediction for the LAST
-- warm-up point. The original discarded it and re-forwarded the last warm-up
-- input, stepping the hidden state twice on the same value and shifting every
-- generated sample by one. clone() because nn modules reuse their output
-- buffer across forward() calls — feeding the aliased tensor back in could
-- corrupt the next step (NOTE(review): confirm against the rnn package's
-- buffer recycling). If warmup == 0 we fall back to the zero seed.
if last_pred then
  x = last_pred:clone()
end
print('output')
output = torch.Tensor(N)
for i = 1, N do
  local x_next = lstm:forward(x):clone()
  output[i] = x_next:squeeze()
  x = x_next
end
The sampled results look like this: in red, the warm-up points, and in green, the points sampled from the network.