import torch.nn as nn
import torch
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
# net = nn.RNN(100, 10)  # input_size=100 (e.g. 100-dim word embeddings), hidden_size=10
# print(net._parameters.keys())  # weight_ih_l0, weight_hh_l0, bias_ih_l0, bias_hh_l0
# Sequence prediction: at every time step, predict the next point of a sine wave
num_time_steps = 50   # number of points sampled per training curve
input_size = 1        # one feature per time step (the sine value)
hidden_size = 16
output_size = 1
lr = 0.01
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,  # input is [b, seq, feature]; with batch_first=False it would be [seq, b, feature]
        )
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden_prev):
        # hidden_prev is the initial hidden state h0; the returned hidden state is the last h_t,
        # while out collects the hidden output at every time step [h_1, h_2, h_3, ...]
        out, hidden_prev = self.rnn(x, hidden_prev)
        out = out.view(-1, hidden_size)   # [1, seq, h] => [seq, h]
        out = self.linear(out)            # [seq, h] => [seq, 1]
        out = out.unsqueeze(dim=0)        # [seq, 1] => [1, seq, 1]
        return out, hidden_prev
model = Net()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr)
hidden_prev = torch.zeros(1, 1, hidden_size)  # initial hidden state, [num_layers, b, hidden_size] = [1, 1, 16]
for iter in range(6000):
    # Sample a random 10-unit window of the sine curve and discretize it into num_time_steps points
    start = np.random.randint(10, size=1)[0]
    time_steps = np.linspace(start, start + 10, num_time_steps)
    data = np.sin(time_steps)
    data = data.reshape(num_time_steps, 1)
    # x is the curve at the first num_time_steps-1 points, y is the same curve shifted one step ahead
    x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)
    y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)

    output, hidden_prev = model(x, hidden_prev)
    hidden_prev = hidden_prev.detach()  # cut the graph so gradients do not flow across iterations

    loss = criterion(output, y)
    model.zero_grad()
    loss.backward()
    optimizer.step()

    if iter % 100 == 0:
        print("Iteration: {} loss: {}".format(iter, loss.item()))
predictions = []
input = x[:, 0, :]  # start from the first point of the last training window
for _ in range(x.shape[1]):
    input = input.view(1, 1, 1)
    pred, hidden_prev = model(input, hidden_prev)
    input = pred  # feed the prediction back in as the next input (autoregressive rollout)
    predictions.append(pred.detach().numpy().ravel()[0])
x = x.data.numpy().ravel()
y = y.data.numpy()
plt.scatter(time_steps[:-1], x, s=90)    # ground-truth input points
plt.plot(time_steps[1:], predictions)    # one-step-ahead predictions
plt.scatter(time_steps[1:], predictions)
plt.show()
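
# Optional sanity check beyond the plotted window (a minimal sketch, not part of the
# original script): keep rolling the trained model forward from the last observed value,
# reusing the current hidden state. The 50-step horizon is an arbitrary choice here.
with torch.no_grad():
    step = torch.tensor(data[-1]).float().view(1, 1, 1)  # last ground-truth value, shape [1, 1, 1]
    h = hidden_prev
    future = []
    for _ in range(50):
        step, h = model(step, h)  # output has shape [1, 1, 1] for a single-step input
        future.append(step.item())
    print("first extrapolated values:", future[:5])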