# PyTorch recurrent neural network for regression prediction
#
# Study video: Morvan Python (莫烦python)

# RNN for regression: predict cos(t) from sin(t)
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data as Data
import matplotlib.pyplot as plt
import torchvision


#hyper parameters
TIME_STEP=10     #rnn time steps: sequence length of each training window
INPUT_SIZE=1
LR=0.02   #learning rate

# t=np.linspace(0,np.pi*2,100,dtype=float)  #from zero to pi*2, and one hundred point there
# x=np.sin(t)
# y=np.cos(t)
# plt.plot(t,x,'r-',label='input (sin)')
# plt.plot(t,y,'b-',label='target (cos)')
# plt.legend(loc='best')
# plt.show()

class RNN_Net(nn.Module):
    """Single-layer RNN that maps an input sequence to an output sequence.

    Consumes a (batch, time, INPUT_SIZE) input and projects every hidden
    state to one scalar, producing a (batch, time, 1) output.
    """

    def __init__(self):
        super(RNN_Net, self).__init__()
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,
            num_layers=1,
            batch_first=True,   # tensors are (batch, time, feature)
        )
        self.out = nn.Linear(32, 1)  # hidden state -> scalar prediction

    def forward(self, x, h_state):
        """Run the RNN over the whole sequence x.

        Args:
            x: input tensor of shape (batch, time, INPUT_SIZE).
            h_state: previous hidden state of shape (1, batch, 32), or None
                to start from a zero state.

        Returns:
            A (outputs, h_state) pair: outputs has shape (batch, time, 1);
            h_state is the final hidden state to carry into the next call.
        """
        r_out, h_state = self.rnn(x, h_state)
        # nn.Linear operates on the last dimension and broadcasts over the
        # leading ones, so applying it to the whole (batch, time, 32) tensor
        # is equivalent to — and faster than — looping over the time steps
        # and stacking the per-step projections.
        return self.out(r_out), h_state

# Build the model, the optimizer, and the regression criterion.
rnn = RNN_Net()
# print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()

plt.ion()  # interactive mode: redraw the figure inside the training loop
h_state = None  # hidden state carried across windows; None starts from zeros
for step in range(60):
    # Each step trains on one pi-wide window of the curves: sin is the
    # input sequence and cos is the regression target.
    start, end = step * np.pi, (step + 1) * np.pi
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    x_np = np.sin(steps)
    y_np = np.cos(steps)

    # np.newaxis inserts the batch and feature axes, giving the
    # (batch=1, time=TIME_STEP, feature=1) shape a batch_first RNN expects.
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)  # h_state is None only on step 0
    # Re-wrap the hidden state as plain data so backprop does not reach
    # back through every previous window (truncated BPTT).
    h_state = h_state.data
    loss = loss_func(prediction, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    plt.plot(steps, y_np, 'r-')
    plt.plot(steps, prediction.detach().numpy().flatten(), 'b-')  # flatten to 1-D for plotting
    plt.draw()
    plt.pause(0.05)
plt.ioff()
plt.show()