import torch
import torch.nn.functional as F    # activation functions live here
# turn the 1-D data into 2-D
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)   # x data (tensor), shape=(100, 1)
# add some noise
y = x.pow(2) + 0.2 * torch.rand(x.size())                # noisy y data (tensor), shape=(100, 1)
loss_func = torch.nn.MSELoss()
losses_his = [[], [], [], []]   # record the loss of each network during training
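The training loop below iterates over `nets`, `optimizers` and `loader`, which are not defined in this snippet. A minimal sketch of how they might be built, assuming four copies of a small two-layer net compared under SGD / Momentum / RMSprop / Adam (names and hyperparameters are assumptions):

import torch.utils.data as Data

# assumption: one small net per optimizer
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        return self.predict(x)

LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

net_SGD      = Net()
net_Momentum = Net()
net_RMSprop  = Net()
net_Adam     = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

opt_SGD      = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop  = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam     = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)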
for epoch in range(EPOCH):          # EPOCH = 12
    print('Epoch: ', epoch)
    for step, (b_x, b_y) in enumerate(loader):
        # for each optimizer, train the network that belongs to it
        for net, opt, l_his in zip(nets, optimizers, losses_his):   # each of these is a list
            output = net(b_x)                # get output for every net
            loss = loss_func(output, b_y)    # compute loss for every net
            opt.zero_grad()                  # clear gradients for next train
            loss.backward()                  # backpropagation, compute gradients
            opt.step()                       # apply gradients
            l_his.append(loss.data.numpy())  # loss recorder
CNN
A convolution collects the information in a small patch of the image and summarizes it.
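A minimal sketch of that idea, assuming a single-channel 28x28 input and an arbitrary 5x5 kernel:

import torch
import torch.nn as nn

img = torch.randn(1, 1, 28, 28)    # (batch, channel, height, width)
conv = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2)
feature_map = conv(img)            # each output value summarizes one 5x5 patch
print(feature_map.shape)           # torch.Size([1, 16, 28, 28])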
mnist
The MNIST dataset lives in the \MNIST\processed folder under root.
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision                 # datasets module
import matplotlib.pyplot as plt
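The training loop below uses `train_loader`, `test_x`, `test_y` and a `cnn` module that are not shown in this snippet. A minimal sketch of how they might be set up (root path, hyperparameters and layer sizes are assumptions):

EPOCH = 1
BATCH_SIZE = 50
LR = 0.001
DOWNLOAD_MNIST = True   # set to False once the data is already under ./mnist

train_data = torchvision.datasets.MNIST(
    root='./mnist/',
    train=True,
    transform=torchvision.transforms.ToTensor(),   # (0, 255) -> (0.0, 1.0)
    download=DOWNLOAD_MNIST,
)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# take the first 2000 test samples, add a channel dim and scale to [0, 1]
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.targets[:2000]

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(           # input shape (1, 28, 28)
            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),   # -> (16, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),      # -> (16, 14, 14)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),       # -> (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(2),                  # -> (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)  # 10 classes

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)             # flatten to (batch, 32*7*7)
        return self.out(x)

cnn = CNN()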
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hotted
# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):   # distribute batch data, x is normalized while iterating train_loader
        output = cnn(b_x)               # cnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

        if step % 50 == 0:
            test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            accuracy = float((pred_y == test_y).sum()) / test_y.size(0)
            print('Epoch:', epoch, '| train loss %.4f' % loss.data, '| test accuracy: %.2f' % accuracy)
Epoch: 0 | train loss 2.2979 | test accuracy: 0.13
Epoch: 0 | train loss 0.2706 | test accuracy: 0.84
Epoch: 0 | train loss 0.2919 | test accuracy: 0.89
Epoch: 0 | train loss 0.1879 | test accuracy: 0.92
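The next snippets train an RNN classifier on the same MNIST batches, but the `rnn` module itself is not shown. A minimal sketch of what it could look like, assuming an nn.LSTM that reads each 28x28 image as 28 time steps of 28 pixels (the hidden size is an assumption):

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(
            input_size=28,      # each row of the image is one input vector
            hidden_size=64,
            num_layers=1,
            batch_first=True,   # input/output shape (batch, time_step, input_size)
        )
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        r_out, (h_n, h_c) = self.rnn(x, None)   # None: zero initial hidden state
        return self.out(r_out[:, -1, :])        # use the output of the last time step

rnn = RNN()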
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all parameters
loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hotted
for epoch in range(EPOCH):
    for step, (x, b_y) in enumerate(train_loader):
        b_x = x.view(-1, 28, 28)        # reshape x to (batch, time_step, input_size)
        output = rnn(b_x)               # rnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients
Epoch: 0 | train loss 2.2883 | test accuracy: 0.10
Epoch: 0 | train loss 0.8795 | test accuracy: 0.57
Epoch: 0 | train loss 1.0830 | test accuracy: 0.76
Regression
Use sin to predict cos.
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
# torch.manual_seed(1) # reproducible
# Hyper Parameters
TIME_STEP = 10    # rnn time step
INPUT_SIZE = 1    # rnn input size
LR = 0.02         # learning rate
outs = []    # save all predictions
for time_step in range(r_out.size(1)):    # calculate output for each time step
    outs.append(self.out(r_out[:, time_step, :]))
# instead, for simplicity, you can replace the code above with the following
# r_out = r_out.view(-1, 32)
# outs = self.out(r_out)
# outs = outs.view(-1, TIME_STEP, 1)
# return outs, h_state
# or even simpler: since nn.Linear accepts inputs of any dimension
# and returns outputs with the same dimensions except for the last one
# outs = self.out(r_out)
# return outs
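Putting the pieces above together, a minimal sketch of the regression RNN these forward-pass snippets belong to (hidden size 32 follows the `r_out.view(-1, 32)` comment; the rest of the wiring is an assumption):

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,       # number of rnn hidden units
            num_layers=1,
            batch_first=True,     # (batch, time_step, input_size)
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # x shape (batch, time_step, input_size)
        # r_out shape (batch, time_step, hidden_size)
        r_out, h_state = self.rnn(x, h_state)
        outs = []
        for time_step in range(r_out.size(1)):
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state

rnn = RNN()
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.MSELoss()
h_state = None    # initial hidden state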
for step in range(100):
    start, end = step * np.pi, (step + 1) * np.pi   # time range
    # use sin to predict cos
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32, endpoint=False)   # float32 for converting to torch FloatTensor
    x_np = np.sin(steps)
    y_np = np.cos(steps)
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])   # shape (batch, time_step, input_size)
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
    prediction, h_state = rnn(x, h_state)   # rnn output
    # !! next step is important !!
    h_state = h_state.data   # repack the hidden state, break the connection from the last iteration
    loss = loss_func(prediction, y)   # calculate loss
    optimizer.zero_grad()             # clear gradients for this training step
    loss.backward()                   # backpropagation, compute gradients
    optimizer.step()                  # apply gradients
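To see the fit, a small plotting sketch of the last window after training (assumes the variables from the loop above are still in scope):

# plot the last predicted window against the target
plt.plot(steps, y_np.flatten(), 'r-', label='target (cos)')
plt.plot(steps, prediction.data.numpy().flatten(), 'b-', label='prediction')
plt.legend(loc='best')
plt.show()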