import torch
import numpy as np
from torch import nn, optim
import matplotlib.pyplot as plt
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
plt.plot(x_train,y_train,"ro") #show for test
plt.show()
x_train = torch.from_numpy(x_train)  # convert the NumPy arrays to tensors
y_train = torch.from_numpy(y_train)
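# A quick sanity check (an added sketch, not part of the original listing):
# both tensors should have shape (15, 1) and dtype float32.
print(x_train.shape, x_train.dtype)  # torch.Size([15, 1]) torch.float32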
class LinearRegression(nn.Module):  # define a linear regression module
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(1, 1)  # input and output are both 1-dimensional

    def forward(self, x):
        out = self.linear(x)
        return out
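# Before training, a minimal sketch of what the module does: a forward pass
# on a dummy batch returns one prediction per input row. (The dummy tensor
# below is illustrative and not part of the original tutorial.)
dummy = torch.randn(2, 1)  # batch of 2 samples, 1 feature each
print(LinearRegression()(dummy).shape)  # torch.Size([2, 1])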
model = LinearRegression()  # instantiate the model
criterion = nn.MSELoss()  # set the loss function (mean squared error)
optimizer = optim.SGD(model.parameters(), lr=1e-4)  # choose an optimizer
num_epochs = 1000
for epoch in range(num_epochs):
    # forward pass (plain tensors work here; Variable is deprecated)
    out = model(x_train)            # compute predictions
    loss = criterion(out, y_train)  # compute the loss
    # backward pass
    optimizer.zero_grad()  # zero the accumulated gradients
    loss.backward()        # backpropagate
    optimizer.step()       # update the parameters
    if (epoch + 1) % 20 == 0:
        print('Epoch[{}/{}], loss: {:.2f}'.format(epoch + 1, num_epochs, loss.item()))
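# After training, a sketch of evaluating the fit (an assumed follow-up, not
# part of the original listing): switch to eval mode, predict without
# tracking gradients, and plot the fitted line over the data.
model.eval()
with torch.no_grad():
    predictions = model(x_train)
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='original data')
plt.plot(x_train.numpy(), predictions.numpy(), label='fitted line')
plt.legend()
plt.show()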