Reproducing the code from the video here so it is easy to look up later.
import numpy as np
import torch
import torch.nn as nn
# Query the current GPU index only when CUDA is available (the bare call errors on CPU-only machines)
if torch.cuda.is_available():
    torch.cuda.current_device()
# Construct the input data x and the corresponding labels y
x_values=[i for i in range(11)]
x_train=np.array(x_values,dtype=np.float32)
x_train=x_train.reshape(-1,1)
y_values=[2*i+1 for i in x_values]
y_train=np.array(y_values,dtype=np.float32)
y_train=y_train.reshape(-1,1)
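x is the integers 0 through 10 and y follows y = 2x + 1; both are reshaped into column vectors. A quick sanity check of the shapes (my own addition, not in the video):

# Both arrays should be column vectors of shape (11, 1), one sample per row
print(x_train.shape, y_train.shape)   # prints (11, 1) (11, 1)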
# Linear regression is really just a fully connected layer without an activation function
class LinearRegressionModel(nn.Module):
    def __init__(self,input_dim,output_dim):
        super(LinearRegressionModel,self).__init__()
        self.linear=nn.Linear(input_dim,output_dim)
    # Override the forward method inherited from nn.Module
    def forward(self,x):
        out=self.linear(x)
        return out
input_dim=1
output_dim=1
model=LinearRegressionModel(input_dim,output_dim)
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
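Since the model is nothing but a single nn.Linear layer, its output is just weight * x + bias. A minimal sanity check of that claim (my own addition; the test input 3.0 is arbitrary):

# The forward pass should equal weight * x + bias exactly (input_dim = output_dim = 1, so both are scalars)
with torch.no_grad():
    w=model.linear.weight.item()
    b=model.linear.bias.item()
    x_check=torch.tensor([[3.0]],device=device)
    print(model(x_check).item(), w*3.0+b)   # the two numbers should match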
# Number of training epochs
epochs=1000
# Define the learning rate
learning_rate=0.01
# Use stochastic gradient descent (SGD) as the optimizer
optimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)
# Use mean squared error (MSE) as the loss function
criterion=nn.MSELoss()
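For plain SGD, optimizer.step() simply applies w = w - learning_rate * grad to every parameter. A hand-rolled sketch of that single update, equivalent to optimizer.step() only for vanilla SGD without momentum (illustrative only; the training loop below still calls the optimizer):

# Manual version of one SGD step
with torch.no_grad():
    for param in model.parameters():
        if param.grad is not None:   # gradients exist only after loss.backward()
            param-=learning_rate*param.grad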
for epoch in range(epochs):
    epoch+=1
    # Convert the numpy arrays to tensors and move them to the chosen device
    inputs=torch.from_numpy(x_train).to(device)
    labels=torch.from_numpy(y_train).to(device)
    # Zero the gradients every iteration to prevent accumulation
    optimizer.zero_grad()
    # Forward pass
    outputs=model(inputs)
    # Compute the loss
    loss=criterion(outputs,labels)
    # Backward pass
    loss.backward()
    # Update the weight parameters
    optimizer.step()
    if epoch%50==0:
        print('epoch {}, loss {}'.format(epoch,loss.item()))
The results are as shown in the figure:
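To check the fit against y = 2x + 1, a short follow-up sketch (my own addition; the file name model.pkl is arbitrary) that runs the trained model on the training inputs and saves and reloads the learned weights:

# Predictions on the training data; the values should be close to y_train
with torch.no_grad():
    predicted=model(torch.from_numpy(x_train).to(device)).cpu().numpy()
print(predicted)
# Save and reload the learned parameters
torch.save(model.state_dict(),'model.pkl')
model.load_state_dict(torch.load('model.pkl'))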