Contents
- How it works
- Single-GPU training code
- Dual-GPU training code
How it works
When you use nn.DataParallel, PyTorch replicates the model onto multiple GPUs and lets each GPU compute the output for a slice of the input. Concretely, nn.DataParallel splits each input batch into smaller chunks, dispatches one chunk to each GPU, and runs the forward pass on every device in parallel. It then gathers the per-GPU outputs back onto the primary device to form the final output, from which the loss and gradients are computed.
To implement this, nn.DataParallel uses torch.nn.parallel.replicate to copy the model onto each GPU, and torch.nn.parallel.scatter to split the input batch and send one chunk to each device. It then runs the forward pass of every replica in parallel with torch.nn.parallel.parallel_apply, and merges the per-GPU outputs onto the primary device with torch.nn.parallel.gather. The loss is computed on the gathered output; during the backward pass, the gradients computed on each replica are summed back onto the original model's parameters on the primary GPU, where the optimizer then performs the update.
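These four primitives can be chained by hand to approximate what DataParallel does in its forward pass. Below is a minimal sketch, not the library's actual implementation, assuming two visible CUDA devices and a toy nn.Linear model:

import torch
import torch.nn as nn
from torch.nn.parallel import replicate, scatter, parallel_apply, gather

model = nn.Linear(10, 5).to("cuda:0")         # toy model on the primary device
inputs = torch.randn(8, 10, device="cuda:0")  # one batch of 8 samples
device_ids = [0, 1]

replicas = replicate(model, device_ids)       # 1. copy the model onto each device
chunks = scatter(inputs, device_ids)          # 2. split the batch: 4 samples per device
outputs = parallel_apply(replicas, [(c,) for c in chunks])  # 3. run the forward passes in parallel
result = gather(outputs, target_device=0)     # 4. merge the outputs on the primary device
print(result.shape)                           # torch.Size([8, 5])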
Single-GPU training code
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Load the dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# train=True loads the training split of MNIST. MNIST contains 70,000 28x28 grayscale images of
# handwritten digits: 60,000 in the training set and 10,000 in the test set. Passing train=True to
# datasets.MNIST loads the training split for fitting the model; train=False loads the test split
# for evaluating the trained model.
train_dataset = datasets.MNIST('data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST('data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=True)
# Define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = nn.functional.relu(nn.functional.max_pool2d(self.conv1(x), 2))
        x = nn.functional.relu(nn.functional.max_pool2d(self.dropout(self.conv2(x)), 2))
        x = x.view(-1, 320)  # flatten: 20 channels * 4 * 4 = 320
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.dropout(x, training=self.training)
        x = self.fc2(x)
        return nn.functional.log_softmax(x, dim=1)  # dim=1: normalize over the 10 classes
# Training loop for one epoch
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nn.functional.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# Evaluation on the test set
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += nn.functional.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Entry point
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    for epoch in range(1, 11):
        train(model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
    torch.save(model.state_dict(), "mnist_cnn.pt")

if __name__ == '__main__':
    main()
Single-GPU training output:
Train Epoch: 10 [0/60000 (0%)] Loss: 0.151937
Train Epoch: 10 [6400/60000 (11%)] Loss: 0.222246
Train Epoch: 10 [12800/60000 (21%)] Loss: 0.120114
Train Epoch: 10 [19200/60000 (32%)] Loss: 0.177940
Train Epoch: 10 [25600/60000 (43%)] Loss: 0.172243
Train Epoch: 10 [32000/60000 (53%)] Loss: 0.203212
Train Epoch: 10 [38400/60000 (64%)] Loss: 0.298426
Train Epoch: 10 [44800/60000 (75%)] Loss: 0.198832
Train Epoch: 10 [51200/60000 (85%)] Loss: 0.097117
Train Epoch: 10 [57600/60000 (96%)] Loss: 0.120100
Test set: Average loss: 0.0577, Accuracy: 9815/10000 (98%)
Dual-GPU training code
The server has two GTX 1080 Ti GPUs, so the single-GPU script above is adapted for dual-GPU training.
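The only change is in main(): the model is wrapped in nn.DataParallel before being moved to the device, so each batch of 64 is split into two chunks of 32, one per GPU. The key lines (the full script follows):

model = Net()
if torch.cuda.device_count() > 1:
    # Use GPUs 0 and 1; each batch is split between them.
    model = nn.DataParallel(model, device_ids=[0, 1])
model.to(device)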
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Load the dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# train=True loads the training split of MNIST. MNIST contains 70,000 28x28 grayscale images of
# handwritten digits: 60,000 in the training set and 10,000 in the test set. Passing train=True to
# datasets.MNIST loads the training split for fitting the model; train=False loads the test split
# for evaluating the trained model.
train_dataset = datasets.MNIST('data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST('data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=True)
# Define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = nn.functional.relu(nn.functional.max_pool2d(self.conv1(x), 2))
        x = nn.functional.relu(nn.functional.max_pool2d(self.dropout(self.conv2(x)), 2))
        x = x.view(-1, 320)  # flatten: 20 channels * 4 * 4 = 320
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.dropout(x, training=self.training)
        x = self.fc2(x)
        return nn.functional.log_softmax(x, dim=1)  # dim=1: normalize over the 10 classes
# Training loop for one epoch
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nn.functional.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# Evaluation on the test set
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += nn.functional.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Entry point
def main():
    # "cuda" refers to the current default CUDA device (cuda:0); DataParallel fans work out to the rest.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Net()
    if torch.cuda.device_count() > 1:
        # More than one GPU is visible: run on GPUs 0 and 1 in parallel.
        model = nn.DataParallel(model, device_ids=[0, 1])
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    for epoch in range(1, 11):
        train(model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
    # Save the unwrapped module's weights so the checkpoint carries no "module." key prefix
    # and can be loaded into a plain Net.
    state_dict = model.module.state_dict() if isinstance(model, nn.DataParallel) else model.state_dict()
    torch.save(state_dict, "mnist_cnn.pt")

if __name__ == '__main__':
    main()
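Note that DataParallel stores the wrapped network under a .module attribute, so calling state_dict() on the wrapper would prefix every key with "module."; saving model.module.state_dict() as above keeps the checkpoint loadable by an unwrapped model. A minimal loading sketch for single-device inference:

model = Net()
model.load_state_dict(torch.load("mnist_cnn.pt", map_location="cpu"))
model.eval()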
GPU utilization during training: