Using a half-precision (FP16) model can significantly reduce memory usage and computation time, which makes it an attractive option when deploying deep learning models. The following guide covers the relevant operations for working with half-precision models in PyTorch:
1. Confirm half-precision support
Half-precision computation in PyTorch runs on the GPU, so the first step is to confirm that CUDA is available and to select a device. Note that torch.cuda.is_available() only reports whether CUDA works at all, not how fast FP16 will be. The following code can be used:
import torch
# Check whether the current machine has a usable CUDA GPU
print(torch.cuda.is_available())
# Select the GPU and turn on cuDNN auto-tuning; benchmark mode picks the
# fastest kernels (including FP16 ones) for fixed-size inputs
device = torch.device("cuda:0")
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
torch.cuda.set_device(device)
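For a more direct check of whether the GPU handles FP16 efficiently, you can inspect its compute capability. This is a minimal sketch; the rule of thumb in the comment (compute capability 7.0 or higher for Tensor Cores) describes NVIDIA hardware generations and is not something the PyTorch API itself guarantees:

if torch.cuda.is_available():
    # Returns a (major, minor) tuple, e.g. (7, 0) for V100 or (8, 6) for RTX 30xx
    major, minor = torch.cuda.get_device_capability(device)
    print(f"Compute capability: {major}.{minor}")
    # Volta (7.0) and newer GPUs have Tensor Cores that accelerate FP16 matmuls
    if major >= 7:
        print("This GPU should run FP16 efficiently")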
2. Convert the model to half precision
In PyTorch, calling the .half() method on an nn.Module casts all of its floating-point parameters and buffers to torch.half (FP16). Keep in mind that FP16 is only 16 bits wide, so its numeric range and precision are limited; values that overflow or underflow that range can degrade accuracy. The conversion looks like this:
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# The model in full precision
net = Net()
# Convert to half precision
net.half()
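A half-precision model also expects half-precision inputs, and FP16 kernels are in practice only available on the GPU. The following is a minimal usage sketch; the input shape 1x3x32x32 is chosen to match the Net defined above:

import torch

net = net.cuda()
# The input must be cast to half as well, otherwise PyTorch raises a dtype mismatch
x = torch.randn(1, 3, 32, 32, device="cuda").half()
out = net(x)
print(out.dtype)  # torch.float16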
3. Cast the weights and buffers explicitly
Strictly speaking, net.half() above already casts every floating-point parameter and buffer, so the loop below is redundant; it is shown to make explicit which tensors change dtype. Note that this is a cast rather than a clipping operation, and that BatchNorm layers carry running_mean and running_var buffers that must match the new dtype as well:
for layer in net.modules():
    if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
        # Cast the weights
        layer.weight.data = layer.weight.data.half()
        # Cast the bias, if present
        if layer.bias is not None:
            layer.bias.data = layer.bias.data.half()
    elif isinstance(layer, nn.BatchNorm2d):
        layer.weight.data = layer.weight.data.half()
        layer.bias.data = layer.bias.data.half()
        layer.running_mean.data = layer.running_mean.data.half()
        layer.running_var.data = layer.running_var.data.half()
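In practice, a common variant of this loop goes the other way: cast the model to FP16 but keep the BatchNorm layers in FP32, because accumulating running statistics in half precision is numerically fragile. A sketch of that pattern follows; the helper name network_to_half is my own, not a PyTorch API:

import torch.nn as nn

def network_to_half(model: nn.Module) -> nn.Module:
    # Cast everything to FP16 first
    model.half()
    # Then restore BatchNorm layers (parameters and running stats) to FP32;
    # on CUDA, BatchNorm accepts FP16 inputs with FP32 parameters
    for layer in model.modules():
        if isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            layer.float()
    return model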
4. Train and test the model
Training and testing with a half-precision model works much like with an ordinary model; the inputs just need to be cast to half. One pitfall: for a classification loss such as nn.CrossEntropyLoss, the target must remain an integer (long) tensor, so do not cast it to half:

x = x.half()  # cast the input to FP16; y stays torch.long for CrossEntropyLoss
outputs = net(x)
loss = criterion(outputs, y)
# Compute gradients
optimizer.zero_grad()
loss.backward()
# Update the weights
optimizer.step()
# Cast the FP16 outputs to float32 before computing metrics
preds = outputs.float().argmax(dim=1, keepdim=True)
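For training (as opposed to pure inference deployment), recent PyTorch versions offer automatic mixed precision, which performs these casts and adds loss scaling automatically. Below is a minimal sketch assuming PyTorch 1.6 or later; note that with this approach the model itself stays in FP32:

from torch.cuda.amp import autocast, GradScaler

scaler = GradScaler()
optimizer.zero_grad()
with autocast():  # eligible ops run in FP16 automatically
    outputs = net(x)  # x can stay float32; autocast inserts the casts
    loss = criterion(outputs, y)
scaler.scale(loss).backward()  # loss scaling avoids FP16 gradient underflow
scaler.step(optimizer)
scaler.update()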
Examples
Below are two examples of deploying a half-precision model with PyTorch.
Example 1: Image classification with a half-precision model
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
# Define the data preprocessing
transform = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
# Load the ImageNet dataset (ImageFolder directory layout)
train_set = datasets.ImageFolder(root='./train', transform=transform)
test_set = datasets.ImageFolder(root='./val', transform=transform)
# Define the hyperparameters
epochs = 10
learning_rate = 0.001
device = torch.device("cuda:0")
# Define the model and loss
model = torch.hub.load('pytorch/vision:v0.9.0', 'resnet50', pretrained=True)
criterion = nn.CrossEntropyLoss()
# Turn on cuDNN auto-tuning
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
torch.cuda.set_device(device)
# Convert to half precision and move to the GPU
model.half()
model = model.to(device)
# Create the optimizer after the casts so it tracks the final parameters
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Explicitly cast weights and buffers (redundant after model.half(), shown for clarity)
for layer in model.modules():
    if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
        layer.weight.data = layer.weight.data.half()
        if layer.bias is not None:
            layer.bias.data = layer.bias.data.half()
    elif isinstance(layer, nn.BatchNorm2d):
        layer.weight.data = layer.weight.data.half()
        layer.bias.data = layer.bias.data.half()
        layer.running_mean.data = layer.running_mean.data.half()
        layer.running_var.data = layer.running_var.data.half()
# Define the training procedure
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(tqdm(train_loader)):
        data, target = data.to(device), target.to(device)
        # Cast the input to half precision; the target stays a long tensor
        # because nn.CrossEntropyLoss expects integer class indices
        data = data.half()
        # Forward pass
        output = model(data)
        loss = criterion(output, target)
        # Compute gradients
        optimizer.zero_grad()
        loss.backward()
        # Update the weights
        optimizer.step()
        # Report training progress
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data),
                len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
# Define the test procedure
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in tqdm(test_loader):
            data, target = data.to(device), target.to(device)
            # Cast the input to half precision; keep the target as a long tensor
            data = data.half()
            # Forward pass
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.float().argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Load the training and test sets with DataLoader
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True,
                                           num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=32,
                                          shuffle=False, num_workers=4, pin_memory=True)
# Run training and testing
for epoch in range(1, epochs + 1):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)
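One caveat with the pure FP16 training loop above: small gradients can underflow to zero in half precision. A common mitigation is loss scaling, sketched below with a fixed scale factor; the value 128 is purely illustrative, and production code typically uses dynamic scaling such as torch.cuda.amp.GradScaler instead:

scale = 128.0  # fixed loss scale; tune per model

def train_step(model, data, target, criterion, optimizer):
    optimizer.zero_grad()
    output = model(data.half())
    loss = criterion(output, target)
    # Scale the loss up so small gradients survive the FP16 representation
    (loss * scale).backward()
    # Unscale the gradients before the optimizer update
    for p in model.parameters():
        if p.grad is not None:
            p.grad.data.div_(scale)
    optimizer.step()
    return loss.item()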
Example 2: Text classification with a half-precision model
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchtext.legacy import data
from torchtext.legacy.data import Iterator, BucketIterator
from torchtext.vocab import Vectors
import argparse
parser = argparse.ArgumentParser(description='PyTorch Text Classification using Word Embedding')
parser.add_argument('--model', type=str, default='lstm',
                    help='type of recurrent net (lstm, gru)')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
                    help='batch size for training (default: 32)')
parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',
                    help='number of hidden dimensions (default: 100)')
parser.add_argument('--embed_dim', type=int, default=100, metavar='N',
                    help='number of embedding dimensions (default: 100)')
parser.add_argument('--num_epochs', type=int, default=20, metavar='N',
                    help='number of epochs (default: 20)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                    help='learning rate (default: 0.001)')
parser.add_argument('--no_cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
args = parser.parse_args()
USE_CUDA = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if USE_CUDA:
    torch.cuda.manual_seed(args.seed)
# Define the data fields (batch_first matches the batch-first LSTM below)
TEXT = data.Field(sequential=True, tokenize='spacy', batch_first=True)
LABEL = data.LabelField()
# Load the dataset
train_data, test_data = data.TabularDataset.splits(
    path='./data/', train='train.csv', test='test.csv', format='csv',
    fields=[('id', None), ('text', TEXT), ('label', LABEL)])
# Load the pretrained word vectors and build the vocabularies
vectors = Vectors(name='./glove.6B.100d.txt')
TEXT.build_vocab(train_data, vectors=vectors)
LABEL.build_vocab(train_data)
# Build the classifier
class Classifier(nn.Module):
    def __init__(self, n_vocab, hidden_dim, n_layers, n_cats, embed_dim):
        super().__init__()
        self.embedding = nn.Embedding(n_vocab, embed_dim)
        self.rnn = nn.LSTM(embed_dim, hidden_dim, n_layers, batch_first=True)
        self.fc1 = nn.Linear(hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, n_cats)

    def forward(self, x):
        x = self.embedding(x)
        hidden_states, _ = self.rnn(x)
        # Use the hidden state of the last time step for classification
        x = hidden_states[:, -1, :]
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# Create the data iterators (respect the --no_cuda flag)
device = torch.device("cuda" if USE_CUDA else "cpu")
train_iter, test_iter = BucketIterator.splits(
    (train_data, test_data), batch_size=args.batch_size, device=device,
    sort_key=lambda x: len(x.text), sort_within_batch=False, repeat=False)
# Define the model, loss, and optimizer
model = Classifier(len(TEXT.vocab), args.hidden_dim, 2, len(LABEL.vocab), args.embed_dim)
model.embedding.weight.data.copy_(TEXT.vocab.vectors)
# Convert to half precision (FP16 kernels are generally GPU-only)
model.half()
criterion = nn.CrossEntropyLoss()
if USE_CUDA:
    model.cuda()
    criterion.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Training loop
for epoch in range(1, args.num_epochs + 1):
    epoch_loss = 0
    epoch_acc = 0
    model.train()
    for batch in train_iter:
        optimizer.zero_grad()
        # Token indices and labels stay long tensors: nn.Embedding requires
        # integer indices, and its output is FP16 automatically because the
        # embedding weights are FP16
        x = batch.text
        y = batch.label
        predictions = model(x)
        loss = criterion(predictions, y)
        acc = (predictions.argmax(1) == y).float().mean()
        # Compute gradients
        loss.backward()
        # Update the weights
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    epoch_loss /= len(train_iter)
    epoch_acc /= len(train_iter)
    print(f'Epoch {epoch}:')
    print(f'Train Loss : {epoch_loss:.4f} | Train Acc: {epoch_acc:.4f}')
# Evaluation
model.eval()
test_loss = 0
test_acc = 0
with torch.no_grad():
    for batch in test_iter:
        # Again, keep indices and labels as long tensors
        x = batch.text
        y = batch.label
        predictions = model(x)
        loss = criterion(predictions, y)
        acc = (predictions.argmax(1) == y).float().mean()
        test_loss += loss.item()
        test_acc += acc.item()
test_loss /= len(test_iter)
test_acc /= len(test_iter)
print(f'Test Loss : {test_loss:.4f} | Test Acc: {test_acc:.4f}')
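For actual deployment, the half-precision weights can be saved once and restored without repeating the conversion, since the checkpoint stores the FP16 tensors directly. A minimal sketch, assuming a GPU is available at load time; the file name classifier_fp16.pt is just an example:

# Save the FP16 state dict; tensors keep their half dtype on disk
torch.save(model.state_dict(), 'classifier_fp16.pt')

# At deployment time, rebuild the architecture, cast it to half,
# load the FP16 weights, and switch to eval mode
deployed = Classifier(len(TEXT.vocab), args.hidden_dim, 2,
                      len(LABEL.vocab), args.embed_dim).half().cuda()
deployed.load_state_dict(torch.load('classifier_fp16.pt'))
deployed.eval()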
As these examples show, deploying deep learning models with PyTorch and optimizing them with half precision is relatively straightforward. In practice, the specific optimizations and parameters can be adjusted to the requirements and environment at hand to obtain better performance.