系统环境
CUDA 11.1
gtx1070-8g
win10 21h1 19043.1266
打开该选项
torch.backends.cudnn.benchmark=True
打开后可加快运算速度,但与加速带来的收益相比,其副作用更值得注意。
最为直观的影响
计算误差会显著增大(下方实验数据中约为数千倍)。
显存占用会变为关闭时的 1.1 - 1.5 倍。
这个选项不能随便打开,影响很大。
以下为实验
cudnn.benchmark = False,显存占用为 3.0 G
card_id: 0 elapsed/total: 1/10 max_diff_1: 0.00003290 max_diff_1: 0.00002837
card_id: 0 elapsed/total: 2/10 max_diff_1: 0.00003082 max_diff_1: 0.00002468
card_id: 0 elapsed/total: 3/10 max_diff_1: 0.00003231 max_diff_1: 0.00002813
card_id: 0 elapsed/total: 4/10 max_diff_1: 0.00003111 max_diff_1: 0.00002661
card_id: 0 elapsed/total: 5/10 max_diff_1: 0.00003064 max_diff_1: 0.00002551
card_id: 0 elapsed/total: 6/10 max_diff_1: 0.00003046 max_diff_1: 0.00002813
card_id: 0 elapsed/total: 7/10 max_diff_1: 0.00003266 max_diff_1: 0.00002570
card_id: 0 elapsed/total: 8/10 max_diff_1: 0.00003067 max_diff_1: 0.00002459
card_id: 0 elapsed/total: 9/10 max_diff_1: 0.00003183 max_diff_1: 0.00002503
card_id: 0 elapsed/total: 10/10 max_diff_1: 0.00003159 max_diff_1: 0.00002563
cudnn.benchmark = True,显存占用为 4.0 G
card_id: 0 elapsed/total: 1/10 max_diff_1: 0.12276840 max_diff_1: 0.09905499
card_id: 0 elapsed/total: 2/10 max_diff_1: 0.10842407 max_diff_1: 0.08467466
card_id: 0 elapsed/total: 3/10 max_diff_1: 0.11710882 max_diff_1: 0.08327144
card_id: 0 elapsed/total: 4/10 max_diff_1: 0.11344403 max_diff_1: 0.08150017
card_id: 0 elapsed/total: 5/10 max_diff_1: 0.10728776 max_diff_1: 0.08666122
card_id: 0 elapsed/total: 6/10 max_diff_1: 0.12120318 max_diff_1: 0.08831707
card_id: 0 elapsed/total: 7/10 max_diff_1: 0.12990451 max_diff_1: 0.08046508
card_id: 0 elapsed/total: 8/10 max_diff_1: 0.11482769 max_diff_1: 0.08473527
card_id: 0 elapsed/total: 9/10 max_diff_1: 0.11562169 max_diff_1: 0.08603296
card_id: 0 elapsed/total: 10/10 max_diff_1: 0.11495388 max_diff_1: 0.08923209
以下为实验代码,你可以通过修改主程序中 cudnn.benchmark = True 这一行来观察 cudnn.benchmark 的影响
'''
用于检测cuda运算错误
'''
import os
import torch
import torch.nn as nn
from torch.backends import cudnn
import argparse
import time
import platform
def ConvBnAct(in_ch, out_ch, ker_sz, stride, pad, act=nn.Identity(), group=1, dilation=1):
    """Build a Conv2d -> GroupNorm -> activation stack.

    The convolution has no bias (the norm layer makes it redundant); the
    GroupNorm always uses 16 groups with a small eps for numerical stability.
    The default ``act`` instance is shared across calls, which is harmless
    since nn.Identity is stateless.
    """
    conv = nn.Conv2d(in_ch, out_ch, ker_sz, stride, pad,
                     groups=group, bias=False, dilation=dilation)
    norm = nn.GroupNorm(16, out_ch, eps=1e-8)
    return nn.Sequential(conv, norm, act)
def DeConvBnAct(in_ch, out_ch, ker_sz, stride, pad, act=nn.Identity(), group=1, dilation=1):
    """Build a ConvTranspose2d -> GroupNorm -> activation stack.

    Mirrors ConvBnAct but with a transposed (up-sampling) convolution;
    bias is omitted because GroupNorm follows, and the norm uses 16 groups.
    """
    deconv = nn.ConvTranspose2d(in_ch, out_ch, ker_sz, stride, pad,
                                groups=group, bias=False, dilation=dilation)
    norm = nn.GroupNorm(16, out_ch, eps=1e-8)
    return nn.Sequential(deconv, norm, act)
class RevSequential(nn.ModuleList):
    """Invertible sequential container; mostly overlaps nn.ModuleList.

    Every child must expose a callable ``invert`` so the whole chain can be
    run backwards with :meth:`invert`. Both ``forward`` and ``invert`` pass a
    pair of tensors through the children (in reverse order for ``invert``).
    """

    def __init__(self, modules=None):
        super().__init__(modules)

    def append(self, module):
        # Refuse any module that cannot be inverted.
        assert hasattr(module, 'invert') and callable(module.invert)
        super().append(module)

    def extend(self, modules):
        for module in modules:
            self.append(module)

    def forward(self, x1, x2):
        for module in self:
            x1, x2 = module(x1, x2)
        return x1, x2

    def invert(self, y1, y2):
        # Walk the children back-to-front, undoing each transform.
        for module in reversed(self):
            y1, y2 = module.invert(y1, y2)
        return y1, y2
class RevGroupBlock(RevSequential):
    """A stack of ``blocks`` identical reversible blocks.

    Currently only supports in_ch == out_ch and no down-sampling
    (stride must be 1); each child is constructed from ``block_type``.
    """

    def __init__(self, in_ch, out_ch, stride, act, block_type, blocks, **kwargs):
        assert in_ch == out_ch
        assert stride == 1
        super().__init__([
            block_type(in_ch=in_ch, out_ch=out_ch, stride=1, act=act, **kwargs)
            for _ in range(blocks)
        ])
class RevBlockC(nn.Module):
    """Reversible residual block: (x1, x2) -> (x2, x1 + f(x2)).

    ``invert`` recovers the original inputs exactly up to floating-point
    error, which is precisely what the benchmark script measures.
    """

    def __init__(self, in_ch, out_ch, stride, act, **kwargs):
        super().__init__()
        mid_ch = in_ch // 2
        self.conv1 = ConvBnAct(in_ch, mid_ch, ker_sz=5, stride=1, pad=2, act=act)
        # Depthwise 5x5 conv: one group per channel.
        self.conv2 = ConvBnAct(mid_ch, mid_ch, ker_sz=5, stride=1, pad=2, act=act, group=mid_ch)
        # 1x1 mixing conv with no activation.
        self.conv3 = ConvBnAct(in_ch, in_ch, ker_sz=1, stride=1, pad=0, act=nn.Identity())

    def func(self, x):
        """The residual branch f: two convs whose outputs are concatenated
        back to in_ch channels, then mixed by a 1x1 conv."""
        a = self.conv1(x)
        b = self.conv2(a)
        return self.conv3(torch.cat([a, b], dim=1))

    def forward(self, x1, x2):
        return x2, x1 + self.func(x2)

    def invert(self, y1, y2):
        # y1 is the former x2; subtracting f(y1) from y2 recovers x1.
        return y2 - self.func(y1), y1
def new_model(dev=None):
    """Build the 12-block reversible stack used by the benchmark.

    Args:
        dev: target device. When None (the default), falls back to the
            module-level ``device`` global set in ``__main__`` — this keeps
            the original implicit-global behavior for existing callers while
            allowing explicit device selection.

    Returns:
        The RevGroupBlock moved to the device and switched to eval() mode.
    """
    if dev is None:
        dev = device  # original hidden global dependency, kept for compat
    act = nn.ELU()
    rvb = RevGroupBlock(128, 128, 1, act, RevBlockC, 12).to(dev)
    rvb.eval()
    return rvb
if __name__ == '__main__':
    # NOTE: benchmark=True together with deterministic=True is intentional —
    # the whole point of this script is to expose the numerical impact of
    # cudnn.benchmark. Flip this line to compare.
    cudnn.benchmark = True
    cudnn.deterministic = True
    torch.set_grad_enabled(False)

    def _str2bool(v):
        """Parse a boolean CLI value.

        BUGFIX: the original used type=bool, but bool('False') is True —
        any non-empty string parsed as True, so '-bp False' had no effect.
        """
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % (v,))

    parse = argparse.ArgumentParser(description='Used to check pytorch cudnn.benchmark flag.')
    parse.add_argument('-i', type=int, help='Card id. Which cuda card do you want to test. default: 0', default=0)
    parse.add_argument('-e', type=int, help='Epoch. default: 10', default=10)
    parse.add_argument('-bp', type=_str2bool, help='Use backward. default: True', default=True)
    parse.add_argument('-bs', type=int, help='Batch size. default: 8', default=8)
    parse = parse.parse_args()

    card_id = parse.i
    epoch = parse.e
    use_backward = parse.bp
    batch_size = parse.bs

    # card_id == -1 runs on CPU, which should never show the large errors.
    device = 'cpu' if card_id == -1 else f'cuda:{card_id}'
    device = torch.device(device)
    # Validate with raises, not assert (asserts are stripped under -O).
    if epoch <= 0:
        raise ValueError('epoch must be positive')
    if batch_size <= 0:
        raise ValueError('batch size must be positive')

    rvb = new_model()
    torch.set_grad_enabled(use_backward)
    for e in range(1, epoch + 1):
        a1 = torch.randn(batch_size, 128, 64, 64, device=device)
        # Forward then invert: a perfectly invertible net would give
        # o_a1 == o_a2 == a1, so the max abs differences measure the error.
        b1, b2 = rvb(a1, a1)
        o_a1, o_a2 = rvb.invert(b1, b2)
        if use_backward:
            (o_a1.max() + o_a2.max()).backward()
        with torch.no_grad():
            max_diff_1 = torch.abs(o_a1 - o_a2).max().item()
            max_diff_2 = torch.abs(a1 - o_a1).max().item()
        # BUGFIX: the second label used to read 'max_diff_1' as well,
        # mislabeling max_diff_2 in the printed output.
        line = f'card_id: {card_id} elapsed/total: {e}/{epoch} max_diff_1: {max_diff_1:.8f} max_diff_2: {max_diff_2:.8f}'
        print(line)