PyTorch runs slowly when data is pre-transferred to the GPU

Tags: pytorch

I have a model written in PyTorch. Since my dataset is small, I can load all of the data onto the GPU up front. But I found that doing so makes the forward pass slower. Below is a runnable example. Specifically, this is the model:

import numpy as np
from time import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

def knn(x, k):
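    # Pairwise squared Euclidean distances via the expansion
    # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, negated so that topk
    # below selects the k nearest (smallest-distance) neighbors.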
    inner = -2*torch.matmul(x.transpose(2, 1), x)
    xx = torch.sum(x**2, dim=1, keepdim=True)
    pairwise_distance = -xx - inner - xx.transpose(2, 1)
    idx = pairwise_distance.topk(k=k, dim=-1)[1]   # (batch_size, num_points, k)
    return idx

def get_graph_feature(x, k=20, idx=None):
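    # For each point, gather its k nearest neighbors and build DGCNN edge
    # features by concatenating (neighbor - point, point) along the channel
    # dimension, giving a (batch_size, 2*num_dims, num_points, k) tensor.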
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    if idx is None:
        idx = knn(x, k=k)   # (batch_size, num_points, k)
    idx_base = torch.arange(0, batch_size, device=x.device).view(-1, 1, 1)*num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()   # (batch_size, num_points, num_dims)
    feature = x.view(batch_size*num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims) 
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2).contiguous()
    return feature

class DGCNN(nn.Module):
    def __init__(self, k=25, output_channels=10):
        super(DGCNN, self).__init__()
        self.k = k
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        self.bn4 = nn.BatchNorm2d(256)
        self.bn5 = nn.BatchNorm1d(1024)
        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(nn.Conv2d(64*2, 128, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv4 = nn.Sequential(nn.Conv2d(128*2, 256, kernel_size=1, bias=False),
                                   self.bn4,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv5 = nn.Sequential(nn.Conv1d(512, 1024, kernel_size=1, bias=False),
                                   self.bn5,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.linear1 = nn.Linear(1024*2, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout()
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout()
        self.linear3 = nn.Linear(256, output_channels)

    def forward(self, x):
        x = x.transpose(2, 1)
        batch_size = x.size(0)
        x = get_graph_feature(x, k=self.k)
        x = self.conv1(x)
        x1 = x.max(dim=-1, keepdim=False)[0]
        x = get_graph_feature(x1, k=self.k)
        x = self.conv2(x)
        x2 = x.max(dim=-1, keepdim=False)[0]
        x = get_graph_feature(x2, k=self.k)
        x = self.conv3(x)
        x3 = x.max(dim=-1, keepdim=False)[0]
        x = get_graph_feature(x3, k=self.k)
        x = self.conv4(x)
        x4 = x.max(dim=-1, keepdim=False)[0]
        x = torch.cat((x1, x2, x3, x4), dim=1)
        x = self.conv5(x)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)
        x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
        x = self.dp1(x)
        x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
        x = self.dp2(x)
        x = self.linear3(x)
        return x

Here are the data loader and the test function:

class my_loader(Dataset):
    def __init__(self, device):
        self.data = torch.rand(256, 2048, 3).to(device).float()
        self.labels = torch.rand(256).to(device).long()

    def __getitem__(self, ind):
        return self.data[ind], self.labels[ind]

    def __len__(self):
        return len(self.data)

def test():
    device = torch.device('cuda:2')
    test_set = my_loader(device)
    test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
    model = DGCNN().to(device)
    model.eval()
    
    #---------- this one is 0.12s --------------#
    for inputs, labels in test_loader:
        tic = time()
        pred = model(inputs)
        print('time1 {}'.format(time() - tic))
    print('------------------')
   
    #---------- this one is 0.004s --------------#
    for inputs, labels in test_loader:
        inputs = inputs.detach().cpu().to(device)
        tic = time()
        pred = model(inputs)
        print('time2 {}'.format(time() - tic))
    print('------------------')

    #---------- this one is 0.12s --------------#
    for inputs, labels in test_loader:
        tic = time()
        inputs = inputs.detach().cpu().to(device)
        pred = model(inputs)
        print('time3 {}'.format(time() - tic))
    print('------------------')
  

Basically, if there is no explicit GPU-to-CPU transfer call before or after the forward pass, the forward pass takes much longer. It looks as if the forward pass is implicitly performing a GPU->CPU transfer.
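One caveat worth noting when reading these numbers (a minimal sketch, not part of the original question): CUDA kernels launch asynchronously, so time() around model(inputs) can return before the GPU has actually finished. Bracketing the forward pass with torch.cuda.synchronize() makes the wall-clock interval cover the real computation rather than just the kernel launches:

def timed_forward(model, inputs):
    # torch.cuda.synchronize() blocks until all queued GPU work is done,
    # so the interval below measures the actual forward pass instead of
    # only the (asynchronous) kernel-launch overhead.
    torch.cuda.synchronize()
    tic = time()
    with torch.no_grad():
        pred = model(inputs)
    torch.cuda.synchronize()
    return pred, time() - tic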

Best Answer

I dug into the code a bit, and I think the problem is that you are timing both cases in the same run. Here is a stripped-down version of my code, since your model exhausts my GPU memory:

class DGCNN(nn.Module):
    def __init__(self, num_layers=1200):
        super(DGCNN, self).__init__()
        self.layers = nn.ModuleList([nn.Linear(256, 256) for _ in range(num_layers)])

    def forward(self, x):
        x = x.view(-1, 256)
        for layer in self.layers:
            x = layer(x)
        return x

class my_loader(Dataset):
    def __init__(self, device):
        self.data = torch.rand(256, 2048, 3).to(device).float()
        self.labels = torch.rand(256).to(device).long()

    def __getitem__(self, ind):
        return self.data[ind], self.labels[ind]

    def __len__(self):
        return len(self.data)

Now, here are the different versions of test().

Version #1:

def test():
    device = torch.device('cuda:0')
    test_set = my_loader(device)
    test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
    model = DGCNN().to(device)
    model.eval()

    #---------- this one is 0.12s --------------#
    tic = time()
    for inputs, labels in test_loader:
        pred = model(inputs)
    tac = time()    
    print(f'# First case -> Full forward pass: {tac - tic:.6f}')

    #---------- this one is 0.004s --------------#
    tic = time()
    for inputs, labels in test_loader:
        pred = model(inputs.detach().cpu().to(device))
    tac = time()
    print(f'# Second case -> Full forward pass: {tac - tic:.6f}')

>>> # First case -> Full forward pass: 3.105103, # Second case -> Full forward pass: 2.831652

Now I swapped the order in which the two cases are timed. Version #2:

def test():
    device = torch.device('cuda:0')
    test_set = my_loader(device)
    test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
    model = DGCNN().to(device)
    model.eval()

    #---------- this one is 0.004s --------------#
    tic = time()
    for inputs, labels in test_loader:
        pred = model(inputs.detach().cpu().to(device))
    tac = time()
    print(f'# Second case -> Full forward pass: {tac - tic:.6f}')

    #---------- this one is 0.12s --------------#
    tic = time()
    for inputs, labels in test_loader:
        pred = model(inputs)
    tac = time()
    print(f'# First case -> Full forward pass: {tac - tic:.6f}')

>>> # Second case -> Full forward pass: 3.288522, # First case -> Full forward pass: 2.583231

Clearly, whichever case is timed first appears slower. So I timed the two cases separately, in different runs with a fresh kernel. Version #3:

def test():    
    device = torch.device('cuda:0')
    test_set = my_loader(device)
    test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
    model = DGCNN().to(device)
    model.eval()

    #---------- this one is 0.12s --------------#
    tic = time()
    for inputs, labels in test_loader:
        pred = model(inputs)
    tac = time()
    print(f'# First case -> Full forward pass: {tac - tic:.6f}')

>>> # First case -> Full forward pass: 3.091592

Version #4:

def test():
    device = torch.device('cuda:0')
    test_set = my_loader(device)
    test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
    model = DGCNN().to(device)
    model.eval()

    #---------- this one is 0.004s --------------#
    tic = time()
    for inputs, labels in test_loader:
        pred = model(inputs.detach().cpu().to(device))
    tac = time()
    print(f'# Second case -> Full forward pass: {tac - tic:.6f}')

>>> # Second case -> Full forward pass: 3.190248

So, testing one case at a time, it seems that pred = model(inputs) runs slightly faster than pred = model(inputs.detach().cpu().to(device)), which is the expected result.
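A plausible explanation for the order effect (my reading; the runs above do not isolate it): whichever loop runs first also pays one-time costs such as CUDA context creation and cuDNN initialization. Running an untimed warm-up pass before measuring, and synchronizing around the timed region, should make the two cases directly comparable. A minimal sketch reusing the classes above:

def test_warmed_up():
    device = torch.device('cuda:0')
    test_set = my_loader(device)
    test_loader = DataLoader(test_set, batch_size=16, shuffle=True, num_workers=0)
    model = DGCNN().to(device)
    model.eval()

    with torch.no_grad():
        # Untimed warm-up pass absorbs one-time CUDA/cuDNN setup costs.
        for inputs, labels in test_loader:
            model(inputs)

        torch.cuda.synchronize()
        tic = time()
        for inputs, labels in test_loader:
            pred = model(inputs)
        torch.cuda.synchronize()
        print(f'# Warmed-up forward pass: {time() - tic:.6f}')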

The original question can be found on Stack Overflow: https://stackoverflow.com/questions/65642697/
