PyTorch Learning Notes

This post demonstrates how to convert between PyTorch tensors and NumPy arrays and how to move data between CPU and GPU. It also runs a small experiment comparing the performance of PyTorch and CuPy on the same operation.

PyTorch Notes

Converting between PyTorch tensors and NumPy arrays

  • On the CPU, a NumPy array and a tensor created with torch.from_numpy share the same underlying storage (the script below walks through this, and a short verification sketch follows it)
import time
import numpy as np
import torch

def numpy_to_tensor_to_gpu_to_cpu_tensor_to_numpy():
    data_np = np.array([[1,2,0],[3,4,5]])
    # from_numpy shares the CPU buffer with data_np; torch.tensor makes a copy
    data_tr_s = torch.from_numpy(data_np)
    data_tr_d = torch.tensor(data_np, dtype=torch.int64)
    print('0.cpu origin data_np', list(data_np))
    print('0.cpu origin data_tr_s', list(data_tr_s))
    print('0.cpu origin data_tr_d', list(data_tr_d))

    # modify the numpy array in place: only the shared tensor data_tr_s follows
    data_np[0][1] = data_np[1][0]

    print('1.cpu modify data_np', list(data_np))
    print('1.cpu modify data_tr_s', list(data_tr_s))
    print('1.cpu modify data_tr_d', list(data_tr_d))

    # moving to the GPU copies the data, so the link to data_np is broken
    data_tr_s = data_tr_s.cuda()
    data_tr_d = data_tr_d.cuda()
    print('2.gpu origin data_tr_s', list(data_tr_s))
    print('2.gpu origin data_tr_d', list(data_tr_d))

    # in-place add on the GPU copies: data_np is not affected
    data_tr_s = data_tr_s.add_(1)
    data_tr_d = data_tr_d.add_(1)
    print('2.gpu modify data_tr_s', list(data_tr_s))
    print('2.gpu modify data_tr_d', list(data_tr_d))

    # .cpu() copies back to host memory (a new buffer, not the original data_np)
    data_tr_s = data_tr_s.cpu()
    data_tr_d = data_tr_d.cpu()
    print('3.cpu back data_tr_s', list(data_tr_s))
    print('3.cpu back data_tr_d', list(data_tr_d))

    # .numpy() on a CPU tensor shares memory again, so the in-place add
    # shows up in both the tensors and the freshly converted arrays
    data_np_s = data_tr_s.numpy()
    data_np_d = data_tr_d.numpy()
    data_tr_s = data_tr_s.add_(1)
    data_tr_d = data_tr_d.add_(1)
    print('4.cpu modify data_tr_s', list(data_tr_s))
    print('4.cpu modify data_tr_d', list(data_tr_d))
    print('4.cpu modify data_np_s', list(data_np_s))
    print('4.cpu modify data_np_d', list(data_np_d))

if __name__ == '__main__':
    numpy_to_tensor_to_gpu_to_cpu_tensor_to_numpy()
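The key point the run above demonstrates: torch.from_numpy shares the CPU buffer with the source array (so data_tr_s follows the change to data_np), torch.tensor makes an independent copy (so data_tr_d does not), and .cuda() copies the data to the GPU, which breaks the link. A minimal CPU-only sketch that checks the sharing directly with np.shares_memory (the variable names here are illustrative, not from the script above):

import numpy as np
import torch

data_np = np.array([[1,2,0],[3,4,5]])
shared = torch.from_numpy(data_np)   # zero-copy view of the numpy buffer
copied = torch.tensor(data_np)       # independent copy

print(np.shares_memory(data_np, shared.numpy()))  # True: same storage
print(np.shares_memory(data_np, copied.numpy()))  # False: torch.tensor copied

data_np[0][0] = 99
print(shared[0][0].item())  # 99, the change is visible through the shared tensor
print(copied[0][0].item())  # 1, the copy is unaffected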
  • PyTorch vs. CuPy (a note on synchronized GPU timing follows the code)
import time
import numpy as np
import cupy as cp
import torch

def time_of_numpy_to_cupy_to_numpy(data_size):
    data_np = np.ones((data_size,data_size), dtype=float)

    # host -> device copy
    time_start = time.perf_counter()
    data_cp = cp.asarray(data_np)
    time_end = time.perf_counter()
    print('cp np_to_cp duration {}ms'.format((time_end-time_start)*1000))

    # matrix multiply on the GPU
    time_start = time.perf_counter()
    data_cp = cp.matmul(data_cp, data_cp)
    time_end = time.perf_counter()
    print('cp operate duration {}ms'.format((time_end-time_start)*1000))

    # the same multiply on the CPU with numpy, for comparison
    time_start = time.perf_counter()
    data_np = np.matmul(data_np, data_np)
    time_end = time.perf_counter()
    print('np operate duration {}ms'.format((time_end-time_start)*1000))

    # device -> host copy
    time_start = time.perf_counter()
    data_np = cp.asnumpy(data_cp)
    time_end = time.perf_counter()
    print('cp cp_to_np duration {}ms'.format((time_end-time_start)*1000))

def time_of_numpy_to_pytorch_to_numpy(data_size):
    data_np = np.ones((data_size,data_size), dtype=float)

    # numpy -> tensor (zero-copy on the CPU)
    time_start = time.perf_counter()
    data_tr = torch.from_numpy(data_np)
    time_end = time.perf_counter()
    print('torch np_to_torch duration {}ms'.format((time_end-time_start)*1000))

    # host -> device copy
    time_start = time.perf_counter()
    data_tr = data_tr.cuda()
    time_end = time.perf_counter()
    print('torch cpu_to_cuda duration {}ms'.format((time_end-time_start)*1000))

    # run one GPU op outside the timed region so CUDA initialization
    # does not skew the matmul measurement
    data_tr_1 = torch.add(data_tr, 1)

    # matrix multiply on the GPU
    time_start = time.perf_counter()
    data_tr = torch.mm(data_tr_1, data_tr)
    time_end = time.perf_counter()
    print('torch operate duration {}ms'.format((time_end-time_start)*1000))

    # device -> host copy
    time_start = time.perf_counter()
    data_tr = data_tr.cpu()
    time_end = time.perf_counter()
    print('torch cuda_to_cpu duration {}ms'.format((time_end-time_start)*1000))

    # tensor -> numpy (zero-copy on the CPU)
    time_start = time.perf_counter()
    data_np = data_tr.numpy()
    time_end = time.perf_counter()
    print('torch torch_to_np duration {}ms'.format((time_end-time_start)*1000))

if __name__ == '__main__':
    data_size = 10000
    time_of_numpy_to_cupy_to_numpy(data_size)
    time_of_numpy_to_pytorch_to_numpy(data_size)
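One caveat on the numbers this prints: CUDA kernels launch asynchronously, so wrapping cp.matmul or torch.mm in time.perf_counter() alone can measure little more than the kernel launch overhead, with the actual compute time hidden inside the later device-to-host copy. A minimal sketch of a synchronized measurement on the PyTorch side, assuming a CUDA device is available (the helper name timed_gpu_matmul is mine):

import time
import torch

def timed_gpu_matmul(data_size=10000):
    x = torch.ones((data_size, data_size), dtype=torch.float32, device='cuda')
    torch.cuda.synchronize()          # make sure the upload has finished
    time_start = time.perf_counter()
    y = torch.mm(x, x)
    torch.cuda.synchronize()          # wait for the kernel to actually complete
    time_end = time.perf_counter()
    print('torch synchronized matmul duration {}ms'.format((time_end-time_start)*1000))
    return y

CuPy offers the analogous cp.cuda.Stream.null.synchronize() for the same purpose.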
