import torch

# Report the peak GPU memory occupied by tensors on the current device,
# converted from bytes to GiB (max_memory_allocated returns bytes).
print("cuda memory = {:.3f} GiB".format(torch.cuda.max_memory_allocated() / 1024 ** 3))