[pytorch] 쿠다 사용

less than 1 minute read

개요

torch를 이용해 cuda를 사용해보겠다.

torch를 이용한 cuda의 기본 사용법

import torch
 
#  Returns a bool indicating if CUDA is currently available.
torch.cuda.is_available()
#  True
 
#  Returns the index of the currently selected device.
torch.cuda.current_device()
#  0
 
#  Returns the number of GPUs available.
torch.cuda.device_count()
#  1
 
#  Gets the name of a device (example output depends on the machine).
torch.cuda.get_device_name(0)
#  'GeForce GTX 1060'
 
#  Context-manager that changes the selected device.
#  device (torch.device or int) – device index to select. 
torch.cuda.device(0)

gpu에 로딩

# Default CUDA device
cuda = torch.device('cuda')
 
# Allocates a tensor directly on the default GPU.
a = torch.tensor([1., 2.], device=cuda)
 
# Transfers a tensor from CPU to GPU.
b = torch.tensor([1., 2.]).cuda()
 
# Same effect as .cuda() above, using the generic .to() API.
b2 = torch.tensor([1., 2.]).to(device=cuda)

# This tensor stays on the CPU (no device argument).
c = torch.tensor([1,2])

# Operations are only allowed between tensors on the same device;
# mixing a GPU tensor with a CPU tensor raises a RuntimeError (shown below).
a+c 
#output
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!

더 이상 참조되지 않는 tensor가 차지하던 캐시 메모리를 gpu에서 release! (empty_cache()는 사용 중인 tensor 자체를 해제하지는 않는다)

torch.cuda.empty_cache()