import torch

# Diagnostic script: report PyTorch/CUDA availability, select a device,
# and (when a GPU is present) print basic memory statistics.

print(f"Pytorch version: {torch.__version__}")
print(f"Is CUDA available?: {torch.cuda.is_available()}")

# Set device on GPU if available, else CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('\nUsing device:', device)

# GPU-specific queries must be guarded: calling current_device() or
# get_device_name() on a CPU-only build raises a RuntimeError.
if device.type == 'cuda':
    print(f"Current device: {torch.cuda.current_device()}")
    print(f"Device count: {torch.cuda.device_count()}")
    print(f"Device name: {torch.cuda.get_device_name(0)}")

    # Additional info on memory.
    # memory_cached() was deprecated and renamed memory_reserved()
    # in PyTorch 1.4+; use the current API.
    print('\nMemory Usage:')
    print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024**3, 1), 'GB')
    print('Reserved: ', round(torch.cuda.memory_reserved(0) / 1024**3, 1), 'GB')