Last active: July 28, 2024 09:48
Revisions
sparkydogX revised this gist
Mar 19, 2019. 1 changed file with 1 addition and 1 deletion.
@@ -15,7 +15,7 @@ def occumpy_mem(cuda_device):
    total, used = check_mem(cuda_device)
    total = int(total)
    used = int(used)
    max_mem = int(total * 0.9)
    block_mem = max_mem - used
    x = torch.cuda.FloatTensor(256,1024,block_mem)
    del x
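The tensor shape is what makes the arithmetic work out: nvidia-smi with --format=csv,nounits,noheader reports memory.total and memory.used in MiB, and one float32 slice of 256 x 1024 elements is 256 * 1024 * 4 bytes = 1 MiB, so a (256, 1024, block_mem) tensor claims almost exactly block_mem MiB. A minimal sketch of that calculation, using hypothetical readings for an 11 GB card (the numbers below are illustrative, not from the gist):

    # Hypothetical nvidia-smi readings (illustrative values only).
    total_mib = 11178   # memory.total in MiB
    used_mib = 1100     # memory.used in MiB

    max_mem = int(total_mib * 0.9)   # occupy up to ~90% of the card (this revision's fraction)
    block_mem = max_mem - used_mib   # MiB left to claim

    # Each unit of the last dimension adds 256 * 1024 float32 elements = 1,048,576 bytes = 1 MiB.
    bytes_claimed = 256 * 1024 * block_mem * 4
    assert bytes_claimed == block_mem * 2**20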
sparkydogX revised this gist
Mar 18, 2019. 1 changed file with 1 addition and 1 deletion.
@@ -17,7 +17,7 @@ def occumpy_mem(cuda_device):
    used = int(used)
    max_mem = int(total * 0.8)
    block_mem = max_mem - used
    x = torch.cuda.FloatTensor(256,1024,block_mem)
    del x

if __name__ == '__main__':
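The one-line change in this revision replaces x = torch.rand((256,1024,block_mem)).cuda() with x = torch.cuda.FloatTensor(256,1024,block_mem). A plausible reason, not stated in the gist: torch.rand(...).cuda() first builds the random tensor in host RAM and then copies it to the device, while the legacy torch.cuda.FloatTensor constructor allocates uninitialized memory directly on the GPU, so the block_mem-sized buffer never has to exist on the CPU. A minimal sketch of the two variants:

    # Both lines reserve roughly block_mem MiB on the GPU; only the allocation path differs.
    x = torch.rand((256, 1024, block_mem)).cuda()     # materialized in host RAM first, then copied
    x = torch.cuda.FloatTensor(256, 1024, block_mem)  # uninitialized, allocated directly on the device
    del x  # PyTorch's caching allocator keeps the memory reserved, so nvidia-smi still reports it as used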
sparkydogX revised this gist
Mar 18, 2019. 1 changed file with 4 additions and 5 deletions.
@@ -6,14 +6,13 @@
# declare which gpu device to use
cuda_device = '0'

def check_mem(cuda_device):
    devices_info = os.popen('"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader').read().strip().split("\n")
    total, used = devices_info[int(cuda_device)].split(',')
    return total,used

def occumpy_mem(cuda_device):
    total, used = check_mem(cuda_device)
    total = int(total)
    used = int(used)
    max_mem = int(total * 0.8)

@@ -23,7 +22,7 @@ def main():

if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    occumpy_mem(cuda_device)
    for _ in tqdm(range(60)):
        time.sleep(1)
    print('Done')
sparkydogX created this gist
Mar 16, 2019.
@@ -0,0 +1,29 @@
import os
import torch
from tqdm import tqdm
import time

# declare which gpu device to use
cuda_device = '0'

def check_mem():
    devices_info = os.popen('"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader').read().strip().split("\n")
    total, used = devices_info[int(cuda_device)].split(',')
    return total,used

def main():
    total, used = check_mem()
    total = int(total)
    used = int(used)
    max_mem = int(total * 0.8)
    block_mem = max_mem - used
    x = torch.rand((256,1024,block_mem)).cuda()
    del x

if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    main()
    for _ in tqdm(range(60)):
        time.sleep(1)
    print('Done')
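For reference, the sketch below assembles the script as it stands after all the revisions above (the check_mem/occumpy_mem split with a per-call cuda_device argument, the direct torch.cuda.FloatTensor allocation, and the 0.9 fraction from the latest revision); the comments are added here and are not part of the gist.

    import os
    import torch
    from tqdm import tqdm
    import time

    # declare which gpu device to use
    cuda_device = '0'

    def check_mem(cuda_device):
        # Ask nvidia-smi for total/used memory (in MiB) of every GPU and pick the requested one.
        devices_info = os.popen('"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader').read().strip().split("\n")
        total, used = devices_info[int(cuda_device)].split(',')
        return total, used

    def occumpy_mem(cuda_device):
        total, used = check_mem(cuda_device)
        total = int(total)
        used = int(used)
        max_mem = int(total * 0.9)   # occupy up to ~90% of the card
        block_mem = max_mem - used   # MiB still to be claimed
        # A (256, 1024, block_mem) float32 tensor takes almost exactly block_mem MiB on the device.
        x = torch.cuda.FloatTensor(256, 1024, block_mem)
        del x                        # the caching allocator keeps the memory reserved

    if __name__ == '__main__':
        os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
        occumpy_mem(cuda_device)
        for _ in tqdm(range(60)):    # hold the reservation while showing a progress bar
            time.sleep(1)
        print('Done')

Because only del x is called, the reserved memory stays in PyTorch's caching allocator for the life of the process; a later torch.cuda.empty_cache() call, or simply exiting, releases it back to the driver.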