Skip to content

Instantly share code, notes, and snippets.

@sparkydogX
Last active July 28, 2024 09:48
Show Gist options
  • Select an option

  • Save sparkydogX/845b658e3e6cef58a7bf706a9f43d7bf to your computer and use it in GitHub Desktop.

Select an option

Save sparkydogX/845b658e3e6cef58a7bf706a9f43d7bf to your computer and use it in GitHub Desktop.

Revisions

  1. sparkydogX revised this gist Mar 19, 2019. 1 changed file with 1 addition and 1 deletion.
    2 changes: 1 addition & 1 deletion occupy-memory.py
    Original file line number Diff line number Diff line change
    @@ -15,7 +15,7 @@ def occumpy_mem(cuda_device):
    total, used = check_mem(cuda_device)
    total = int(total)
    used = int(used)
    max_mem = int(total * 0.8)
    max_mem = int(total * 0.9)
    block_mem = max_mem - used
    x = torch.cuda.FloatTensor(256,1024,block_mem)
    del x
  2. sparkydogX revised this gist Mar 18, 2019. 1 changed file with 1 addition and 1 deletion.
    2 changes: 1 addition & 1 deletion occupy-memory.py
    Original file line number Diff line number Diff line change
    @@ -17,7 +17,7 @@ def occumpy_mem(cuda_device):
    used = int(used)
    max_mem = int(total * 0.8)
    block_mem = max_mem - used
    x = torch.rand((256,1024,block_mem)).cuda()
    x = torch.cuda.FloatTensor(256,1024,block_mem)
    del x

    if __name__ == '__main__':
  3. sparkydogX revised this gist Mar 18, 2019. 1 changed file with 4 additions and 5 deletions.
    9 changes: 4 additions & 5 deletions occupy-memory.py
    Original file line number Diff line number Diff line change
    @@ -6,14 +6,13 @@
    # declare which gpu device to use
    cuda_device = '0'

    def check_mem():
    def check_mem(cuda_device):
    devices_info = os.popen('"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader').read().strip().split("\n")
    total, used = devices_info[int(cuda_device)].split(',')
    return total,used

    def main():

    total, used = check_mem()
    def occumpy_mem(cuda_device):
    total, used = check_mem(cuda_device)
    total = int(total)
    used = int(used)
    max_mem = int(total * 0.8)
    @@ -23,7 +22,7 @@ def main():

    if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    main()
    occumpy_mem(cuda_device)
    for _ in tqdm(range(60)):
    time.sleep(1)
    print('Done')
  4. sparkydogX created this gist Mar 16, 2019.
    29 changes: 29 additions & 0 deletions occupy-memory.py
    Original file line number Diff line number Diff line change
    @@ -0,0 +1,29 @@
    import os
    import torch
    from tqdm import tqdm
    import time

    # declare which gpu device to use
    cuda_device = '0'

    def check_mem(cuda_device=None):
        """Query nvidia-smi for the total and used memory of one GPU.

        Parameters
        ----------
        cuda_device : str | int, optional
            Index of the GPU to inspect.  When omitted, falls back to the
            module-level ``cuda_device`` constant, so existing
            ``check_mem()`` callers behave exactly as before.

        Returns
        -------
        tuple[str, str]
            ``(total, used)`` memory as the raw strings printed by
            nvidia-smi (MiB, because of ``nounits``).
        """
        if cuda_device is None:
            # Preserve the original behaviour of reading the module-level
            # ``cuda_device`` when no argument is given.
            cuda_device = globals()['cuda_device']
        # One CSV line per GPU, formatted "total, used" (no units, no header).
        devices_info = os.popen('"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader').read().strip().split("\n")
        total, used = devices_info[int(cuda_device)].split(',')
        return total, used

    def main(mem_ratio=0.8):
        """Occupy most of the free memory on the current GPU.

        Allocates one large CUDA tensor and immediately deletes it; the
        memory stays reserved by PyTorch's caching allocator, so
        nvidia-smi keeps reporting it as used while the process lives.

        Parameters
        ----------
        mem_ratio : float, optional
            Fraction of total GPU memory to occupy.  Defaults to 0.8,
            the original hard-coded value, so existing ``main()``
            callers are unaffected.
        """
        total, used = check_mem()
        total = int(total)
        used = int(used)
        max_mem = int(total * mem_ratio)
        # Remaining MiB to claim (nvidia-smi reports MiB with nounits).
        block_mem = max_mem - used
        if block_mem <= 0:
            # GPU is already at/above the target usage; a non-positive
            # tensor dimension would raise, so there is nothing to do.
            return
        # 256 * 1024 float32 elements == exactly 1 MiB, so this tensor is
        # block_mem MiB.  Allocate directly on the GPU rather than
        # torch.rand(...).cuda(), which would first materialise the tensor
        # in host RAM and then copy it over.
        x = torch.cuda.FloatTensor(256, 1024, block_mem)
        # Dropping the reference returns the memory to torch's caching
        # allocator; from the driver's view it remains reserved.
        del x

    if __name__ == '__main__':
        # Make the chosen GPU the only one visible before CUDA initialises.
        os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
        main()
        # Hold the process open for a minute so the cached allocation
        # stays visible in nvidia-smi.
        for _tick in tqdm(range(60)):
            time.sleep(1)
        print('Done')