@cli99
Last active September 23, 2024 22:08

Revisions

  1. cli99 revised this gist Sep 23, 2024. 1 changed file with 5 additions and 1 deletion.
    6 changes: 5 additions & 1 deletion test_Float8_e4m3fn.py
    @@ -1,6 +1,10 @@
     import torch
     from torch.utils.cpp_extension import load_inline

    +finfo = torch.finfo(torch.float8_e4m3fn)
    +print(f"finfo: {finfo}")
    +# finfo(resolution=1, min=-448, max=448, eps=0.125, smallest_normal=0.015625, tiny=0.015625, dtype=float8_e4m3fn)
    +
     cuda_source = """
     C10_HOST_DEVICE constexpr auto FP8_E4M3_MAX = std::numeric_limits<c10::Float8_e4m3fn>::max();
     void test() {
    @@ -17,4 +21,4 @@
         with_cuda=True,
     )

    -ext.test()
    +ext.test() # 448
  2. cli99 created this gist Sep 23, 2024.
    20 changes: 20 additions & 0 deletions test_Float8_e4m3fn.py
    @@ -0,0 +1,20 @@
    import torch
    from torch.utils.cpp_extension import load_inline

    cuda_source = """
    C10_HOST_DEVICE constexpr auto FP8_E4M3_MAX = std::numeric_limits<c10::Float8_e4m3fn>::max();
    void test() {
        std::cout << FP8_E4M3_MAX << std::endl;
    }
    """
    cpp_source = "void test();"

    ext = load_inline(
        name="ext",
        cpp_sources=[cpp_source],
        cuda_sources=[cuda_source],
        functions=["test"],
        with_cuda=True,
    )

    ext.test()
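
For reference, the 448 printed by ext.test() is the largest finite value the float8_e4m3fn format can represent, matching the torch.finfo output in the later revision. Below is a small Python sketch, not part of the gist, showing how that bound falls out of the format itself (4 exponent bits with bias 7, 3 mantissa bits, no infinities, and only the all-ones exponent/mantissa pattern reserved for NaN):

    import torch

    # e4m3fn: 4 exponent bits (bias 7), 3 mantissa bits, no inf. The all-ones
    # exponent field is still usable for finite values; only exponent 0b1111
    # with mantissa 0b111 encodes NaN, so the largest finite value uses
    # exponent 0b1111 with mantissa 0b110.
    bias = 7
    max_exp_field = 0b1111                    # 15
    max_mantissa_frac = 0b110 / 2**3          # 0.75 (0b111 here would be NaN)
    fp8_e4m3_max = (1 + max_mantissa_frac) * 2.0 ** (max_exp_field - bias)

    print(fp8_e4m3_max)                           # 448.0
    print(torch.finfo(torch.float8_e4m3fn).max)   # 448.0, same bound as the C++ printout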