@cli99
Last active September 23, 2024 22:08
Float8_e4m3fn
import torch
from torch.utils.cpp_extension import load_inline
finfo = torch.finfo(torch.float8_e4m3fn)
print(f"finfo: {finfo}")
# finfo(resolution=1, min=-448, max=448, eps=0.125, smallest_normal=0.015625, tiny=0.015625, dtype=float8_e4m3fn)
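
# A minimal sketch of one common use of finfo.max: rescale a tensor into the
# e4m3 range, clamp, and cast. This is illustrative only and assumes a PyTorch
# build where casting to torch.float8_e4m3fn is supported on this device.
x = torch.randn(4, 4)
scale = finfo.max / x.abs().amax().clamp(min=1e-12)  # guard against all-zero input
x_fp8 = (x * scale).clamp(finfo.min, finfo.max).to(torch.float8_e4m3fn)
print(x_fp8.dtype)  # torch.float8_e4m3fn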
cuda_source = """
C10_HOST_DEVICE constexpr auto FP8_E4M3_MAX = std::numeric_limits<c10::Float8_e4m3fn>::max();
void test() {
std::cout << FP8_E4M3_MAX << std::endl;
}
"""
cpp_source = "void test();"
ext = load_inline(
name="ext",
cpp_sources=[cpp_source],
cuda_sources=[cuda_source],
functions=["test"],
with_cuda=True,
)
ext.test() # 448
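
# Sanity check: the C++ FP8_E4M3_MAX printed above and the Python-side finfo
# agree on the e4m3fn maximum, 448.
assert float(finfo.max) == 448.0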