I wrote these instructions as part of "installing PyTorch with CUDA 12.1.1".
Anyway, if you still need to compile from source… here's how:
Magma is a dependency of PyTorch, and it is sensitive to the installed CUDA version.
Clone Magma:
# A shallow clone is enough; MAGMA's full history is not needed for a build.
git clone --depth 1 https://bitbucket.org/icl/magma.git
cd magma
Look up your CUDA architecture (e.g. the RTX 4090 is compute capability 8.9). Use this to determine the two GPU_TARGET variables below.
Activate your conda environment, if you haven't done so already.
Install a Fortran compiler:
sudo apt-get install gfortran
Install Intel BLAS and LAPACK (https://www.intel.com/content/www/us/en/docs/oneapi/installation-guide-linux/2023-2/apt.html#GUID-186C17A8-4183-4BC3-B367-01331B1B74AF)
# Add Intel's GPG signing key to a dedicated APT keyring.
wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null
# sudo will prompt for your password here.
# Register the oneAPI APT repository, pinned to the keyring installed above.
echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list
sudo apt update
# Alternative (disabled): install the full oneAPI toolkits system-wide via apt
# instead of getting the Intel stack from conda below.
#sudo apt install intel-basekit intel-aikit
# If you installed the system-wide toolkits, source the Intel oneAPI environment:
#. /opt/intel/oneapi/setvars.sh
# Create and activate a dedicated conda environment for this build.
conda create -n ietrans
conda activate ietrans
# Prefer Intel's conda channel in this environment only (--env scopes the change).
conda config --env --add channels intel
# As of writing, Intel Python 3.11 has not been released yet, so pin Python 3.10.
# Pulls in MKL (BLAS/LAPACK), cmake, and ninja needed for the MAGMA build below.
conda install intelpython3_full python=3.10 mkl-dpcpp mkl-include cmake ninja intel-extension-for-pytorch intel-openmp intel-fortran-rt dpcpp-cpp-rt
# Python-side dependencies for building PyTorch from source afterwards.
conda install astunparse expecttest hypothesis numpy psutil pyyaml requests setuptools types-dataclasses typing-extensions sympy filelock networkx jinja2 fsspec
Configure Magma (see README):
# Older workaround (disabled, kept for reference): point the loader at an
# Anaconda-provided MKL directly.
#export LD_LIBRARY_PATH=${HOME}/anaconda3/lib:${LD_LIBRARY_PATH}
#export LD_PRELOAD=${HOME}/anaconda3/lib/libmkl_core.so:${HOME}/anaconda3/lib/libmkl_sequential.so
# Write MAGMA's make.inc. NOTE(review): GPU_TARGET is sm_86 here, but a 4090 is
# compute capability 8.9 — adjust to your GPU before building. FORT = false
# matches -DUSE_FORTRAN=OFF below.
echo -e "GPU_TARGET = sm_86\nBACKEND = cuda\nFORT = false" > make.inc
make generate
# NOTE(review): this *overwrites* LD_LIBRARY_PATH rather than appending to any
# existing value — confirm that is intended.
export LD_LIBRARY_PATH="${CONDA_PREFIX}/lib:/usr/local/cuda/targets/x86_64-linux/lib"
# NOTE(review): the intro mentions CUDA 12.1.1 but this points at 12.2 — match
# this path to the toolkit you actually installed.
export CUDA_DIR="/usr/local/cuda-12.2"
export CONDA_LIB=${CONDA_PREFIX}/lib
#export LD_PRELOAD="${CONDA_PREFIX}/lib/libiomp5.so"
# Be careful here; they didn't accept sm_89 so I had to round it down to major
# version, sm_80 (hence GPU_TARGET="Ampere" below).
make clean && rm -rf build/
# Earlier attempt (disabled) that passed the MKL libraries explicitly; note it
# contains unbalanced quotes in -DLAPACK_LIBRARIES and is kept only for reference.
#TARGETARCH=amd64 cmake -H. -Bbuild -DUSE_FORTRAN=OFF -DGPU_TARGET="Ampere" -DCMAKE_CUDA_COMPILER="$CUDA_DIR/bin/nvcc" -DMKLROOT=${CONDA_PREFIX} -DLIBS="${CONDA_LIB}/libmkl_intel_lp64.so;${CONDA_LIB}/libmkl_intel_thread.so;${CONDA_LIB}/libmkl_core.so;${CONDA_LIB}/libiomp5.so;-lm;-ldl" -DLAPACK_LIBRARIES=${CONDA_LIB}/libmkl_intel_lp64.so;${CONDA_LIB}/libmkl_intel_thread.so;/${CONDA_LIB}/libmkl_core.so;${CONDA_LIB}/libiomp5.so;-lm;-ldl;-lm;-ldl" -DCUDA_NVCC_FLAGS="-Xfatbin;-compress-all;-DHAVE_CUBLAS;-std=c++11;--threads=0;" -GNinja
# Static build (-DBUILD_SHARED_LIBS=OFF) with -fPIC so the archive can be
# linked into shared libraries later; MKL is taken from the conda env.
TARGETARCH=amd64 cmake -H. -Bbuild -DUSE_FORTRAN=OFF -DGPU_TARGET="Ampere" -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS="-fPIC" -DCMAKE_C_FLAGS="-fPIC" -DMKLROOT=${CONDA_PREFIX} -DCUDA_NVCC_FLAGS="-Xfatbin;-compress-all;-DHAVE_CUBLAS;-std=c++11;--threads=0;" -GNinja
# MAGMA's install step expects this prefix to exist; -p makes the command
# idempotent instead of failing when the directory is already present.
sudo mkdir -p /usr/local/magma/
# Build and install; the build tree is removed only if the install succeeds.
# NOTE(review): the copy steps further down read from build/ — skip the rm if
# you plan to copy headers/libs from the build tree rather than /usr/local/magma.
cmake --build build --target install && rm -r ./build
Build Magma:
cmake --build build -j $(nproc) --target install
Install Magma:
# Copy the built headers, shared libraries, and pkg-config files from the
# build tree into the active conda environment so PyTorch's build can find them.
cp build/include/* ${CONDA_PREFIX}/include/
cp build/lib/*.so ${CONDA_PREFIX}/lib/
cp build/lib/pkgconfig/*.pc ${CONDA_PREFIX}/lib/pkgconfig/
# If MAGMA was installed to /usr/local/magma (the install target above), copy
# from there instead.
sudo cp /usr/local/magma/include/* ${CONDA_PREFIX}/include/
# Disabled — presumably because the static build (-DBUILD_SHARED_LIBS=OFF)
# produces no .so under /usr/local/magma/lib; confirm before re-enabling.
#sudo cp /usr/local/magma/lib/*.so ${CONDA_PREFIX}/lib/
sudo cp /usr/local/magma/lib/pkgconfig/*.pc ${CONDA_PREFIX}/lib/pkgconfig/
# Alternative (disabled): install system-wide under /usr/local instead of into
# the conda environment.
#sudo cp build/target/include/* /usr/local/include/
#sudo cp build/target/lib/*.so /usr/local/lib/
#sudo cp build/target/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig/