let
  # Builder for a small launcher script: runs a llama.cpp binary through the
  # nixGL Nvidia shim so the CUDA userspace libraries match the host driver.
  # Usage of the resulting script:  run-llamafile <program> [args...]
  # where <program> selects the llama-cpp-<program> binary (e.g. "server").
  package = { writeShellScript, llama-cpp, nixGL }:
    writeShellScript "run-llamafile" ''
      if [ $# -eq 0 ] ; then
        # Diagnostics belong on stderr so stdout stays clean for piping.
        echo "Usage: $0 server -m wizardcoder-python-13b-v1.0.Q4_K_M.gguf -ngl 9999" >&2
        exit 2
      fi
      program=$1
      shift
      # nixGL names its wrapper with a driver-version suffix
      # (nixGLNvidia-<version>), hence the glob.
      # exec replaces this shell so signals and the exit status reach the
      # llama.cpp process directly.
      exec ${nixGL.auto.nixGLNvidia}/bin/nixGLNvidia-* ${llama-cpp}/bin/llama-cpp-"$program" "$@"
    '';

  # nixpkgs pinned to an exact revision for reproducible builds.
  nixpkgs = fetchTarball
    "https://github.com/NixOS/nixpkgs/archive/d64c6c31e50878fc6cd11d143a8bbb235bdfcc45.tar.gz";

  # Instantiate nixpkgs with CUDA support enabled globally.
  pkgs = import nixpkgs { config = { cudaSupport = true; }; };

  # Build llama.cpp from the pinned tree (picks up cudaSupport above).
  llama-cpp = pkgs.callPackage
    (import "${nixpkgs}/pkgs/by-name/ll/llama-cpp/package.nix") { };

  # nixGL pinned to an exact revision; provides the Nvidia GL/CUDA wrappers.
  nixGL-source = fetchTarball
    "https://github.com/nix-community/nixGL/archive/489d6b095ab9d289fe11af0219a9ff00fe87c7c5.zip";
  nixGL = pkgs.callPackage (import "${nixGL-source}/nixGL.nix") { };
in
pkgs.callPackage package { inherit llama-cpp nixGL; }