# Running Ollama on Intel Mac (MBP Early 2015)

This is a memorandum of running [Ollama](https://github.com/jmorganca/ollama) on an Intel MacBook Pro.

## Machine

```shellsession
$ neofetch --stdout
[email protected]
-----------------------------
OS: macOS 12.7.1 21G920 x86_64
Host: MacBookPro12,1
Kernel: 21.6.0
Uptime: 4 days, 18 hours, 6 mins
Packages: 365 (brew)
Shell: bash 3.2.57
Resolution: 1680x1050@2x
DE: Aqua
WM: Quartz Compositor
WM Theme: Blue (Dark)
Terminal: Apple_Terminal
Terminal Font: HackGenConsoleNF-Regular
CPU: Intel i5-5257U (4) @ 2.70GHz
GPU: Intel Iris Graphics 6100
Memory: 5637MiB / 8192MiB
```

## Building from source
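The build needs the Xcode Command Line Tools plus Go and CMake; the log below shows AppleClang from CommandLineTools and git 2.42.1 out of `/usr/local`, but the exact toolchain versions are otherwise not recorded. A minimal setup sketch, assuming Homebrew:

```shellsession
$ xcode-select --install   # provides the AppleClang compiler seen below
$ brew install go cmake    # go generate drives the cmake builds of llama.cpp
```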
```shellsession
$ git clone https://github.com/jmorganca/ollama.git
...
$ cd ollama
$ git submodule update
Submodule path 'llm/llama.cpp/ggml': checked out '9e232f0234073358e7031c1b8d7aa45020469a3b'
Submodule path 'llm/llama.cpp/gguf': checked out '9e70cc03229df19ca2d28ce23cc817198f897278'
$ cd llm/llama.cpp
$ go generate generate_darwin_amd64.go
Submodule path 'ggml': checked out '9e232f0234073358e7031c1b8d7aa45020469a3b'
-- The C compiler identification is AppleClang 14.0.0.14000029
-- The CXX compiler identification is AppleClang 14.0.0.14000029
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /Library/Developer/CommandLineTools/usr/bin/cc - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /Library/Developer/CommandLineTools/usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /usr/local/bin/git (found version "2.42.1")
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE
-- Accelerate framework found
-- CMAKE_SYSTEM_PROCESSOR: x86_64
-- x86 detected
-- Configuring done (3.2s)
-- Generating done (1.5s)
-- Build files have been written to: /Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/build/cpu
[  9%] Building C object CMakeFiles/ggml.dir/ggml.c.o
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:2420:5: warning: implicit conversion increases floating-point precision: 'float' to 'ggml_float' (aka 'double') [-Wdouble-promotion]
    GGML_F16_VEC_REDUCE(sumf, sum);
    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:2052:37: note: expanded from macro 'GGML_F16_VEC_REDUCE'
#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
                                    ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:2042:33: note: expanded from macro 'GGML_F32Cx8_REDUCE'
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
                                ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:1988:11: note: expanded from macro 'GGML_F32x8_REDUCE'
          res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                 \
          ~     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:3462:9: warning: implicit conversion increases floating-point precision: 'float' to 'ggml_float' (aka 'double') [-Wdouble-promotion]
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:2052:37: note: expanded from macro 'GGML_F16_VEC_REDUCE'
#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
                                    ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:2042:33: note: expanded from macro 'GGML_F32Cx8_REDUCE'
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
                                ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:1988:11: note: expanded from macro 'GGML_F32x8_REDUCE'
          res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                 \
          ~     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:603:23: warning: unused function 'mul_sum_i8_pairs' [-Wunused-function]
static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
                      ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:634:19: warning: unused function 'hsum_i32_4' [-Wunused-function]
static inline int hsum_i32_4(const __m128i a) {
                  ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/ggml/ggml.c:699:23: warning: unused function 'packNibbles' [-Wunused-function]
static inline __m128i packNibbles( __m256i bytes )
                      ^
5 warnings generated.
[ 18%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o
[ 27%] Building C object CMakeFiles/ggml.dir/k_quants.c.o
[ 27%] Built target ggml
[ 36%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[ 45%] Linking CXX static library libllama.a
[ 45%] Built target llama
[ 54%] Building CXX object examples/CMakeFiles/common.dir/common.cpp.o
[ 63%] Building CXX object examples/CMakeFiles/common.dir/console.cpp.o
[ 72%] Building CXX object examples/CMakeFiles/common.dir/grammar-parser.cpp.o
[ 72%] Built target common
[ 81%] Built target BUILD_INFO
[ 90%] Building CXX object examples/server/CMakeFiles/server.dir/server.cpp.o
[100%] Linking CXX executable ../../bin/server
ld: warning: directory not found for option '-L/usr/local/opt/pcsc-lite/lib:-L/usr/local/opt/openssl@3/lib:-L/usr/local/opt/python3/lib:'
[100%] Built target server
Submodule path 'gguf': checked out '9e70cc03229df19ca2d28ce23cc817198f897278'
-- The C compiler identification is AppleClang 14.0.0.14000029
-- The CXX compiler identification is AppleClang 14.0.0.14000029
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /Library/Developer/CommandLineTools/usr/bin/cc - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /Library/Developer/CommandLineTools/usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /usr/local/bin/git (found version "2.42.1")
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE
-- Accelerate framework found
-- Metal framework found
-- CMAKE_SYSTEM_PROCESSOR: x86_64
-- x86 detected
-- Configuring done (3.3s)
-- Generating done (0.8s)
-- Build files have been written to: /Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/build/cpu
[  6%] Building C object CMakeFiles/ggml.dir/ggml.c.o
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:2432:5: warning: implicit conversion increases floating-point precision: 'float' to 'ggml_float' (aka 'double') [-Wdouble-promotion]
    GGML_F16_VEC_REDUCE(sumf, sum);
    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:2064:37: note: expanded from macro 'GGML_F16_VEC_REDUCE'
#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
                                    ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:2054:33: note: expanded from macro 'GGML_F32Cx8_REDUCE'
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
                                ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:2000:11: note: expanded from macro 'GGML_F32x8_REDUCE'
          res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                 \
          ~     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:3692:9: warning: implicit conversion increases floating-point precision: 'float' to 'ggml_float' (aka 'double') [-Wdouble-promotion]
        GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:2064:37: note: expanded from macro 'GGML_F16_VEC_REDUCE'
#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE
                                    ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:2054:33: note: expanded from macro 'GGML_F32Cx8_REDUCE'
#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE
                                ^
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml.c:2000:11: note: expanded from macro 'GGML_F32x8_REDUCE'
          res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));                 \
          ~     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2 warnings generated.
[ 13%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o
[ 20%] Building C object CMakeFiles/ggml.dir/ggml-backend.c.o
[ 26%] Building C object CMakeFiles/ggml.dir/ggml-metal.m.o
/Users/admin/GitHub/PublicRepos/ollama/llm/llama.cpp/gguf/ggml-metal.m:128:25: warning: unused variable 'msl_library_source' [-Wunused-const-variable]
static NSString * const msl_library_source = @"see metal.metal";
                        ^
1 warning generated.
[ 33%] Building C object CMakeFiles/ggml.dir/k_quants.c.o
[ 33%] Built target ggml
[ 40%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[ 46%] Linking CXX static library libllama.a
[ 46%] Built target llama
[ 53%] Building CXX object common/CMakeFiles/common.dir/common.cpp.o
[ 60%] Building CXX object common/CMakeFiles/common.dir/sampling.cpp.o
[ 66%] Building CXX object common/CMakeFiles/common.dir/console.cpp.o
[ 73%] Building CXX object common/CMakeFiles/common.dir/grammar-parser.cpp.o
[ 80%] Building CXX object common/CMakeFiles/common.dir/train.cpp.o
[ 80%] Built target common
[ 86%] Built target BUILD_INFO
[ 93%] Building CXX object examples/server/CMakeFiles/server.dir/server.cpp.o
[100%] Linking CXX executable ../../bin/server
ld: warning: directory not found for option '-L/usr/local/opt/pcsc-lite/lib:-L/usr/local/opt/openssl@3/lib:-L/usr/local/opt/python3/lib:'
[100%] Built target server
$ cd ../..
$ go build .
```
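After `go build .` the repository root contains a freshly built `ollama` binary. A minimal way to exercise it (these are Ollama's standard CLI commands; the model tag `orca-mini` is an illustrative assumption, not taken from the log):

```shellsession
$ ./ollama serve &         # start the API server on 127.0.0.1:11434
$ ./ollama run orca-mini   # pull the model if needed and open a prompt
```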
## Server log

Below is the server log while handling a generate request. Two failures show up on this machine: the pre-built Metal runner aborts with `bad CPU type in executable`, so Ollama falls back to the CPU runner, and the generation itself ends in a segmentation fault.

```
2023/11/07 19:20:47 images.go:824: total blobs: 11
2023/11/07 19:20:47 images.go:831: total unused blobs removed: 0
2023/11/07 19:20:47 routes.go:680: Listening on 127.0.0.1:11434 (version 0.1.8)
[GIN] 2023/11/07 - 19:21:48 | 200 | 1.035598ms | 127.0.0.1 | HEAD "/"
[GIN] 2023/11/07 - 19:21:48 | 200 | 5.39239ms | 127.0.0.1 | POST "/api/show"
2023/11/07 19:21:48 llama.go:384: starting llama runner
2023/11/07 19:21:48 llama.go:386: error starting the external llama runner: fork/exec /var/folders/8c/lmckjks95fj4h_jqzw4v3k_w0000gn/T/ollama3772859082/llama.cpp/gguf/build/metal/bin/ollama-runner: bad CPU type in executable
2023/11/07 19:21:48 llama.go:384: starting llama runner
2023/11/07 19:21:48 llama.go:442: waiting for llama runner to start responding
{"timestamp":1699352509,"level":"WARNING","function":"server_params_parse","line":873,"message":"Not compiled with GPU offload support, --n-gpu-layers option will be ignored.
See main README.md for information on enabling GPU BLAS support","n_gpu_layers":-1}
{"timestamp":1699352509,"level":"INFO","function":"main","line":1324,"message":"build info","build":219,"commit":"9e70cc0"}
{"timestamp":1699352509,"level":"INFO","function":"main","line":1330,"message":"system info","n_threads":2,"n_threads_batch":-1,"total_threads":4,"system_info":"AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | "}
llama_model_loader: loaded meta data with 19 key-value pairs and 237 tensors from /Users/admin/.ollama/models/blobs/sha256:66002b78c70a22ab25e16cc9a1736c6cc6335398c7312e3eb33db202350afe66 (version GGUF V2 (latest))
llama_model_loader: - tensor 0: token_embd.weight q4_0 [ 3200, 32000, 1, 1 ]
llama_model_loader: - tensor 1: blk.0.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 2: blk.0.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 3: blk.0.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 4: blk.0.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 5: blk.0.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 6: blk.0.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 7: blk.0.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 8: blk.0.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 9: blk.0.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 10: blk.1.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 11: blk.1.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 12: blk.1.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 13: blk.1.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 14: blk.1.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 15: blk.1.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 16: blk.1.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 17: blk.1.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 18: blk.1.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 19: blk.2.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 20: blk.2.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 21: blk.2.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 22: blk.2.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 23: blk.2.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 24: blk.2.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 25: blk.2.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 26: blk.2.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 27: blk.2.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 28: blk.3.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 29: blk.3.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 30: blk.3.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 31: blk.3.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 32: blk.3.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 33: blk.3.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 34: blk.3.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 35: blk.3.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 36: blk.3.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 37: blk.4.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 38: blk.4.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 39: blk.4.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 40: blk.4.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 41: blk.4.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 42: blk.4.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 43: blk.4.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 44: blk.4.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 45: blk.4.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 46: blk.5.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 47: blk.5.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 48: blk.5.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 49: blk.5.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 50: blk.5.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 51: blk.5.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 52: blk.5.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 53: blk.5.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 54: blk.5.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 55: blk.6.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 56: blk.6.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 57: blk.6.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 58: blk.6.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 59: blk.6.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 60: blk.6.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 61: blk.6.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 62: blk.6.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 63: blk.6.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 64: blk.7.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 65: blk.7.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 66: blk.7.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 67: blk.7.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 68: blk.7.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 69: blk.7.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 70: blk.7.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 71: blk.7.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 72: blk.7.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 73: blk.8.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 74: blk.8.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 75: blk.8.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 76: blk.8.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 77: blk.8.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 78: blk.8.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 79: blk.8.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 80: blk.8.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 81: blk.8.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 82: blk.9.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 83: blk.9.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 84: blk.9.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 85: blk.9.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 86: blk.9.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 87: blk.9.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 88: blk.9.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 89: blk.9.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 90: blk.9.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 91: blk.10.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 92: blk.10.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 93: blk.10.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 94: blk.10.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 95: blk.10.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 96: blk.10.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 97: blk.10.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 98: blk.10.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 99: blk.10.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 100: blk.11.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 101: blk.11.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 102: blk.11.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 103: blk.11.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 104: blk.11.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 105: blk.11.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 106: blk.11.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 107: blk.11.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 108: blk.11.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 109: blk.12.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 110: blk.12.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 111: blk.12.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 112: blk.12.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 113: blk.12.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 114: blk.12.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 115: blk.12.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 116: blk.12.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 117: blk.12.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 118: blk.13.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 119: blk.13.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 120: blk.13.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 121: blk.13.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 122: blk.13.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 123: blk.13.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 124: blk.13.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 125: blk.13.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 126: blk.13.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 127: blk.14.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 128: blk.14.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 129: blk.14.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 130: blk.14.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 131: blk.14.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 132: blk.14.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 133: blk.14.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 134: blk.14.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 135: blk.14.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 136: blk.15.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 137: blk.15.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 138: blk.15.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 139: blk.15.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 140: blk.15.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 141: blk.15.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 142: blk.15.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 143: blk.15.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 144: blk.15.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 145: blk.16.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 146: blk.16.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 147: blk.16.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 148: blk.16.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 149: blk.16.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 150: blk.16.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 151: blk.16.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 152: blk.16.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 153: blk.16.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 154: blk.17.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 155: blk.17.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 156: blk.17.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 157: blk.17.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 158: blk.17.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 159: blk.17.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 160: blk.17.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 161: blk.17.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 162: blk.17.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 163: blk.18.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 164: blk.18.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 165: blk.18.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 166: blk.18.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 167: blk.18.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 168: blk.18.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 169: blk.18.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 170: blk.18.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 171: blk.18.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 172: blk.19.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 173: blk.19.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 174: blk.19.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 175: blk.19.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 176: blk.19.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 177: blk.19.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 178: blk.19.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 179: blk.19.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 180: blk.19.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 181: blk.20.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 182: blk.20.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 183: blk.20.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 184: blk.20.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 185: blk.20.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 186: blk.20.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 187: blk.20.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 188: blk.20.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 189: blk.20.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 190: blk.21.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 191: blk.21.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 192: blk.21.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 193: blk.21.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 194: blk.21.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 195: blk.21.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 196: blk.21.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 197: blk.21.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 198: blk.21.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 199: blk.22.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 200: blk.22.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 201: blk.22.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 202: blk.22.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 203: blk.22.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 204: blk.22.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 205: blk.22.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 206: blk.22.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 207: blk.22.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 208: blk.23.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 209: blk.23.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 210: blk.23.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 211: blk.23.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 212: blk.23.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 213: blk.23.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 214: blk.23.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 215: blk.23.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 216: blk.23.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 217: blk.24.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 218: blk.24.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 219: blk.24.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 220: blk.24.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 221: blk.24.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 222: blk.24.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 223: blk.24.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 224: blk.24.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 225: blk.24.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 226: blk.25.attn_q.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 227: blk.25.attn_k.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 228: blk.25.attn_v.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 229: blk.25.attn_output.weight q4_0 [ 3200, 3200, 1, 1 ]
llama_model_loader: - tensor 230: blk.25.ffn_gate.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 231: blk.25.ffn_down.weight q4_0 [ 8640, 3200, 1, 1 ]
llama_model_loader: - tensor 232: blk.25.ffn_up.weight q4_0 [ 3200, 8640, 1, 1 ]
llama_model_loader: - tensor 233: blk.25.attn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 234: blk.25.ffn_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 235: output_norm.weight f32 [ 3200, 1, 1, 1 ]
llama_model_loader: - tensor 236: output.weight q8_0 [ 3200, 32000, 1, 1 ]
llama_model_loader: - kv 0: general.architecture str
llama_model_loader: - kv 1: general.name str
llama_model_loader: - kv 2: llama.context_length u32
llama_model_loader: - kv 3: llama.embedding_length u32
llama_model_loader: - kv 4: llama.block_count u32
llama_model_loader: - kv 5: llama.feed_forward_length u32
llama_model_loader: - kv 6: llama.rope.dimension_count u32
llama_model_loader: - kv 7: llama.attention.head_count u32
llama_model_loader: - kv 8: llama.attention.head_count_kv u32
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32
llama_model_loader: - kv 10: general.file_type u32
llama_model_loader: - kv 11: tokenizer.ggml.model str
llama_model_loader: - kv 12: tokenizer.ggml.tokens arr
llama_model_loader: - kv 13: tokenizer.ggml.scores arr
llama_model_loader: - kv 14: tokenizer.ggml.token_type arr
llama_model_loader: - kv 15: tokenizer.ggml.bos_token_id u32
llama_model_loader: - kv 16: tokenizer.ggml.eos_token_id u32
llama_model_loader: - kv 17: tokenizer.ggml.padding_token_id u32
llama_model_loader: - kv 18: general.quantization_version u32
llama_model_loader: - type f32: 53 tensors
llama_model_loader: - type q4_0: 183 tensors
llama_model_loader: - type q8_0: 1 tensors
llm_load_vocab: special tokens definition check successful ( 259/32000 ).
llm_load_print_meta: format = GGUF V2 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 32000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: n_ctx_train = 2048
llm_load_print_meta: n_embd = 3200
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 32
llm_load_print_meta: n_layer = 26
llm_load_print_meta: n_rot = 100
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: n_ff = 8640
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: model type = 3B
llm_load_print_meta: model ftype = mostly Q4_0
llm_load_print_meta: model params = 3.43 B
llm_load_print_meta: model size = 1.84 GiB (4.62 BPW)
llm_load_print_meta: general.name = pankajmathur
llm_load_print_meta: BOS token = 1 '<s>'
llm_load_print_meta: EOS token = 2 '</s>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: PAD token = 0 '<unk>'
llm_load_print_meta: LF token = 13 '<0x0A>'
llm_load_tensors: ggml ctx size = 0.08 MB
llm_load_tensors: mem required = 1887.57 MB
................ ............................................. ............... ....... ... ........
llama_new_context_with_model: n_ctx = 2048
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: kv self size = 650.00 MB
llama_new_context_with_model: compute buffer total size = 156.88 MB
llama server listening at http://127.0.0.1:55568
{"timestamp":1699352522,"level":"INFO","function":"main","line":1749,"message":"HTTP server listening","hostname":"127.0.0.1","port":55568}
{"timestamp":1699352522,"level":"INFO","function":"log_server_request","line":1240,"message":"request","remote_addr":"127.0.0.1","remote_port":49743,"status":200,"method":"HEAD","path":"/","params":{}}
2023/11/07 19:22:02 llama.go:456: llama runner started in 13.401789 seconds
[GIN] 2023/11/07 - 19:22:02 | 200 | 13.791850245s | 127.0.0.1 | POST "/api/generate"
{"timestamp":1699352526,"level":"INFO","function":"log_server_request","line":1240,"message":"request","remote_addr":"127.0.0.1","remote_port":49743,"status":200,"method":"HEAD","path":"/","params":{}}
2023/11/07 19:22:06 llama.go:399: signal: segmentation fault
2023/11/07 19:22:06 llama.go:473: llama runner stopped successfully
[GIN] 2023/11/07 - 19:22:06 | 200 | 265.904241ms | 127.0.0.1 | POST "/api/generate"
[GIN] 2023/11/07 - 19:22:37 | 200 | 17.588µs | 127.0.0.1 | HEAD "/"
[GIN] 2023/11/07 - 19:22:37 | 200 | 1.539704ms | 127.0.0.1 | GET "/api/tags"
2023/11/07 19:23:12 llama.go:473: llama runner stopped successfully
2023/11/09 19:03:00 images.go:824: total blobs: 11
2023/11/09 19:03:00 images.go:831: total unused blobs removed: 0
2023/11/09 19:03:00 routes.go:680: Listening on 127.0.0.1:11434 (version 0.1.8)
```
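The generate request seen in the log can also be issued by hand against the listening address from the first log line. A minimal sketch, assuming a model named `orca-mini` is installed (the model actually used above is not named in the log):

```shellsession
$ curl http://127.0.0.1:11434/api/generate -d '{
    "model": "orca-mini",
    "prompt": "Why is the sky blue?"
  }'
```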