@@ -53,36 +53,42 @@ version = v"0.0.16" # fake version number
 # 0.0.14 2024-01-04 b1767 https://github.com/ggerganov/llama.cpp/releases/tag/b1767
 # 0.0.15 2024-01-09 b1796 https://github.com/ggerganov/llama.cpp/releases/tag/b1796
 # 0.0.16 2024-03-10 b2382 https://github.com/ggerganov/llama.cpp/releases/tag/b2382
-
+# 0.0.17 2024-12-20 b4371 https://github.com/ggerganov/llama.cpp/releases/tag/b4371

 sources = [
-    GitSource("https://github.com/ggerganov/llama.cpp.git",
-              "621e86b331f8b0e71f79fd82a4ae1cd54c3e4396"),
+    GitSource("https://github.com/ggerganov/llama.cpp.git", "eb5c3dc64bd967f2e23c87d9dec195f45468de60"),
+    ArchiveSource("https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX10.15.sdk.tar.xz",
+                  "2408d07df7f324d3beea818585a6d990ba99587c218a3969f924dfcc4de93b62"),
 ]

 script = raw"""
+if [[ "${target}" == x86_64-apple-darwin* ]]; then
+    # Install a newer SDK which supports `std::filesystem`
+    pushd ${WORKSPACE}/srcdir/MacOSX10.*.sdk
+    rm -rf /opt/${target}/${target}/sys-root/System
+    cp -a usr/* "/opt/${target}/${target}/sys-root/usr/"
+    cp -a System "/opt/${target}/${target}/sys-root/"
+    popd
+fi
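+# (llama.cpp now uses std::filesystem, which Apple's SDKs only provide
+# from 10.15 onward, hence the sysroot swap above.)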
+
 cd $WORKSPACE/srcdir/llama.cpp*

 # remove compiler flags forbidden in BinaryBuilder
 sed -i -e 's/-funsafe-math-optimizations//g' CMakeLists.txt

-EXTRA_CMAKE_ARGS=
+EXTRA_CMAKE_ARGS=()
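+# (A bash array instead of the previous whitespace-joined string: arguments
+# containing spaces survive intact when expanded as "${EXTRA_CMAKE_ARGS[@]}".)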
 if [[ "${target}" == *-linux-* ]]; then
     # otherwise we have undefined reference to `clock_gettime' when
     # linking the `main' example program
-    EXTRA_CMAKE_ARGS='-DCMAKE_EXE_LINKER_FLAGS="-lrt"'
+    EXTRA_CMAKE_ARGS+=(-DCMAKE_EXE_LINKER_FLAGS="-lrt")
 fi

-# Use Metal on Apple Silicon, disable otherwise (eg, disable for Intel-based MacOS)
-if [[ "${target}" == aarch64-apple-darwin* ]]; then
-    EXTRA_CMAKE_ARGS="$EXTRA_CMAKE_ARGS -DLLAMA_METAL=ON"
-else
-    EXTRA_CMAKE_ARGS="$EXTRA_CMAKE_ARGS -DLLAMA_METAL=OFF"
+# Disable Metal on Intel Apple platforms
+if [[ "${target}" == x86_64-apple-darwin* ]]; then
+    EXTRA_CMAKE_ARGS+=(-DGGML_METAL=OFF)
 fi
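+# (Upstream renamed the LLAMA_* CMake options to GGML_*; GGML_METAL defaults
+# to ON for Apple targets, so only the Intel case needs an explicit OFF.)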

-mkdir build && cd build
-
-cmake .. \
+cmake -Bbuild -GNinja \
     -DCMAKE_INSTALL_PREFIX=$prefix \
     -DCMAKE_TOOLCHAIN_FILE=${CMAKE_TARGET_TOOLCHAIN} \
     -DCMAKE_BUILD_TYPE=RELEASE \
@@ -98,40 +104,72 @@ cmake .. \
     -DLLAMA_BLAS=OFF \
     -DLLAMA_CUBLAS=OFF \
     -DLLAMA_CLBLAST=OFF \
-    $EXTRA_CMAKE_ARGS
-make -j${nproc}
-
-make install
+    "${EXTRA_CMAKE_ARGS[@]}"
+cmake --build build
+cmake --install build
+install_license LICENSE
+"""

-# install header files
-for hdr in ../*.h; do
-    install -Dvm 644 "${hdr}" "${includedir}/$(basename "${hdr}")"
-done
+platforms = supported_platforms()

-install_license ../LICENSE
-"""
+# aarch64-linux-musl:
+#   /workspace/srcdir/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c:2398:53: error: ‘HWCAP_ASIMDDP’ undeclared (first use in this function); did you mean ‘HWCAP_ASIMDHP’?
+filter!(p -> !(Sys.islinux(p) && arch(p) == "aarch64" && libc(p) == "musl"), platforms)
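+# (The same exclusion could equally be passed as `supported_platforms(; exclude = ...)`,
+#  as the previous revision did for its own set of unsupported targets.)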

-platforms = supported_platforms(; exclude=p -> arch(p) == "powerpc64le" || (arch(p) == "i686" && Sys.iswindows(p)) || (arch(p) in ["armv6l", "armv7l"]))
 platforms = expand_cxxstring_abis(platforms)

 products = [
-    ExecutableProduct("baby-llama", :baby_llama),
-    ExecutableProduct("benchmark", :benchmark),
-    ExecutableProduct("embedding", :embedding),
-    ExecutableProduct("main", :main),
-    ExecutableProduct("perplexity", :perplexity),
-    ExecutableProduct("quantize", :quantize),
-    ExecutableProduct("quantize-stats", :quantize_stats),
-    ExecutableProduct("save-load-state", :save_load_state),
-    ExecutableProduct("server", :server),
-    ExecutableProduct("simple", :simple),
-    ExecutableProduct("train-text-from-scratch", :train_text_from_scratch),
-    LibraryProduct("libggml_shared", :libggml),
+    ExecutableProduct("llama-batched", :llama_batched),
+    ExecutableProduct("llama-batched-bench", :llama_batched_bench),
+    ExecutableProduct("llama-bench", :llama_bench),
+    ExecutableProduct("llama-cli", :llama_cli),
+    ExecutableProduct("llama-convert-llama2c-to-ggml", :llama_convert_llama2c_to_ggml),
+    ExecutableProduct("llama-cvector-generator", :llama_cvector_generator),
+    ExecutableProduct("llama-embedding", :llama_embedding),
+    ExecutableProduct("llama-eval-callback", :llama_eval_callback),
+    ExecutableProduct("llama-export-lora", :llama_export_lora),
+    # ExecutableProduct("llama-gbnf-validator", :llama_gbnf_validator), # not built on Windows
+    ExecutableProduct("llama-gen-docs", :llama_gen_docs),
+    ExecutableProduct("llama-gguf", :llama_gguf),
+    ExecutableProduct("llama-gguf-hash", :llama_gguf_hash),
+    ExecutableProduct("llama-gguf-split", :llama_gguf_split),
+    ExecutableProduct("llama-gritlm", :llama_gritlm),
+    ExecutableProduct("llama-imatrix", :llama_imatrix),
+    ExecutableProduct("llama-infill", :llama_infill),
+    ExecutableProduct("llama-llava-cli", :llama_llava_cli),
+    ExecutableProduct("llama-lookahead", :llama_lookahead),
+    ExecutableProduct("llama-lookup", :llama_lookup),
+    ExecutableProduct("llama-lookup-create", :llama_lookup_create),
+    ExecutableProduct("llama-lookup-merge", :llama_lookup_merge),
+    ExecutableProduct("llama-lookup-stats", :llama_lookup_stats),
+    ExecutableProduct("llama-minicpmv-cli", :llama_minicpmv_cli),
+    ExecutableProduct("llama-parallel", :llama_parallel),
+    ExecutableProduct("llama-passkey", :llama_passkey),
+    ExecutableProduct("llama-perplexity", :llama_perplexity),
+    ExecutableProduct("llama-quantize", :llama_quantize),
+    # ExecutableProduct("llama-quantize-stats", :llama_quantize_stats), # not built on Windows
+    ExecutableProduct("llama-qwen2vl-cli", :llama_qwen2vl_cli),
+    ExecutableProduct("llama-retrieval", :llama_retrieval),
+    ExecutableProduct("llama-run", :llama_run),
+    ExecutableProduct("llama-save-load-state", :llama_save_load_state),
+    ExecutableProduct("llama-server", :llama_server),
+    ExecutableProduct("llama-simple", :llama_simple),
+    ExecutableProduct("llama-simple-chat", :llama_simple_chat),
+    ExecutableProduct("llama-speculative", :llama_speculative),
+    ExecutableProduct("llama-speculative-simple", :llama_speculative_simple),
+    ExecutableProduct("llama-tokenize", :llama_tokenize),
+    ExecutableProduct("llama-tts", :llama_tts),
+
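+    # Each ggml library is listed under both its `lib`-prefixed and bare
+    # name, since Windows DLLs are typically built without the `lib` prefix.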
+    LibraryProduct(["libggml-base", "ggml-base"], :libggml_base),
+    LibraryProduct(["libggml-cpu", "ggml-cpu"], :libggml_cpu),
+    LibraryProduct(["libggml", "ggml"], :libggml),
     LibraryProduct("libllama", :libllama),
+    LibraryProduct("libllava_shared", :libllava_shared),
 ]

-dependencies = Dependency[
+dependencies = [
+    Dependency(PackageSpec(name="CompilerSupportLibraries_jll", uuid="e66e0078-7015-5450-92f7-15fbd957f2ae")),
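+    # (presumably for the libstdc++/libgomp runtime libraries needed by the
+    # C++ build and the OpenMP-enabled CPU backend)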
 ]

 build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies;
-               julia_compat="1.6", preferred_gcc_version=v"10")
+               julia_compat="1.6", preferred_gcc_version=v"10")
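
When this recipe runs, each entry in `products` is exposed through the generated JLL package. A minimal downstream sketch, assuming the JLL follows the usual naming convention for a recipe called `llama_cpp` (the package name is an assumption, not part of this diff):

    using llama_cpp_jll

    # Each ExecutableProduct becomes a function returning a runnable Cmd,
    # with PATH and library search paths pointing into the artifact:
    run(`$(llama_cpp_jll.llama_cli()) --help`)

    # Each LibraryProduct becomes a variable holding the shared-library path,
    # suitable for dlopen/ccall:
    @show llama_cpp_jll.libllama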