@@ -1,7 +1,6 @@
 # Define the default target now so that it is always the first target
 BUILD_TARGETS = \
 	libllava.a \
-	llama-baby-llama \
 	llama-batched \
 	llama-batched-bench \
 	llama-bench \
@@ -56,14 +55,14 @@ TEST_TARGETS = \
 	tests/test-llama-grammar \
 	tests/test-log \
 	tests/test-model-load-cancel \
-	tests/test-opt \
 	tests/test-quantize-fns \
 	tests/test-quantize-perf \
 	tests/test-rope \
 	tests/test-sampling \
 	tests/test-tokenizer-0 \
 	tests/test-tokenizer-1-bpe \
 	tests/test-tokenizer-1-spm
+# tests/test-opt \
 
 # Legacy build targets that were renamed in #7809, but should still be removed when the project is cleaned
 LEGACY_TARGETS_CLEAN = main quantize quantize-stats perplexity imatrix embedding vdot q8dot convert-llama2c-to-ggml \
@@ -916,6 +915,7 @@ endif # GGML_METAL
 
 OBJ_GGML += \
 	ggml/src/ggml.o \
+	ggml/src/ggml-cpu.o \
 	ggml/src/ggml-alloc.o \
 	ggml/src/ggml-backend.o \
 	ggml/src/ggml-quants.o \
@@ -936,7 +936,6 @@ OBJ_COMMON = \
 	common/console.o \
 	common/ngram-cache.o \
 	common/sampling.o \
-	common/train.o \
 	common/build-info.o \
 	common/json-schema-to-grammar.o
 
@@ -1048,6 +1047,12 @@ ggml/src/ggml.o: \
 	ggml/include/ggml.h
 	$(CC) $(CFLAGS) -c $< -o $@
 
+ggml/src/ggml-cpu.o: \
+	ggml/src/ggml-cpu.c \
+	ggml/include/ggml.h \
+	ggml/src/ggml-common.h
+	$(CC) $(CFLAGS) -c $< -o $@
+
 ggml/src/ggml-alloc.o: \
 	ggml/src/ggml-alloc.c \
 	ggml/include/ggml.h \
@@ -1213,11 +1218,6 @@ common/json-schema-to-grammar.o: \
 	common/json-schema-to-grammar.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
-common/train.o: \
-	common/train.cpp \
-	common/train.h
-	$(CXX) $(CXXFLAGS) -c $< -o $@
-
 common/ngram-cache.o: \
 	common/ngram-cache.cpp \
 	common/ngram-cache.h
@@ -1390,11 +1390,6 @@ llama-bench: examples/llama-bench/llama-bench.cpp \
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
-llama-baby-llama: examples/baby-llama/baby-llama.cpp \
-	$(OBJ_ALL)
-	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
-	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-
 llama-export-lora: examples/export-lora/export-lora.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)