@@ -148,19 +148,21 @@ ifdef LLAMA_PERF
 	CXXFLAGS += -DGGML_PERF
 endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
+	# Apple M1, M2, etc.
+	# Raspberry Pi 3, 4, Zero 2 (64-bit)
 	CFLAGS += -mcpu=native
 	CXXFLAGS += -mcpu=native
 endif
 ifneq ($(filter armv6%,$(UNAME_M)),)
-	# Raspberry Pi 1, 2, 3
+	# Raspberry Pi 1, Zero
 	CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
 endif
 ifneq ($(filter armv7%,$(UNAME_M)),)
-	# Raspberry Pi 4
+	# Raspberry Pi 2
 	CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
 endif
 ifneq ($(filter armv8%,$(UNAME_M)),)
-	# Raspberry Pi 4
+	# Raspberry Pi 3, 4, Zero 2 (32-bit)
 	CFLAGS += -mfp16-format=ieee -mno-unaligned-access
 endif
 
@@ -192,41 +194,56 @@ llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama-util.h
 common.o: examples/common.cpp examples/common.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
+libllama.so: llama.o ggml.o $(OBJS)
+	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
+
 clean:
-	rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-matmult
+	rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state build-info.h
 
-main: examples/main/main.cpp ggml.o llama.o common.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+#
+# Examples
+#
+
+main: examples/main/main.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 	@echo
 	@echo '====  Run ./main -h for help.  ===='
 	@echo
 
-quantize: examples/quantize/quantize.cpp ggml.o llama.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-quantize-stats: examples/quantize-stats/quantize-stats.cpp ggml.o llama.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o common.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-embedding: examples/embedding/embedding.cpp ggml.o llama.o common.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 
-libllama.so: llama.o ggml.o $(OBJS)
-	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
+build-info.h: $(wildcard .git/index) scripts/build-info.sh
+	@scripts/build-info.sh > $@.tmp
+	@if ! cmp -s $@.tmp $@; then \
+		mv $@.tmp $@; \
+	else \
+		rm $@.tmp; \
+	fi
 
 #
 # Tests
 #
 
-benchmark-matmult: examples/benchmark/benchmark-matmult.cpp ggml.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
 	./$@
 
+vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+
 .PHONY: tests
 tests:
 	bash ./tests/run-tests.sh
0 commit comments