diff --git a/examples/demo-apps/android/LlamaDemo/setup.sh b/examples/demo-apps/android/LlamaDemo/setup.sh
index 7ca13d616eb..212e214d377 100644
--- a/examples/demo-apps/android/LlamaDemo/setup.sh
+++ b/examples/demo-apps/android/LlamaDemo/setup.sh
@@ -34,6 +34,9 @@ cmake examples/models/llama2 \
     -DANDROID_ABI="$ANDROID_ABI" \
     -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
     -DEXECUTORCH_USE_TIKTOKEN="${EXECUTORCH_USE_TIKTOKEN}" \
+    -DEXECUTORCH_BUILD_CUSTOM=ON \
+    -DEXECUTORCH_BUILD_OPTIMIZED=ON \
+    -DEXECUTORCH_BUILD_XNNPACK=ON \
     -DCMAKE_BUILD_TYPE=Release \
     -B"${CMAKE_OUT}"/examples/models/llama2

diff --git a/extension/android/CMakeLists.txt b/extension/android/CMakeLists.txt
index 84a5437a073..ec8cf850317 100644
--- a/extension/android/CMakeLists.txt
+++ b/extension/android/CMakeLists.txt
@@ -68,6 +68,8 @@ if(EXECUTORCH_BUILD_LLAMA_JNI)
   set_property(TARGET custom_ops PROPERTY IMPORTED_LOCATION ${CUSTOM_OPS_PATH})
   target_link_options_shared_lib(custom_ops)

+  target_link_options_shared_lib(quantized_ops_lib)
+
   if(TARGET pthreadpool)
     set(LLAMA_JNI_SRCS jni/jni_layer_llama.cpp ../../backends/xnnpack/threadpool/cpuinfo_utils.cpp)
   else()
@@ -83,7 +85,7 @@ if(EXECUTORCH_BUILD_LLAMA_JNI)
   endif()
   target_include_directories(executorch_llama_jni PRIVATE ${_common_include_directories})
   target_link_libraries(executorch_llama_jni ${link_libraries} llama_runner
-    custom_ops cpublas eigen_blas)
+    custom_ops cpublas eigen_blas quantized_kernels quantized_ops_lib)
   target_compile_options(executorch_llama_jni PUBLIC ${_common_compile_options})
   if(EXECUTORCH_USE_TIKTOKEN)
     set(ABSL_ENABLE_INSTALL ON)
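
For context on why `target_link_options_shared_lib(quantized_ops_lib)` is needed in addition to plain `target_link_libraries`: kernel libraries like `quantized_ops_lib` register their operators through static initializers, and a linker will normally drop archive members that nothing references directly, so the registrations never run. Below is a minimal CMake sketch of that whole-archive pattern. It is an illustration, not the PR's code: the targets `my_ops` and `my_runner` are hypothetical, and the linker flags mirror what ExecuTorch's `target_link_options_shared_lib` helper is assumed to emit.

```cmake
cmake_minimum_required(VERSION 3.19)
project(whole_archive_demo CXX)

# Static library whose .cpp files contain static-initializer-based
# operator registration (nothing in main.cpp references its symbols).
add_library(my_ops STATIC my_ops.cpp)

add_executable(my_runner main.cpp)

# Force every object in the archive into the final link so the static
# registrations are kept. LINKER: expands to the right driver prefix
# (e.g. -Wl,) and SHELL: keeps the space-separated options intact.
if(APPLE)
  target_link_options(my_runner PRIVATE
    "SHELL:LINKER:-force_load $<TARGET_FILE:my_ops>")
else()
  target_link_options(my_runner PRIVATE
    "SHELL:LINKER:--whole-archive $<TARGET_FILE:my_ops> LINKER:--no-whole-archive")
endif()

target_link_libraries(my_runner PRIVATE my_ops)
```

The same reasoning explains the `target_link_libraries` change in the diff: `quantized_ops_lib` (the registration glue) and `quantized_kernels` (the kernel implementations) both have to be on the JNI library's link line, with the former additionally linked whole-archive.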