Mirror of https://github.com/bytecodealliance/wasm-micro-runtime.git

Minimum support:

- [x] Accept (WasmEdge) customized model parameters and metadata.
- [x] Target [wasmedge-ggml examples](https://github.com/second-state/WasmEdge-WASINN-examples/tree/master/wasmedge-ggml)
  - [x] basic
  - [x] chatml
  - [x] gemma
  - [x] llama
  - [x] qwen

---

In the future, to support if required:

- [ ] Target [wasmedge-ggml examples](https://github.com/second-state/WasmEdge-WASINN-examples/tree/master/wasmedge-ggml)
  - [ ] command-r (requires >70 GB of memory)
  - [ ] embedding (embedding mode)
  - [ ] grammar (uses the grammar option to constrain the model to generate JSON output)
  - [ ] llama-stream (new APIs: `compute_single`, `get_output_single`, `fini_single`)
  - [ ] llava (image input)
  - [ ] llava-base64-stream (image input)
  - [ ] multimodel (image input)
- [ ] Target [llamaedge](https://github.com/LlamaEdge/LlamaEdge)
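As a rough sketch, enabling this backend in an iwasm build would presumably be gated behind CMake cache options; the `WAMR_BUILD_WASI_NN_LLAMACPP` name below is an assumption modeled on WAMR's existing `WAMR_BUILD_WASI_NN_*` flag convention, not a confirmed option:

```cmake
# Assumed flags, set before including WAMR's build scripts in a wrapper
# CMakeLists.txt (names modeled on the WAMR_BUILD_WASI_NN_* convention).
set(WAMR_BUILD_WASI_NN 1)           # enable the wasi-nn host library
set(WAMR_BUILD_WASI_NN_LLAMACPP 1)  # assumed: select the llama.cpp backend
```

The fragment below is what actually pulls llama.cpp into the build.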
CMake · 19 lines · 483 B
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

include(FetchContent)

set(LLAMA_SOURCE_DIR "${WAMR_ROOT_DIR}/core/deps/llama.cpp")

# Fetch llama.cpp pinned to tag b3573 and check it out under WAMR's deps tree.
FetchContent_Declare(
  llamacpp
  GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
  GIT_TAG        b3573
  SOURCE_DIR     ${LLAMA_SOURCE_DIR}
)

# Build only the llama.cpp libraries; skip its tests, examples, and server.
set(LLAMA_BUILD_TESTS OFF)
set(LLAMA_BUILD_EXAMPLES OFF)
set(LLAMA_BUILD_SERVER OFF)
FetchContent_MakeAvailable(llamacpp)
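For context, a consumer of the fragment above might look like the following sketch. The `wasi_nn_llamacpp` target, the `llama.cmake` filename, and the relative paths are hypothetical; `llama` is the library target that llama.cpp's own CMake build defines:

```cmake
# Hypothetical consumer of the FetchContent fragment above.
cmake_minimum_required(VERSION 3.14)  # FetchContent_MakeAvailable needs >= 3.14
project(wasi_nn_llamacpp_demo C CXX)

# WAMR_ROOT_DIR must be set before the include, since the fragment uses it
# to decide where llama.cpp's sources are checked out.
set(WAMR_ROOT_DIR ${CMAKE_CURRENT_LIST_DIR}/../..)
include(${CMAKE_CURRENT_LIST_DIR}/llama.cmake)  # hypothetical name for the fragment

add_library(wasi_nn_llamacpp STATIC wasi_nn_llamacpp.c)
# Link against `llama`, the library target exported by llama.cpp's build.
target_link_libraries(wasi_nn_llamacpp PRIVATE llama)
```

Pinning `GIT_TAG` to a release tag such as b3573, rather than a branch, keeps the fetched llama.cpp sources reproducible across builds.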