[COPY] --- T2-COPYRIGHT-BEGIN ---
[COPY] t2/package/*/llama-cpp/llama-cpp.desc
[COPY] Copyright (C) 2025 The T2 SDE Project
[COPY] SPDX-License-Identifier: GPL-2.0
[COPY] --- T2-COPYRIGHT-END ---

[I] LLM inference in C/C++

[T] The main goal of llama.cpp is to enable LLM inference with minimal setup
[T] and state-of-the-art performance on a wide range of hardware - locally and
[T] in the cloud.

[U] https://github.com/ggerganov/llama.cpp

[A] llama-cpp Authors
[M] The T2 Project

[C] extra/development

[V] b4589

[L] MIT
[S] Stable
[P] X -----5---9 700.000

[O] prefix=opt/llama-cpp
[O] var_append cmakeopt " " "-DLLAMA_CURL=ON -DLLAMA_NATIVE=ON"
[O] hook_add postmake 5 "cmake $cmakeopt"
[O] hook_add postmake 5 "$MAKE $makeinstopt"
[O] hook_add postmake 5 "cp -rvf {models,scripts,prompts,examples,docs} $root/$prefix"

[D] 7e9a31e7d3940b05129ad2c38d490cf94d2cbf250fb2ca2168af7b49 llama-cpp-b4589.tar.gz git+https://github.com/ggerganov/llama.cpp.git b4589