llama.cpp
手动编译
原始库
# Clone the llama.cpp repository
git clone https://github.com/ggml-org/llama.cpp.git
# Install dependencies:
# download the CUDA toolkit from the NVIDIA website and install it
# Configure environment variables (quoted to be safe against spaces in PATH)
export PATH="/usr/local/cuda/bin:$PATH"
export LD_LIBRARY_PATH="/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
# Then refresh the dynamic linker cache so the CUDA libraries are found
sudo ldconfig
# Install the cURL development library (needed by llama.cpp's model download support)
sudo apt-get update && sudo apt-get install -y libcurl4-openssl-dev
# Build with CUDA enabled
cmake -B build -DGGML_CUDA=ON
# NOTE(review): the original line read "-j6SeedVR-GGUF" — the section
# heading "SeedVR-GGUF" had been fused onto the build command, making
# the -j flag invalid. Restored to -j6; the heading now starts the
# patch section below.
cmake --build build --config Release -j6

# ---- SeedVR-GGUF ----
# Check out the tagged release b3962 (the revision the patch was written against)
git checkout tags/b3962
# Apply the patch obtained from the repository mentioned above
git apply lcpp-seedvr.patch
# Rebuild from scratch: remove the old build directory first so no stale
# objects from the pre-patch build survive
rm -rf build
cmake -B build -DGGML_CUDA=ON
cmake --build build --config Release -j6