Remove the vendored GGML and llama.cpp backend, the CGO runner, the Go model implementations, and the sample code. llama-server (built from upstream llama.cpp via FetchContent) is now the sole inference engine for GGUF-based models. (Safetensors-based models continue to run on the new MLX engine.) This allows us to pick up new capabilities and fixes from llama.cpp more rapidly as they come out. On Windows, this now requires recent AMD driver versions that support ROCm v7, as llama.cpp currently does not support building against v6.
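For context, a minimal sketch of what pulling upstream llama.cpp in at configure time via FetchContent could look like. The pinned tag and the LLAMA_BUILD_SERVER option are assumptions drawn from upstream llama.cpp conventions, not ollama's actual build wiring:

    # Sketch: fetch upstream llama.cpp and build its llama-server target.
    cmake_minimum_required(VERSION 3.21)
    project(runner-sketch LANGUAGES C CXX)

    include(FetchContent)

    # Assumed upstream option that enables the llama-server binary.
    set(LLAMA_BUILD_SERVER ON CACHE BOOL "" FORCE)

    FetchContent_Declare(
      llama_cpp
      GIT_REPOSITORY https://github.com/ggml-org/llama.cpp.git
      GIT_TAG        master  # placeholder; a real build would pin a release tag
    )
    FetchContent_MakeAvailable(llama_cpp)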
{
  "version": 3,
  "configurePresets": [
    {
      "name": "Default",
      "binaryDir": "${sourceDir}/build",
      "installDir": "${sourceDir}/dist",
      "cacheVariables": {
        "CMAKE_BUILD_TYPE": "Release",
        "CMAKE_MSVC_RUNTIME_LIBRARY": "MultiThreaded"
      }
    },
    {
      "name": "MLX",
      "inherits": [ "Default" ],
      "cacheVariables": {
        "MLX_ENGINE": "ON",
        "OLLAMA_RUNNER_DIR": "mlx"
      }
    },
    {
      "name": "MLX CUDA 12",
      "inherits": [ "MLX" ],
      "cacheVariables": {
        "OLLAMA_RUNNER_DIR": "mlx_cuda_v12"
      }
    },
    {
      "name": "MLX CUDA 13",
      "inherits": [ "MLX" ],
      "cacheVariables": {
        "MLX_CUDA_ARCHITECTURES": "86;89;90;90a;100;103;75-virtual;80-virtual;110-virtual;120-virtual;121-virtual",
        "OLLAMA_RUNNER_DIR": "mlx_cuda_v13"
      }
    }
  ],
  "buildPresets": [
    {
      "name": "Default",
      "configurePreset": "Default",
      "configuration": "Release"
    },
    {
      "name": "MLX",
      "targets": [ "mlx", "mlxc" ],
      "configurePreset": "MLX"
    },
    {
      "name": "MLX CUDA 12",
      "targets": [ "mlx", "mlxc" ],
      "configurePreset": "MLX CUDA 12"
    },
    {
      "name": "MLX CUDA 13",
      "targets": [ "mlx", "mlxc" ],
      "configurePreset": "MLX CUDA 13"
    }
  ]
}
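Preset schema version 3 requires CMake 3.21 or newer. With a suitable CMake installed, configuring and building with one of these presets would look like, for example:

    cmake --preset "MLX CUDA 12"
    cmake --build --preset "MLX CUDA 12"

Each MLX preset inherits Default's Release build type and MSVC static runtime, overriding only the MLX-specific cache variables such as OLLAMA_RUNNER_DIR, and the matching build presets restrict the build to the mlx and mlxc targets.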