# Mirror of https://github.com/ollama/ollama.git (synced 2026-04-23 01:05:47 +02:00)
#
# Remove the vendored GGML and llama.cpp backend, CGO runner, Go model
# implementations, and sample. llama-server (built from upstream llama.cpp via
# FetchContent) is now the sole inference engine for GGUF-based models.
# (Safetensor-based models continue to run on the new MLX engine.) This allows
# us to more rapidly pick up new capabilities and fixes from llama.cpp as they
# come out. On Windows this now requires recent AMD driver versions to support
# ROCm v7, as llama.cpp currently does not support building against v6.
#
# 301 lines, 14 KiB, YAML
name: test
|
|
|
|
concurrency:
|
|
# For PRs, later CI runs preempt previous ones. e.g. a force push on a PR
|
|
# cancels running CI jobs and starts all new ones.
|
|
#
|
|
# For non-PR pushes, concurrency.group needs to be unique for every distinct
|
|
# CI run we want to have happen. Use run_id, which in practice means all
|
|
# non-PR CI runs will be allowed to run without preempting each other.
|
|
group: ${{ github.workflow }}-$${{ github.pull_request.number || github.run_id }}
|
|
cancel-in-progress: true
|
|
|
|
on:
|
|
pull_request:
|
|
paths:
|
|
- '**/*'
|
|
- '!docs/**'
|
|
- '!README.md'
|
|
|
|
jobs:
|
|
changes:
|
|
runs-on: ubuntu-latest
|
|
outputs:
|
|
changed: ${{ steps.changes.outputs.changed }}
|
|
vendorsha: ${{ steps.changes.outputs.vendorsha }}
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
with:
|
|
fetch-depth: 0
|
|
- id: changes
|
|
run: |
|
|
changed() {
|
|
local BASE=${{ github.event.pull_request.base.sha }}
|
|
local HEAD=${{ github.event.pull_request.head.sha }}
|
|
local MERGE_BASE=$(git merge-base $BASE $HEAD)
|
|
git diff-tree -r --no-commit-id --name-only "$MERGE_BASE" "$HEAD" \
|
|
| xargs python3 -c "import sys; from pathlib import Path; print(any(Path(x).match(glob) for x in sys.argv[1:] for glob in '$*'.split(' ')))"
|
|
}
|
|
|
|
echo changed=$(changed 'llama/server/**/*' 'LLAMA_CPP_VERSION' '.github/**/*') | tee -a $GITHUB_OUTPUT
|
|
echo vendorsha=$(cat LLAMA_CPP_VERSION) | tee -a $GITHUB_OUTPUT
|
|
|
|
linux:
|
|
needs: [changes]
|
|
if: needs.changes.outputs.changed == 'True'
|
|
strategy:
|
|
matrix:
|
|
include:
|
|
- preset: CPU
|
|
- preset: CUDA
|
|
container: nvidia/cuda:13.0.0-devel-ubuntu22.04
|
|
flags: '-DCMAKE_CUDA_ARCHITECTURES=87'
|
|
- preset: ROCm
|
|
container: rocm/dev-ubuntu-22.04:7.2.1
|
|
extra-packages: rocm-libs
|
|
flags: '-DAMDGPU_TARGETS=gfx1010 -DCMAKE_PREFIX_PATH=/opt/rocm'
|
|
- preset: Vulkan
|
|
container: ubuntu:22.04
|
|
extra-packages: >
|
|
mesa-vulkan-drivers vulkan-tools
|
|
libvulkan1 libvulkan-dev
|
|
vulkan-sdk cmake ccache g++ make
|
|
- preset: 'MLX CUDA 13'
|
|
container: nvidia/cuda:13.0.0-devel-ubuntu22.04
|
|
extra-packages: libcudnn9-dev-cuda-13 libopenblas-dev liblapack-dev liblapacke-dev git curl
|
|
flags: '-DCMAKE_CUDA_ARCHITECTURES=87 -DBLAS_INCLUDE_DIRS=/usr/include/x86_64-linux-gnu -DLAPACK_INCLUDE_DIRS=/usr/include/x86_64-linux-gnu'
|
|
install-go: true
|
|
runs-on: linux
|
|
container: ${{ matrix.container }}
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
- run: |
|
|
[ -n "${{ matrix.container }}" ] || sudo=sudo
|
|
$sudo apt-get update
|
|
# Add LunarG Vulkan SDK apt repo for Ubuntu 22.04
|
|
if [ "${{ matrix.preset }}" = "Vulkan" ]; then
|
|
$sudo apt-get install -y --no-install-recommends wget gnupg ca-certificates software-properties-common
|
|
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | $sudo gpg --dearmor -o /usr/share/keyrings/lunarg-archive-keyring.gpg
|
|
# Use signed-by to bind the repo to the installed keyring to avoid NO_PUBKEY
|
|
echo "deb [signed-by=/usr/share/keyrings/lunarg-archive-keyring.gpg] https://packages.lunarg.com/vulkan/1.4.313 jammy main" | $sudo tee /etc/apt/sources.list.d/lunarg-vulkan-1.4.313-jammy.list > /dev/null
|
|
$sudo apt-get update
|
|
fi
|
|
$sudo apt-get install -y cmake ccache curl ${{ matrix.extra-packages }}
|
|
# llama-server requires CMake 3.24+, while Ubuntu 22.04 ships 3.22.
|
|
curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.31.2/cmake-3.31.2-linux-$(uname -m).tar.gz | $sudo tar xz -C /usr/local --strip-components 1
|
|
# Export VULKAN_SDK if provided by LunarG package (defensive)
|
|
if [ -d "/usr/lib/x86_64-linux-gnu/vulkan" ] && [ "${{ matrix.preset }}" = "Vulkan" ]; then
|
|
echo "VULKAN_SDK=/usr" >> $GITHUB_ENV
|
|
fi
|
|
env:
|
|
DEBIAN_FRONTEND: noninteractive
|
|
- if: matrix.install-go
|
|
name: Install Go
|
|
run: |
|
|
GO_VERSION=$(awk '/^go / { print $2 }' go.mod)
|
|
curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-$(dpkg --print-architecture).tar.gz" | tar xz -C /usr/local
|
|
echo "/usr/local/go/bin" >> $GITHUB_PATH
|
|
- uses: actions/cache@v4
|
|
with:
|
|
path: /github/home/.cache/ccache
|
|
key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }}-${{ needs.changes.outputs.vendorsha }}
|
|
- name: Build llama-server
|
|
run: |
|
|
cmake -S llama/server --preset cpu ${{ matrix.flags }}
|
|
cmake --build build/llama-server-cpu -- -l $(nproc)
|
|
|
|
windows:
|
|
needs: [changes]
|
|
if: needs.changes.outputs.changed == 'True'
|
|
strategy:
|
|
matrix:
|
|
include:
|
|
- preset: CPU
|
|
- preset: CUDA
|
|
install: https://developer.download.nvidia.com/compute/cuda/13.0.0/local_installers/cuda_13.0.0_windows.exe
|
|
flags: '-DCMAKE_CUDA_ARCHITECTURES=80'
|
|
cuda-components:
|
|
- '"cudart"'
|
|
- '"nvcc"'
|
|
- '"cublas"'
|
|
- '"cublas_dev"'
|
|
- '"crt"'
|
|
- '"nvvm"'
|
|
- '"nvptxcompiler"'
|
|
cuda-version: '13.0'
|
|
- preset: ROCm
|
|
install: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-26.Q1-Win11-For-HIP.exe
|
|
rocm-version: '7.1'
|
|
flags: '-DAMDGPU_TARGETS=gfx1010 -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma" -DCMAKE_CXX_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma"'
|
|
- preset: Vulkan
|
|
install: https://sdk.lunarg.com/sdk/download/1.4.321.1/windows/vulkansdk-windows-X64-1.4.321.1.exe
|
|
- preset: 'MLX CUDA 13'
|
|
install: https://developer.download.nvidia.com/compute/cuda/13.0.0/local_installers/cuda_13.0.0_windows.exe
|
|
cudnn-install: https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/windows-x86_64/cudnn-windows-x86_64-9.18.1.3_cuda13-archive.zip
|
|
flags: '-DCMAKE_CUDA_ARCHITECTURES=80'
|
|
cuda-components:
|
|
- '"cudart"'
|
|
- '"nvcc"'
|
|
- '"cublas"'
|
|
- '"cublas_dev"'
|
|
- '"cufft"'
|
|
- '"cufft_dev"'
|
|
- '"nvrtc"'
|
|
- '"nvrtc_dev"'
|
|
- '"crt"'
|
|
- '"nvvm"'
|
|
- '"nvptxcompiler"'
|
|
cuda-version: '13.0'
|
|
runs-on: windows
|
|
steps:
|
|
- run: |
|
|
choco install -y --no-progress ccache ninja
|
|
if (Get-Command ccache -ErrorAction SilentlyContinue) {
|
|
ccache -o cache_dir=${{ github.workspace }}\.ccache
|
|
}
|
|
- if: matrix.preset == 'CUDA' || matrix.preset == 'ROCm' || matrix.preset == 'Vulkan' || matrix.preset == 'MLX CUDA 13'
|
|
id: cache-install
|
|
uses: actions/cache/restore@v4
|
|
with:
|
|
path: |
|
|
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA
|
|
C:\Program Files\AMD\ROCm
|
|
C:\VulkanSDK
|
|
C:\Program Files\NVIDIA\CUDNN
|
|
key: ${{ matrix.install }}-${{ matrix.cudnn-install }}
|
|
- if: matrix.preset == 'CUDA' || matrix.preset == 'MLX CUDA 13'
|
|
name: Install CUDA ${{ matrix.cuda-version }}
|
|
run: |
|
|
$ErrorActionPreference = "Stop"
|
|
if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
|
|
Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe"
|
|
$subpackages = @(${{ join(matrix.cuda-components, ', ') }}) | Foreach-Object {"${_}_${{ matrix.cuda-version }}"}
|
|
Start-Process -FilePath .\install.exe -ArgumentList (@("-s") + $subpackages) -NoNewWindow -Wait
|
|
}
|
|
|
|
$cudaPath = (Resolve-Path "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\*").path
|
|
echo "$cudaPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
|
- if: matrix.preset == 'ROCm'
|
|
name: Install ROCm ${{ matrix.rocm-version }}
|
|
run: |
|
|
$ErrorActionPreference = "Stop"
|
|
if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
|
|
Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe"
|
|
Start-Process -FilePath .\install.exe -ArgumentList '-install' -NoNewWindow -Wait
|
|
}
|
|
|
|
$hipPath = (Resolve-Path "C:\Program Files\AMD\ROCm\*").path
|
|
echo "$hipPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
|
echo "CC=$hipPath\bin\clang.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
echo "CXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
echo "HIPCXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
echo "HIP_PLATFORM=amd" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
echo "CMAKE_PREFIX_PATH=$hipPath" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
- if: matrix.preset == 'Vulkan'
|
|
name: Install Vulkan ${{ matrix.rocm-version }}
|
|
run: |
|
|
$ErrorActionPreference = "Stop"
|
|
if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
|
|
Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe"
|
|
Start-Process -FilePath .\install.exe -ArgumentList "-c","--am","--al","in" -NoNewWindow -Wait
|
|
}
|
|
|
|
$vulkanPath = (Resolve-Path "C:\VulkanSDK\*").path
|
|
echo "$vulkanPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
|
echo "VULKAN_SDK=$vulkanPath" >> $env:GITHUB_ENV
|
|
- if: matrix.preset == 'MLX CUDA 13'
|
|
name: Install cuDNN for MLX
|
|
run: |
|
|
$ErrorActionPreference = "Stop"
|
|
$cudnnRoot = "C:\Program Files\NVIDIA\CUDNN"
|
|
if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
|
|
Invoke-WebRequest -Uri "${{ matrix.cudnn-install }}" -OutFile "cudnn.zip"
|
|
Expand-Archive -Path cudnn.zip -DestinationPath cudnn-extracted
|
|
$cudnnDir = (Get-ChildItem -Path cudnn-extracted -Directory)[0].FullName
|
|
New-Item -ItemType Directory -Force -Path $cudnnRoot
|
|
Copy-Item -Path "$cudnnDir\*" -Destination "$cudnnRoot\" -Recurse
|
|
}
|
|
|
|
echo "CUDNN_ROOT_DIR=$cudnnRoot" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
echo "CUDNN_INCLUDE_PATH=$cudnnRoot\include" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
echo "CUDNN_LIBRARY_PATH=$cudnnRoot\lib\x64" | Out-File -FilePath $env:GITHUB_ENV -Append
|
|
echo "$cudnnRoot\bin\x64" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
|
- if: ${{ !cancelled() && steps.cache-install.outputs.cache-hit != 'true' }}
|
|
uses: actions/cache/save@v4
|
|
with:
|
|
path: |
|
|
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA
|
|
C:\Program Files\AMD\ROCm
|
|
C:\VulkanSDK
|
|
C:\Program Files\NVIDIA\CUDNN
|
|
key: ${{ matrix.install }}-${{ matrix.cudnn-install }}
|
|
- uses: actions/checkout@v4
|
|
- uses: actions/cache@v4
|
|
with:
|
|
path: ${{ github.workspace }}\.ccache
|
|
key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }}-${{ needs.changes.outputs.vendorsha }}
|
|
- name: Build llama-server
|
|
if: matrix.preset != 'MLX CUDA 13'
|
|
run: |
|
|
Import-Module 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
|
|
Enter-VsDevShell -VsInstallPath 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
|
|
cmake -S llama\server --preset cpu ${{ matrix.flags }}
|
|
cmake --build build\llama-server-cpu -- -l $([Environment]::ProcessorCount)
|
|
env:
|
|
CMAKE_GENERATOR: Ninja
|
|
- name: Build MLX
|
|
if: matrix.preset == 'MLX CUDA 13'
|
|
run: |
|
|
Import-Module 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
|
|
Enter-VsDevShell -VsInstallPath 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
|
|
cmake --preset "${{ matrix.preset }}" ${{ matrix.flags }}
|
|
cmake --build --preset "${{ matrix.preset }}" -- -l $([Environment]::ProcessorCount)
|
|
env:
|
|
CMAKE_GENERATOR: Ninja
|
|
|
|
go_mod_tidy:
|
|
runs-on: ubuntu-latest
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
- name: check that 'go mod tidy' is clean
|
|
run: go mod tidy --diff || (echo "Please run 'go mod tidy'." && exit 1)
|
|
|
|
test:
|
|
strategy:
|
|
matrix:
|
|
os: [ubuntu-latest, macos-latest, windows-latest]
|
|
runs-on: ${{ matrix.os }}
|
|
env:
|
|
CGO_ENABLED: '1'
|
|
steps:
|
|
- uses: actions/checkout@v4
|
|
- uses: actions/setup-go@v5
|
|
with:
|
|
go-version-file: 'go.mod'
|
|
cache-dependency-path: |
|
|
go.sum
|
|
LLAMA_CPP_VERSION
|
|
- uses: actions/setup-node@v4
|
|
with:
|
|
node-version: '20'
|
|
- name: Install UI dependencies
|
|
working-directory: ./app/ui/app
|
|
run: npm ci
|
|
- name: Install tscriptify
|
|
run: |
|
|
go install github.com/tkrajina/typescriptify-golang-structs/tscriptify@latest
|
|
- name: Run UI tests
|
|
if: ${{ startsWith(matrix.os, 'ubuntu') }}
|
|
working-directory: ./app/ui/app
|
|
run: npm test
|
|
- name: Run go generate
|
|
run: go generate ./...
|
|
|
|
- name: go test
|
|
if: always()
|
|
run: go test -count=1 -benchtime=1x ./...
|
|
|
|
- uses: golangci/golangci-lint-action@v9
|
|
with:
|
|
only-new-issues: true
|