# Target platform defaults to the host; override with `make GOOS=... GOARCH=...`.
export GOOS?=$(shell go env GOOS)
export GOARCH?=$(shell go env GOARCH)

# Default goal: build the full runner set for the selected platform.
build: llama/build/$(GOOS)-$(GOARCH)

# Strip filesystem paths from compiled binaries (reproducible builds).
export GOFLAGS=-trimpath
export GOOS?=$(shell go env GOOS)
|
|
export GOARCH?=$(shell go env GOARCH)
|
|
|
|
build: llama/build/$(GOOS)-$(GOARCH)
|
|
|
|
export GOFLAGS=-trimpath
|
|
|
|
# Target-specific variables: each runner flavor appends the Go build tag(s)
# that select its acceleration backend.
llama/build/%/runners/metal: GOFLAGS+=-tags=metal
llama/build/%/runners/cpu_avx: GOFLAGS+=-tags=avx
llama/build/%/runners/cpu_avx2: GOFLAGS+=-tags=avx2
llama/build/%/runners/cuda_v11: GOFLAGS+=-tags=cuda,cuda_v11
llama/build/%/runners/cuda_v12: GOFLAGS+=-tags=cuda,cuda_v12
# NOTE(review): rocm deliberately carries the cuda tag as well — presumably the
# ROCm build shares the CUDA code path; confirm against the Go build constraints.
llama/build/%/runners/rocm: GOFLAGS+=-tags=cuda,rocm
# macOS runner sets: AVX CPU runner on amd64, Metal runner on arm64.
.PHONY: llama/build/darwin-amd64 llama/build/darwin-arm64
llama/build/darwin-amd64: llama/build/darwin-amd64/runners/cpu_avx
llama/build/darwin-arm64: llama/build/darwin-arm64/runners/metal
# Linux runner sets: AVX and AVX2 CPU runners for each architecture.
.PHONY: llama/build/linux-amd64 llama/build/linux-arm64
llama/build/linux-amd64: llama/build/linux-amd64/runners/cpu_avx
llama/build/linux-amd64: llama/build/linux-amd64/runners/cpu_avx2
llama/build/linux-arm64: llama/build/linux-arm64/runners/cpu_avx
llama/build/linux-arm64: llama/build/linux-arm64/runners/cpu_avx2
# Windows runner sets: AVX and AVX2 CPU runners for each architecture.
# FIX: the second phony name was "linux/build/windows-arm64" (typo), which left
# llama/build/windows-arm64 undeclared and named a target that does not exist.
.PHONY: llama/build/windows-amd64 llama/build/windows-arm64
llama/build/windows-amd64: llama/build/windows-amd64/runners/cpu_avx
llama/build/windows-amd64: llama/build/windows-amd64/runners/cpu_avx2
llama/build/windows-arm64: llama/build/windows-arm64/runners/cpu_avx
llama/build/windows-arm64: llama/build/windows-arm64/runners/cpu_avx2
# Build the GPU compute libraries by delegating to the ggml-cuda sub-make,
# passing the goal name ($@) through so it selects the right variant.
# FIX: rocm shares this rule but was missing from .PHONY.
.PHONY: cuda_v11 cuda_v12 rocm
cuda_v11 cuda_v12 rocm:
	$(MAKE) -C ml/backend/ggml/ggml/ggml-cuda $@
# Detect GPU toolchains for the host OS; a variable is empty when the
# corresponding compiler is not installed, which disables that backend below.
# FIX: use := so each $(shell ...) probe runs once at parse time instead of
# being re-executed on every expansion (recursive `=` flavor).
ifeq ($(GOOS),linux)
NVCC11:=$(shell command -v /usr/local/cuda-11/bin/nvcc)
NVCC12:=$(shell command -v /usr/local/cuda-12/bin/nvcc)
HIPCC:=$(shell command -v hipcc)
else ifeq ($(GOOS),windows)
NVCC11:=$(shell ls "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.?\\bin\\nvcc.exe")
NVCC12:=$(shell ls "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v12.?\\bin\\nvcc.exe")
HIPCC:=$(shell command -v hipcc)
endif
# Enable CUDA 11 runners only when nvcc 11 was found and the user has not
# opted out via OLLAMA_SKIP_GENERATE_CUDA_11.
ifneq ($(NVCC11),)
ifeq ($(OLLAMA_SKIP_GENERATE_CUDA_11),)
# FIX: dropped the trailing backslash on the last list item — it continued the
# definition into whatever line followed.
CUDA_V11_TARGETS= \
	llama/build/linux-amd64/runners/cuda_v11 \
	llama/build/linux-arm64/runners/cuda_v11 \
	llama/build/windows-amd64/runners/cuda_v11 \
	llama/build/windows-arm64/runners/cuda_v11

# Each cuda_v11 runner needs the compute library built first, with NVCC
# pointing at the CUDA 11 compiler (exported to the sub-make).
$(CUDA_V11_TARGETS): cuda_v11
cuda_v11: export NVCC=$(NVCC11)

# Add the cuda_v11 runner to every platform's runner set.
llama/build/linux-amd64: llama/build/linux-amd64/runners/cuda_v11
llama/build/linux-arm64: llama/build/linux-arm64/runners/cuda_v11
llama/build/windows-amd64: llama/build/windows-amd64/runners/cuda_v11
llama/build/windows-arm64: llama/build/windows-arm64/runners/cuda_v11
endif
endif
# Enable CUDA 12 runners only when nvcc 12 was found and the user has not
# opted out via OLLAMA_SKIP_GENERATE_CUDA_12.
ifneq ($(NVCC12),)
ifeq ($(OLLAMA_SKIP_GENERATE_CUDA_12),)
# FIX: dropped the trailing backslash on the last list item — it continued the
# definition into whatever line followed.
CUDA_V12_TARGETS= \
	llama/build/linux-amd64/runners/cuda_v12 \
	llama/build/linux-arm64/runners/cuda_v12 \
	llama/build/windows-amd64/runners/cuda_v12 \
	llama/build/windows-arm64/runners/cuda_v12

# Each cuda_v12 runner needs the compute library built first, with NVCC
# pointing at the CUDA 12 compiler (exported to the sub-make).
$(CUDA_V12_TARGETS): cuda_v12
cuda_v12: export NVCC=$(NVCC12)

# Add the cuda_v12 runner to every platform's runner set.
llama/build/linux-amd64: llama/build/linux-amd64/runners/cuda_v12
llama/build/linux-arm64: llama/build/linux-arm64/runners/cuda_v12
llama/build/windows-amd64: llama/build/windows-amd64/runners/cuda_v12
llama/build/windows-arm64: llama/build/windows-arm64/runners/cuda_v12
endif
endif
# Enable ROCm runners only when hipcc was found and the user has not opted
# out via OLLAMA_SKIP_GENERATE_ROCM.
ifneq ($(HIPCC),)
ifeq ($(OLLAMA_SKIP_GENERATE_ROCM),)
# FIX: dropped the trailing backslash on the last list item — it continued the
# definition into whatever line followed.
ROCM_TARGETS= \
	llama/build/linux-amd64/runners/rocm \
	llama/build/linux-arm64/runners/rocm \
	llama/build/windows-amd64/runners/rocm \
	llama/build/windows-arm64/runners/rocm

# Each rocm runner needs the compute library built first.
# NOTE(review): NVCC=$(HIPCC) looks intentional — the sub-make apparently uses
# the NVCC variable for whichever GPU compiler is active — but confirm against
# ml/backend/ggml/ggml/ggml-cuda's Makefile.
$(ROCM_TARGETS): rocm
rocm: export NVCC=$(HIPCC)

# Add the rocm runner to every platform's runner set.
llama/build/linux-amd64: llama/build/linux-amd64/runners/rocm
llama/build/linux-arm64: llama/build/linux-arm64/runners/rocm
llama/build/windows-amd64: llama/build/windows-amd64/runners/rocm
llama/build/windows-arm64: llama/build/windows-arm64/runners/rocm
endif
endif
# The runner links native code, so cgo must be enabled.
export CGO_ENABLED=1
# Allow these SIMD flags through cgo's CPPFLAGS safelist filter
# (the value is a regex alternation consumed by the Go toolchain).
export CGO_CPPFLAGS_ALLOW=-mfma|-mf16c
# Build the runner binary into its per-platform/per-flavor directory ($@).
# `always` forces this recipe to run every time; Go's own build cache decides
# whether anything actually recompiles. $< expands to cmd/runner.
# FIX: chain with && so a failed mkdir aborts instead of running go build anyway.
llama/build/%: cmd/runner always
	mkdir -p $@ && go build -o $@ ./$<

# Phony no-op prerequisite used to force rebuild checks.
.PHONY: always
always:
# FIX: clean/realclean were not declared phony — a file named "clean" would
# silently disable them.
.PHONY: clean realclean

# Remove all built runners.
clean:
	$(RM) -r llama/build

# Also clean the GPU compute library sub-make.
# FIX: replaced `$<` (which obscurely expanded to the prerequisite name
# "clean") with the explicit goal.
realclean: clean
	$(MAKE) -C ml/backend/ggml/ggml/ggml-cuda clean