mirror of
https://github.com/ollama/ollama.git
synced 2026-04-26 18:55:53 +02:00
* readme: add Ellama to list of community integrations (#9800) * readme: add screenpipe to community integrations (#9786) * Add support for ROCm gfx1151 (#9773) * conditionally enable parallel pipelines * sample: make mutations in transforms explicit (#9743) * updated minP to use early exit making use of sorted tokens * ml/backend/ggml: allocate memory with malloc when loading model (#9822) * runner: remove cache prompt flag from ollama runner (#9826) We do not need to bypass the prompt caching in the ollama runner yet, as only embedding models needed to bypass the prompt caching. When embedding models are implemented they can skip initializing this cache completely. * ollamarunner: Check for minBatch of context space when shifting Models can specify that a group of inputs need to be handled in a single batch. However, context shifting didn't respect this and could trigger a break anyways. In this case, we should instead trigger a context shift earlier so that it occurs before the grouped batch. Note that there are still some corner cases: - A long prompt that exceeds the context window can get truncated in the middle of an image. With the current models, this will result in the model not recognizing the image at all, which is pretty much the expected result with truncation. - The context window is set less than the minimum batch size. The only solution to this is to refuse to load the model with these settings. However, this can never occur with current models and default settings. Since users are unlikely to run into these scenarios, fixing them is left as a follow up. 
* Applied latest patches from McBane87 See this for details: https://github.com/whyvl/ollama-vulkan/issues/7#issuecomment-2708820861 Signed-off-by: Vadim Grinco <vadim@grinco.eu> * Add ability to enable flash attention on vulkan (#4) * discover: add flash attention handling for vulkan * envconfig: fix typo in config.go As part of the process some code was refactored and I added a new field FlashAttention to GpuInfo since the previous solution didn't allow for a granular check via vulkan extensions. As a side effect, this now allows for granular per-device FA support checking in other places --------- Signed-off-by: Vadim Grinco <vadim@grinco.eu> Co-authored-by: zeo <108888572+zeozeozeo@users.noreply.github.com> Co-authored-by: Louis Beaumont <louis.beaumont@gmail.com> Co-authored-by: Daniel Hiltgen <dhiltgen@users.noreply.github.com> Co-authored-by: Michael Yang <mxyng@pm.me> Co-authored-by: Parth Sareen <parth.sareen@ollama.com> Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com> Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com> Co-authored-by: Jesse Gross <jesse@ollama.com> Co-authored-by: Nikita <50599445+nasrally@users.noreply.github.com>
188 lines
5.0 KiB
Go
188 lines
5.0 KiB
Go
package discover
|
|
|
|
import (
|
|
"fmt"
|
|
"log/slog"
|
|
|
|
"github.com/ollama/ollama/format"
|
|
)
|
|
|
|
// memInfo holds the memory statistics shared by system and GPU descriptors.
// All values are in bytes.
type memInfo struct {
	// TotalMemory is the total physical memory of the device or system.
	TotalMemory uint64 `json:"total_memory,omitempty"`
	// FreeMemory is the memory currently available for use.
	FreeMemory uint64 `json:"free_memory,omitempty"`
	// FreeSwap is the available swap space.
	FreeSwap uint64 `json:"free_swap,omitempty"` // TODO split this out for system only
}
|
|
|
|
// Beginning of an `ollama info` command
type GpuInfo struct { // TODO better name maybe "InferenceProcessor"?
	memInfo

	// Library identifies the backend used to drive this device.
	Library string `json:"library,omitempty"`

	// Optional variant to select (e.g. versions, cpu feature flags)
	Variant string `json:"variant"`

	// MinimumMemory represents the minimum memory required to use the GPU
	MinimumMemory uint64 `json:"-"`

	// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
	DependencyPath []string `json:"lib_path,omitempty"`

	// Extra environment variables specific to the GPU as list of [key,value]
	EnvWorkarounds [][2]string `json:"envs,omitempty"`

	// Set to true if we can NOT reliably discover FreeMemory. A value of true indicates
	// the FreeMemory is best effort, and may over or under report actual memory usage
	// False indicates FreeMemory can generally be trusted on this GPU
	UnreliableFreeMemory bool

	// GPU information
	ID             string `json:"gpu_id"`          // string to use for selection of this specific GPU
	Name           string `json:"name"`            // user friendly name if available
	Compute        string `json:"compute"`         // Compute Capability or gfx
	FlashAttention bool   `json:"flash_attention"` // is flash attention supported

	// Driver Information - TODO no need to put this on each GPU
	DriverMajor int `json:"driver_major,omitempty"`
	DriverMinor int `json:"driver_minor,omitempty"`

	// TODO other performance capability info to help in scheduling decisions
}
|
|
|
|
func (gpu GpuInfo) RunnerName() string {
|
|
if gpu.Variant != "" {
|
|
return gpu.Library + "_" + gpu.Variant
|
|
}
|
|
return gpu.Library
|
|
}
|
|
|
|
// CPUInfo describes the host CPU acting as an inference device. It embeds
// the generic GpuInfo fields plus the per-socket CPU package details.
type CPUInfo struct {
	GpuInfo
	// CPUs lists the discovered CPU packages (one entry per socket).
	CPUs []CPU
}
|
|
|
|
// CPU type represents a CPU Package occupying a socket.
// The `cpuinfo` struct tags name the keys used when populating these
// fields from cpuinfo-style data (presumably /proc/cpuinfo — TODO confirm
// against the parser).
type CPU struct {
	ID                  string `cpuinfo:"processor"`
	VendorID            string `cpuinfo:"vendor_id"`
	ModelName           string `cpuinfo:"model name"`
	CoreCount           int
	EfficiencyCoreCount int // Performance = CoreCount - Efficiency
	ThreadCount         int
}
|
|
|
|
// CudaGPUInfo describes a GPU driven through the CUDA backend.
type CudaGPUInfo struct {
	GpuInfo
	OSOverhead   uint64 // Memory overhead between the driver library and management library
	index        int    //nolint:unused,nolintlint
	computeMajor int    //nolint:unused,nolintlint
	computeMinor int    //nolint:unused,nolintlint
}

// CudaGPUInfoList is a collection of CUDA GPU descriptions.
type CudaGPUInfoList []CudaGPUInfo
|
|
|
|
// RocmGPUInfo describes a GPU driven through the ROCm backend.
type RocmGPUInfo struct {
	GpuInfo
	usedFilepath string //nolint:unused,nolintlint
	index        int    //nolint:unused,nolintlint
}

// RocmGPUInfoList is a collection of ROCm GPU descriptions.
type RocmGPUInfoList []RocmGPUInfo
|
|
|
|
// OneapiGPUInfo describes a GPU driven through the oneAPI backend.
type OneapiGPUInfo struct {
	GpuInfo
	driverIndex int //nolint:unused,nolintlint
	gpuIndex    int //nolint:unused,nolintlint
}

// OneapiGPUInfoList is a collection of oneAPI GPU descriptions.
type OneapiGPUInfoList []OneapiGPUInfo
|
|
|
|
// VulkanGPUInfo describes a GPU driven through the Vulkan backend.
type VulkanGPUInfo struct {
	GpuInfo
	index int //nolint:unused,nolintlint
}

// VulkanGPUInfoList is a collection of Vulkan GPU descriptions.
type VulkanGPUInfoList []VulkanGPUInfo
|
|
|
|
// GpuInfoList is a collection of GPU descriptions across any backend.
type GpuInfoList []GpuInfo

// UnsupportedGPUInfo describes a discovered GPU that cannot be used,
// along with the reason it was rejected.
type UnsupportedGPUInfo struct {
	GpuInfo
	Reason string `json:"reason"`
}
|
|
|
|
// Split up the set of gpu info's by Library and variant
|
|
func (l GpuInfoList) ByLibrary() []GpuInfoList {
|
|
resp := []GpuInfoList{}
|
|
libs := []string{}
|
|
for _, info := range l {
|
|
found := false
|
|
requested := info.Library
|
|
if info.Variant != "" {
|
|
requested += "_" + info.Variant
|
|
}
|
|
for i, lib := range libs {
|
|
if lib == requested {
|
|
resp[i] = append(resp[i], info)
|
|
found = true
|
|
break
|
|
}
|
|
}
|
|
if !found {
|
|
libs = append(libs, requested)
|
|
resp = append(resp, []GpuInfo{info})
|
|
}
|
|
}
|
|
return resp
|
|
}
|
|
|
|
// LogDetails reports each GPU's information to the log at Info level.
func (l GpuInfoList) LogDetails() {
	for _, g := range l {
		slog.Info("inference compute",
			"id", g.ID,
			"library", g.Library,
			"variant", g.Variant,
			"compute", g.Compute,
			"driver", fmt.Sprintf("%d.%d", g.DriverMajor, g.DriverMinor),
			"name", g.Name,
			"total", format.HumanBytes2(g.TotalMemory),
			"available", format.HumanBytes2(g.FreeMemory),
		)
	}
}
|
|
|
|
// ByFreeMemory implements sort.Interface, ordering GPUs by ascending
// free memory.
type ByFreeMemory []GpuInfo

func (a ByFreeMemory) Len() int           { return len(a) }
func (a ByFreeMemory) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByFreeMemory) Less(i, j int) bool { return a[i].FreeMemory < a[j].FreeMemory }
|
|
|
|
// SystemInfo aggregates the discovery results for the whole host.
type SystemInfo struct {
	System          CPUInfo              `json:"system"`           // host CPU description
	GPUs            []GpuInfo            `json:"gpus"`             // usable GPUs
	UnsupportedGPUs []UnsupportedGPUInfo `json:"unsupported_gpus"` // GPUs found but rejected, with reasons
	DiscoveryErrors []string             `json:"discovery_errors"` // errors encountered while probing devices
}
|
|
|
|
// Return the optimal number of threads to use for inference
|
|
func (si SystemInfo) GetOptimalThreadCount() int {
|
|
if len(si.System.CPUs) == 0 {
|
|
return 0
|
|
}
|
|
|
|
coreCount := 0
|
|
for _, c := range si.System.CPUs {
|
|
coreCount += c.CoreCount - c.EfficiencyCoreCount
|
|
}
|
|
|
|
return coreCount
|
|
}
|
|
|
|
// FlashAttentionSupported reports whether flash attention can be enabled:
// it returns true only when every GPU in the list supports it (and true
// for an empty list).
func (l GpuInfoList) FlashAttentionSupported() bool {
	for _, gpu := range l {
		if !gpu.FlashAttention {
			return false
		}
	}
	return true
}
|