Mirror of https://github.com/ollama/ollama.git, synced 2026-04-17 21:54:08 +02:00
* bench: add prompt calibration, context size flag, and NumCtx reporting

  Add a --num-ctx flag to set the context size, and report NumCtx in the model
  info header. Calibrate the tokens-per-word ratio during warmup using actual
  tokenization metrics from the model, replacing the fixed 1.3 heuristic. This
  produces more accurate prompt token counts for --prompt-tokens. Also add
  fetchContextLength() to query the running model's context via /api/ps.
  (A sketch of the calibration idea follows the commit list below.)

* integration: improve vision test robustness and add thinking tests

  Add skipIfNoVisionOverride() to skip vision tests when OLLAMA_TEST_MODEL is
  set to a non-vision model. Add Think:false to the context exhaustion test to
  prevent thinking models from using all context before the test can measure
  it. Add a third test image (ollama homepage) and replace the OCR test with an
  ImageDescription test using it. Relax match strings for broader model
  compatibility. Add TestThinkingEnabled and TestThinkingSuppressed to verify
  thinking output and channel tag handling.

* gemma4: add Gemma 4 GGML model support

  Add full Gemma 4 model family support (E2B, E4B, 26B MoE, 31B Dense) for the
  GGML backend, including text, vision, converter, parser, and renderer.

  Text model features:
  - Sliding window + full attention with per-layer patterns
  - KV sharing across layers with donor map
  - Per-layer embeddings (PLE) with learned projections
  - MoE routing with RMSNorm + learned scale
  - Proportional RoPE with freq_factors for global attention
  - Final logit softcapping

  Vision model features:
  - SigLIP vision encoder with 2D RoPE
  - ClippableLinear with input/output clamping via packed v.clamp_data
  - Adaptive average pooling with nMerge kernel
  - Multi-modal projection with unweighted RMSNorm

  Converter:
  - Safetensors to GGUF with vision tensor renaming
  - Fused MoE gate_up_proj splitting
  - Vision patch embedding reshape (HF to Conv2D layout)
  - Packed clamp data tensor for ClippableLinear bounds
  - Proportional RoPE freq_factors generation

  Also includes:
  - BackendGet() on ml.Tensor for reading weight tensor data
  - Q6_K CUDA get_rows kernel support
  - MoE-aware ffn_down quantization layer counting
  - Gemma4 parser with tool calling and thinking support
  - Gemma4 renderer with structured tool format
  - Architecture-based auto-detection of renderer/parser/stop tokens
  - Integration test gemma4 model list additions

* gemma4: add audio support with USM conformer encoder

  Add audio encoding for Gemma 4 using the USM conformer architecture:
  - Converter: audio tensor mapping, SSCP/conformer/embedder name replacements,
    softplus repacker for per_dim_scale, F32 enforcement for conv weights
  - GGML backend: Conv1DDW and PadExt tensor ops
  - Audio encoder: SSCP Conv2D, 12 conformer blocks (FFW + block-local
    attention with relative position embeddings + LightConv1d + FFW), output
    projection, audio-to-text embedding projector
  - Audio preprocessing: WAV decode, mel spectrogram, FFT (pure Go)
  - Model wiring: WAV detection, audio token handling, unified PostTokenize

  Correctly transcribes "why is the sky blue" from the test audio.

* integration: add gemma4 audio tests including OpenAI API coverage

  Test audio transcription and response via the Ollama native API, plus two new
  tests exercising the OpenAI-compatible endpoints:
  - /v1/audio/transcriptions (multipart form upload)
  - /v1/chat/completions with input_audio content type

  All tests use capability checks and skip models without audio support.
* gemma4: add OpenAI audio API support and capability detection

  - Add CapabilityAudio and detect it from audio.block_count in GGUF
  - Add /v1/audio/transcriptions endpoint with TranscriptionMiddleware
  - Add input_audio content type support in /v1/chat/completions
  - Add TranscriptionRequest/Response types in the openai package

  (Request sketches for both OpenAI-compatible endpoints follow the commit
  list below.)

* gemma4: add audio input support for run command

  - /audio toggle in interactive mode for voice chat
  - Platform-specific microphone recording (AVFoundation on macOS,
    PulseAudio/ALSA on Linux, WASAPI on Windows)
  - Space to start/stop recording, automatic chunking for long audio

* gemma4: add transcribe command (ollama transcribe MODEL)

  - Interactive mode with readline prompt and slash commands
  - Non-interactive mode for piped audio or record-until-Ctrl+C
  - Chunked streaming transcription for long recordings
  - Word-wrapped output matching run command style

* gemma4: add parser, renderer, and integration test plumbing

* gemma4: fix renderer to emit BOS token

* gemma4: add OpenAI audio transcription API and input_audio support

* gemma4: update converter for new weight drop naming

* gemma4: add per_expert_scale to MoE router and fix moe_intermediate_size config

* gemma4: rewrite renderer to match HF Jinja2 template exactly

  Fix 8 bugs found by building 55 reference tests verified against the HF
  Jinja2 chat template (VERIFY_JINJA2=1 shells out to Python):
  - Tool responses use separate <|turn>tool turns (not inline tags)
  - Tool calls emitted before content in assistant messages
  - Thinking content stripped from assistant history (strip_thinking)
  - User, tool, and system content trimmed (template does | trim)
  - Empty system message still emits a system turn (check role, not content)
  - Nested object properties rendered recursively with required field
  - Array items specification rendered for array-type properties
  - OBJECT/ARRAY type-specific rendering comma logic matches template

  Also adds a Required field to api.ToolProperty for nested object schemas,
  replaces the old gemma4_test.go with a comprehensive
  gemma4_reference_test.go, and commits the Jinja2 template as testdata for
  verification.

* gemma4: fix MoE fused gate_up split and multiline tool-call arg parsing

  - Text MoE: split `ffn_gate_up_exps` into contiguous `[gate|up]` halves
    instead of stride-2 slices.
  - Parser: escape control characters in `<|"|>...<|"|>` string literals when
    converting tool-call args to JSON.
  - Fixes warnings like `invalid character '\n' in string literal` for
    multiline tool arguments.
  - Add Gemma4 parser regressions for multiline tool-call args and
    `gemma4ArgsToJSON`.

* cmd: simplify audio input to dropped file attachments

* gemma4: use full SWA memory for better cache reuse

* gemma4: initialize clamps after backend load

* convert: align gemma4 audio tensor renames with llama.cpp

* Remove redundant comments in gemma4 vision model

* Format Gemma4 MoE block field alignment

* use 4096 kvcache.NewSWAMemCache

* convert: support new Gemma4 audio_tower tensor naming (#15221)

  Co-authored-by: jmorganca <jmorganca@gmail.com>

* fix integration test defaults for audio

* review comments and lint fixes

* remove unused audio/video files

---------

Co-authored-by: jmorganca <jmorganca@gmail.com>
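The prompt calibration described in the bench commit can be pictured with a small sketch. This is an illustrative reconstruction, not the benchmark's actual code: the helper names (calibrateRatio, buildPrompt), the filler word, and reading the measured token count from the server's reported prompt evaluation count are assumptions; only the idea of replacing the fixed 1.3 tokens-per-word heuristic with a ratio measured during warmup comes from the change description.

package main

import (
	"fmt"
	"strings"
)

// calibrateRatio derives a tokens-per-word ratio from the warmup run: the
// server reports how many tokens the warmup prompt evaluated to
// (promptEvalCount), and we divide by the number of words that prompt had.
func calibrateRatio(warmupPrompt string, promptEvalCount int) float64 {
	words := len(strings.Fields(warmupPrompt))
	if words == 0 || promptEvalCount <= 0 {
		return 1.3 // fall back to the old fixed heuristic
	}
	return float64(promptEvalCount) / float64(words)
}

// buildPrompt repeats a filler word until the estimated token count reaches
// the requested --prompt-tokens target, using the calibrated ratio.
func buildPrompt(targetTokens int, ratio float64) string {
	wordsNeeded := int(float64(targetTokens) / ratio)
	return strings.TrimSpace(strings.Repeat("lorem ", wordsNeeded))
}

func main() {
	// Suppose the warmup prompt had 100 words and the server reported 132 prompt tokens.
	ratio := calibrateRatio(strings.Repeat("word ", 100), 132)
	fmt.Printf("calibrated ratio: %.2f tokens/word\n", ratio)
	fmt.Printf("words for a 500-token prompt: %d\n", len(strings.Fields(buildPrompt(500, ratio))))
}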
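A hedged sketch of exercising the new OpenAI-compatible transcription endpoint. The path (/v1/audio/transcriptions) and the multipart form upload come from the change description above; the form field names ("file", "model"), the model tag, and the response shape follow OpenAI's convention and are assumptions about Ollama's implementation.

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	audio, err := os.ReadFile("test.wav") // any 16-bit PCM WAV sample
	if err != nil {
		panic(err)
	}

	// Build the multipart form body: an audio file plus the model to use.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, _ := w.CreateFormFile("file", "test.wav")
	part.Write(audio)
	w.WriteField("model", "gemma4") // model tag is an assumption
	w.Close()

	resp, err := http.Post("http://localhost:11434/v1/audio/transcriptions", w.FormDataContentType(), &body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // e.g. a JSON object containing the transcribed text
}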
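A similar sketch for the input_audio content type on /v1/chat/completions. The JSON shape mirrors OpenAI's published input_audio format (base64 audio data plus a "wav" format tag); whether Ollama's compatibility layer accepts exactly these field names, and the model tag used here, are assumptions.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	wav, err := os.ReadFile("test.wav")
	if err != nil {
		panic(err)
	}

	// Chat request whose user message carries both text and base64 audio.
	payload := map[string]any{
		"model": "gemma4", // model tag is an assumption
		"messages": []map[string]any{{
			"role": "user",
			"content": []map[string]any{
				{"type": "text", "text": "Transcribe this clip."},
				{"type": "input_audio", "input_audio": map[string]string{
					"data":   base64.StdEncoding.EncodeToString(wav),
					"format": "wav",
				}},
			},
		}},
	}

	body, _ := json.Marshal(payload)
	resp, err := http.Post("http://localhost:11434/v1/chat/completions", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}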
722 lines · 21 KiB · Go
package cmd

import (
	"cmp"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"slices"
	"strings"

	"github.com/spf13/cobra"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/internal/modelref"
	"github.com/ollama/ollama/readline"
	"github.com/ollama/ollama/types/errtypes"
	"github.com/ollama/ollama/types/model"
)

type MultilineState int

const (
	MultilineNone MultilineState = iota
	MultilinePrompt
	MultilineSystem
)

func generateInteractive(cmd *cobra.Command, opts runOptions) error {
	usage := func() {
		fmt.Fprintln(os.Stderr, "Available Commands:")
		fmt.Fprintln(os.Stderr, " /set Set session variables")
		fmt.Fprintln(os.Stderr, " /show Show model information")
		fmt.Fprintln(os.Stderr, " /load <model> Load a session or model")
		fmt.Fprintln(os.Stderr, " /save <model> Save your current session")
		fmt.Fprintln(os.Stderr, " /clear Clear session context")
		fmt.Fprintln(os.Stderr, " /bye Exit")
		fmt.Fprintln(os.Stderr, " /?, /help Help for a command")
		fmt.Fprintln(os.Stderr, " /? shortcuts Help for keyboard shortcuts")

		fmt.Fprintln(os.Stderr, "")
		fmt.Fprintln(os.Stderr, "Use \"\"\" to begin a multi-line message.")

		if opts.MultiModal {
			fmt.Fprintf(os.Stderr, "Use %s to include .jpg, .png, .webp images, or .wav audio files.\n", filepath.FromSlash("/path/to/file"))
		}

		fmt.Fprintln(os.Stderr, "")
	}

	usageSet := func() {
		fmt.Fprintln(os.Stderr, "Available Commands:")
		fmt.Fprintln(os.Stderr, " /set parameter ... Set a parameter")
		fmt.Fprintln(os.Stderr, " /set system <string> Set system message")
		fmt.Fprintln(os.Stderr, " /set history Enable history")
		fmt.Fprintln(os.Stderr, " /set nohistory Disable history")
		fmt.Fprintln(os.Stderr, " /set wordwrap Enable wordwrap")
		fmt.Fprintln(os.Stderr, " /set nowordwrap Disable wordwrap")
		fmt.Fprintln(os.Stderr, " /set format json Enable JSON mode")
		fmt.Fprintln(os.Stderr, " /set noformat Disable formatting")
		fmt.Fprintln(os.Stderr, " /set verbose Show LLM stats")
		fmt.Fprintln(os.Stderr, " /set quiet Disable LLM stats")
		fmt.Fprintln(os.Stderr, " /set think Enable thinking")
		fmt.Fprintln(os.Stderr, " /set nothink Disable thinking")
		fmt.Fprintln(os.Stderr, "")
	}

	usageShortcuts := func() {
		fmt.Fprintln(os.Stderr, "Available keyboard shortcuts:")
		fmt.Fprintln(os.Stderr, " Ctrl + a Move to the beginning of the line (Home)")
		fmt.Fprintln(os.Stderr, " Ctrl + e Move to the end of the line (End)")
		fmt.Fprintln(os.Stderr, " Alt + b Move back (left) one word")
		fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
		fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
		fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
		fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor")
		fmt.Fprintln(os.Stderr, "")
		fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
		fmt.Fprintln(os.Stderr, " Ctrl + g Open default editor to compose a prompt")
		fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")
		fmt.Fprintln(os.Stderr, " Ctrl + d Exit ollama (/bye)")
		fmt.Fprintln(os.Stderr, "")
	}

	usageShow := func() {
		fmt.Fprintln(os.Stderr, "Available Commands:")
		fmt.Fprintln(os.Stderr, " /show info Show details for this model")
		fmt.Fprintln(os.Stderr, " /show license Show model license")
		fmt.Fprintln(os.Stderr, " /show modelfile Show Modelfile for this model")
		fmt.Fprintln(os.Stderr, " /show parameters Show parameters for this model")
		fmt.Fprintln(os.Stderr, " /show system Show system message")
		fmt.Fprintln(os.Stderr, " /show template Show prompt template")
		fmt.Fprintln(os.Stderr, "")
	}

	// only list out the most common parameters
	usageParameters := func() {
		fmt.Fprintln(os.Stderr, "Available Parameters:")
		fmt.Fprintln(os.Stderr, " /set parameter seed <int> Random number seed")
		fmt.Fprintln(os.Stderr, " /set parameter num_predict <int> Max number of tokens to predict")
		fmt.Fprintln(os.Stderr, " /set parameter top_k <int> Pick from top k num of tokens")
		fmt.Fprintln(os.Stderr, " /set parameter top_p <float> Pick token based on sum of probabilities")
		fmt.Fprintln(os.Stderr, " /set parameter min_p <float> Pick token based on top token probability * min_p")
		fmt.Fprintln(os.Stderr, " /set parameter num_ctx <int> Set the context size")
		fmt.Fprintln(os.Stderr, " /set parameter temperature <float> Set creativity level")
		fmt.Fprintln(os.Stderr, " /set parameter repeat_penalty <float> How strongly to penalize repetitions")
		fmt.Fprintln(os.Stderr, " /set parameter repeat_last_n <int> Set how far back to look for repetitions")
		fmt.Fprintln(os.Stderr, " /set parameter num_gpu <int> The number of layers to send to the GPU")
		fmt.Fprintln(os.Stderr, " /set parameter stop <string> <string> ... Set the stop parameters")
		fmt.Fprintln(os.Stderr, "")
	}

	scanner, err := readline.New(readline.Prompt{
		Prompt:         ">>> ",
		AltPrompt:      "... ",
		Placeholder:    "Send a message (/? for help)",
		AltPlaceholder: "Press Enter to send",
	})
	if err != nil {
		return err
	}

	if envconfig.NoHistory() {
		scanner.HistoryDisable()
	}

	fmt.Print(readline.StartBracketedPaste)
	defer fmt.Printf(readline.EndBracketedPaste)

	var sb strings.Builder
	var multiline MultilineState
	var thinkExplicitlySet bool = opts.Think != nil

	for {
		line, err := scanner.Readline()
		switch {
		case errors.Is(err, io.EOF):
			fmt.Println()
			return nil
		case errors.Is(err, readline.ErrInterrupt):
			if line == "" {
				fmt.Println("\nUse Ctrl + d or /bye to exit.")
			}

			scanner.Prompt.UseAlt = false
			sb.Reset()

			continue
		case errors.Is(err, readline.ErrEditPrompt):
			sb.Reset()
			content, err := editInExternalEditor(line)
			if err != nil {
				fmt.Fprintf(os.Stderr, "error: %v\n", err)
				continue
			}
			if strings.TrimSpace(content) == "" {
				continue
			}
			scanner.Prefill = content
			continue
		case err != nil:
			return err
		}

		switch {
		case multiline != MultilineNone:
			// check if there's a multiline terminating string
			before, ok := strings.CutSuffix(line, `"""`)
			sb.WriteString(before)
			if !ok {
				fmt.Fprintln(&sb)
				scanner.Prompt.UseAlt = true
				continue
			}

			switch multiline {
			case MultilineSystem:
				opts.System = sb.String()
				opts.Messages = append(opts.Messages, api.Message{Role: "system", Content: opts.System})
				fmt.Println("Set system message.")
				sb.Reset()
			}

			multiline = MultilineNone
			scanner.Prompt.UseAlt = false
		case strings.HasPrefix(line, `"""`):
			line := strings.TrimPrefix(line, `"""`)
			line, ok := strings.CutSuffix(line, `"""`)
			sb.WriteString(line)
			if !ok {
				// no multiline terminating string; need more input
				fmt.Fprintln(&sb)
				multiline = MultilinePrompt
				scanner.Prompt.UseAlt = true
			}
		case scanner.Pasting:
			fmt.Fprintln(&sb, line)
			continue
		case strings.HasPrefix(line, "/list"):
			args := strings.Fields(line)
			if err := ListHandler(cmd, args[1:]); err != nil {
				return err
			}
		case strings.HasPrefix(line, "/load"):
			args := strings.Fields(line)
			if len(args) != 2 {
				fmt.Println("Usage:\n /load <modelname>")
				continue
			}
			origOpts := opts.Copy()

			opts.Model = args[1]
			opts.Messages = []api.Message{}
			fmt.Printf("Loading model '%s'\n", opts.Model)
			opts.Think, err = inferThinkingOption(nil, &opts, thinkExplicitlySet)
			if err != nil {
				if strings.Contains(err.Error(), "not found") {
					fmt.Printf("Couldn't find model '%s'\n", opts.Model)
					opts = origOpts.Copy()
					continue
				}
				return err
			}
			if err := loadOrUnloadModel(cmd, &opts); err != nil {
				if strings.Contains(err.Error(), "not found") {
					fmt.Printf("Couldn't find model '%s'\n", opts.Model)
					opts = origOpts.Copy()
					continue
				}
				if strings.Contains(err.Error(), "does not support thinking") {
					fmt.Printf("error: %v\n", err)
					continue
				}
				return err
			}
			continue
		case strings.HasPrefix(line, "/save"):
			args := strings.Fields(line)
			if len(args) != 2 {
				fmt.Println("Usage:\n /save <modelname>")
				continue
			}

			client, err := api.ClientFromEnvironment()
			if err != nil {
				fmt.Println("error: couldn't connect to ollama server")
				return err
			}

			req := NewCreateRequest(args[1], opts)
			fn := func(resp api.ProgressResponse) error { return nil }
			err = client.Create(cmd.Context(), req, fn)
			if err != nil {
				if strings.Contains(err.Error(), errtypes.InvalidModelNameErrMsg) {
					fmt.Printf("error: The model name '%s' is invalid\n", args[1])
					continue
				}
				return err
			}
			fmt.Printf("Created new model '%s'\n", args[1])
			continue
		case strings.HasPrefix(line, "/clear"):
			opts.Messages = []api.Message{}
			if opts.System != "" {
				newMessage := api.Message{Role: "system", Content: opts.System}
				opts.Messages = append(opts.Messages, newMessage)
			}
			fmt.Println("Cleared session context")
			continue
		case strings.HasPrefix(line, "/set"):
			args := strings.Fields(line)
			if len(args) > 1 {
				switch args[1] {
				case "history":
					scanner.HistoryEnable()
				case "nohistory":
					scanner.HistoryDisable()
				case "wordwrap":
					opts.WordWrap = true
					fmt.Println("Set 'wordwrap' mode.")
				case "nowordwrap":
					opts.WordWrap = false
					fmt.Println("Set 'nowordwrap' mode.")
				case "verbose":
					if err := cmd.Flags().Set("verbose", "true"); err != nil {
						return err
					}
					fmt.Println("Set 'verbose' mode.")
				case "quiet":
					if err := cmd.Flags().Set("verbose", "false"); err != nil {
						return err
					}
					fmt.Println("Set 'quiet' mode.")
				case "think":
					thinkValue := api.ThinkValue{Value: true}
					var maybeLevel string
					if len(args) > 2 {
						maybeLevel = args[2]
					}
					if maybeLevel != "" {
						// TODO(drifkin): validate the level, could be model dependent
						// though... It will also be validated on the server once a call is
						// made.
						thinkValue.Value = maybeLevel
					}
					opts.Think = &thinkValue
					thinkExplicitlySet = true
					if client, err := api.ClientFromEnvironment(); err == nil {
						ensureThinkingSupport(cmd.Context(), client, opts.Model)
					}
					if maybeLevel != "" {
						fmt.Printf("Set 'think' mode to '%s'.\n", maybeLevel)
					} else {
						fmt.Println("Set 'think' mode.")
					}
				case "nothink":
					opts.Think = &api.ThinkValue{Value: false}
					thinkExplicitlySet = true
					if client, err := api.ClientFromEnvironment(); err == nil {
						ensureThinkingSupport(cmd.Context(), client, opts.Model)
					}
					fmt.Println("Set 'nothink' mode.")
				case "format":
					if len(args) < 3 || args[2] != "json" {
						fmt.Println("Invalid or missing format. For 'json' mode use '/set format json'")
					} else {
						opts.Format = args[2]
						fmt.Printf("Set format to '%s' mode.\n", args[2])
					}
				case "noformat":
					opts.Format = ""
					fmt.Println("Disabled format.")
				case "parameter":
					if len(args) < 4 {
						usageParameters()
						continue
					}
					params := args[3:]
					fp, err := api.FormatParams(map[string][]string{args[2]: params})
					if err != nil {
						fmt.Printf("Couldn't set parameter: %q\n", err)
						continue
					}
					fmt.Printf("Set parameter '%s' to '%s'\n", args[2], strings.Join(params, ", "))
					opts.Options[args[2]] = fp[args[2]]
				case "system":
					if len(args) < 3 {
						usageSet()
						continue
					}

					multiline = MultilineSystem

					line := strings.Join(args[2:], " ")
					line, ok := strings.CutPrefix(line, `"""`)
					if !ok {
						multiline = MultilineNone
					} else {
						// only cut suffix if the line is multiline
						line, ok = strings.CutSuffix(line, `"""`)
						if ok {
							multiline = MultilineNone
						}
					}

					sb.WriteString(line)
					if multiline != MultilineNone {
						scanner.Prompt.UseAlt = true
						continue
					}

					opts.System = sb.String() // for display in modelfile
					newMessage := api.Message{Role: "system", Content: sb.String()}
					// Check if the slice is not empty and the last message is from 'system'
					if len(opts.Messages) > 0 && opts.Messages[len(opts.Messages)-1].Role == "system" {
						// Replace the last message
						opts.Messages[len(opts.Messages)-1] = newMessage
					} else {
						opts.Messages = append(opts.Messages, newMessage)
					}
					fmt.Println("Set system message.")
					sb.Reset()
					continue
				default:
					fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
				}
			} else {
				usageSet()
			}
		case strings.HasPrefix(line, "/show"):
			args := strings.Fields(line)
			if len(args) > 1 {
				client, err := api.ClientFromEnvironment()
				if err != nil {
					fmt.Println("error: couldn't connect to ollama server")
					return err
				}
				req := &api.ShowRequest{
					Name:    opts.Model,
					System:  opts.System,
					Options: opts.Options,
				}
				resp, err := client.Show(cmd.Context(), req)
				if err != nil {
					fmt.Println("error: couldn't get model")
					return err
				}

				switch args[1] {
				case "info":
					_ = showInfo(resp, false, os.Stderr)
				case "license":
					if resp.License == "" {
						fmt.Println("No license was specified for this model.")
					} else {
						fmt.Println(resp.License)
					}
				case "modelfile":
					fmt.Println(resp.Modelfile)
				case "parameters":
					fmt.Println("Model defined parameters:")
					if resp.Parameters == "" {
						fmt.Println(" No additional parameters were specified for this model.")
					} else {
						for _, l := range strings.Split(resp.Parameters, "\n") {
							fmt.Printf(" %s\n", l)
						}
					}
					fmt.Println()
					if len(opts.Options) > 0 {
						fmt.Println("User defined parameters:")
						for k, v := range opts.Options {
							fmt.Printf(" %-*s %v\n", 30, k, v)
						}
						fmt.Println()
					}
				case "system":
					switch {
					case opts.System != "":
						fmt.Println(opts.System + "\n")
					case resp.System != "":
						fmt.Println(resp.System + "\n")
					default:
						fmt.Println("No system message was specified for this model.")
					}
				case "template":
					if resp.Template != "" {
						fmt.Println(resp.Template)
					} else {
						fmt.Println("No prompt template was specified for this model.")
					}
				default:
					fmt.Printf("Unknown command '/show %s'. Type /? for help\n", args[1])
				}
			} else {
				usageShow()
			}
		case strings.HasPrefix(line, "/help"), strings.HasPrefix(line, "/?"):
			args := strings.Fields(line)
			if len(args) > 1 {
				switch args[1] {
				case "set", "/set":
					usageSet()
				case "show", "/show":
					usageShow()
				case "shortcut", "shortcuts":
					usageShortcuts()
				}
			} else {
				usage()
			}
		case strings.HasPrefix(line, "/exit"), strings.HasPrefix(line, "/bye"):
			return nil
		case strings.HasPrefix(line, "/"):
			args := strings.Fields(line)
			isFile := false

			if opts.MultiModal {
				for _, f := range extractFileNames(line) {
					if strings.HasPrefix(f, args[0]) {
						isFile = true
						break
					}
				}
			}

			if !isFile {
				fmt.Printf("Unknown command '%s'. Type /? for help\n", args[0])
				continue
			}

			sb.WriteString(line)
		default:
			sb.WriteString(line)
		}

		if sb.Len() > 0 && multiline == MultilineNone {
			newMessage := api.Message{Role: "user", Content: sb.String()}

			if opts.MultiModal {
				msg, images, err := extractFileData(sb.String())
				if err != nil {
					return err
				}

				newMessage.Content = msg
				newMessage.Images = images
			}

			opts.Messages = append(opts.Messages, newMessage)

			assistant, err := chat(cmd, opts)
			if err != nil {
				if strings.Contains(err.Error(), "does not support thinking") ||
					strings.Contains(err.Error(), "invalid think value") {
					fmt.Printf("error: %v\n", err)
					sb.Reset()
					continue
				}
				return err
			}
			if assistant != nil {
				opts.Messages = append(opts.Messages, *assistant)
			}

			sb.Reset()
		}
	}
}

func NewCreateRequest(name string, opts runOptions) *api.CreateRequest {
	parentModel := opts.ParentModel

	modelName := model.ParseName(parentModel)
	if !modelName.IsValid() {
		parentModel = ""
	}

	// Preserve explicit cloud intent for sessions started with `:cloud`.
	// Cloud model metadata can return a source-less parent_model (for example
	// "qwen3.5"), which would otherwise make `/save` create a local derivative.
	if modelref.HasExplicitCloudSource(opts.Model) && !modelref.HasExplicitCloudSource(parentModel) {
		parentModel = ""
	}

	req := &api.CreateRequest{
		Model: name,
		From:  cmp.Or(parentModel, opts.Model),
	}

	if opts.System != "" {
		req.System = opts.System
	}

	if len(opts.Options) > 0 {
		req.Parameters = opts.Options
	}

	if len(opts.Messages) > 0 {
		req.Messages = opts.Messages
	}

	return req
}

func normalizeFilePath(fp string) string {
	return strings.NewReplacer(
		"\\ ", " ", // Escaped space
		"\\(", "(", // Escaped left parenthesis
		"\\)", ")", // Escaped right parenthesis
		"\\[", "[", // Escaped left square bracket
		"\\]", "]", // Escaped right square bracket
		"\\{", "{", // Escaped left curly brace
		"\\}", "}", // Escaped right curly brace
		"\\$", "$", // Escaped dollar sign
		"\\&", "&", // Escaped ampersand
		"\\;", ";", // Escaped semicolon
		"\\'", "'", // Escaped single quote
		"\\\\", "\\", // Escaped backslash
		"\\*", "*", // Escaped asterisk
		"\\?", "?", // Escaped question mark
		"\\~", "~", // Escaped tilde
	).Replace(fp)
}

func extractFileNames(input string) []string {
	// Regex to match file paths starting with optional drive letter, / ./ \ or .\ and include escaped or unescaped spaces (\ or %20)
	// and followed by more characters and a file extension
	// This will capture non filename strings, but we'll check for file existence to remove mismatches
	regexPattern := `(?:[a-zA-Z]:)?(?:\./|/|\\)[\S\\ ]+?\.(?i:jpg|jpeg|png|webp|wav)\b`
	re := regexp.MustCompile(regexPattern)

	return re.FindAllString(input, -1)
}

func extractFileData(input string) (string, []api.ImageData, error) {
	filePaths := extractFileNames(input)
	var imgs []api.ImageData

	for _, fp := range filePaths {
		nfp := normalizeFilePath(fp)
		data, err := getImageData(nfp)
		if errors.Is(err, os.ErrNotExist) {
			continue
		} else if err != nil {
			fmt.Fprintf(os.Stderr, "Couldn't process file: %q\n", err)
			return "", imgs, err
		}
		ext := strings.ToLower(filepath.Ext(nfp))
		switch ext {
		case ".wav":
			fmt.Fprintf(os.Stderr, "Added audio '%s'\n", nfp)
		default:
			fmt.Fprintf(os.Stderr, "Added image '%s'\n", nfp)
		}
		input = strings.ReplaceAll(input, "'"+nfp+"'", "")
		input = strings.ReplaceAll(input, "'"+fp+"'", "")
		input = strings.ReplaceAll(input, fp, "")
		imgs = append(imgs, data)
	}
	return strings.TrimSpace(input), imgs, nil
}

func editInExternalEditor(content string) (string, error) {
	editor := envconfig.Editor()
	if editor == "" {
		editor = os.Getenv("VISUAL")
	}
	if editor == "" {
		editor = os.Getenv("EDITOR")
	}
	if editor == "" {
		editor = defaultEditor
	}

	// Check that the editor binary exists
	name := strings.Fields(editor)[0]
	if _, err := exec.LookPath(name); err != nil {
		return "", fmt.Errorf("editor %q not found, set OLLAMA_EDITOR to the path of your preferred editor", name)
	}

	tmpFile, err := os.CreateTemp("", "ollama-prompt-*.txt")
	if err != nil {
		return "", fmt.Errorf("creating temp file: %w", err)
	}
	defer os.Remove(tmpFile.Name())

	if content != "" {
		if _, err := tmpFile.WriteString(content); err != nil {
			tmpFile.Close()
			return "", fmt.Errorf("writing to temp file: %w", err)
		}
	}
	tmpFile.Close()

	args := strings.Fields(editor)
	args = append(args, tmpFile.Name())
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return "", fmt.Errorf("editor exited with error: %w", err)
	}

	data, err := os.ReadFile(tmpFile.Name())
	if err != nil {
		return "", fmt.Errorf("reading temp file: %w", err)
	}

	return strings.TrimRight(string(data), "\n"), nil
}

func getImageData(filePath string) ([]byte, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	buf := make([]byte, 512)
	_, err = file.Read(buf)
	if err != nil {
		return nil, err
	}

	contentType := http.DetectContentType(buf)
	allowedTypes := []string{"image/jpeg", "image/jpg", "image/png", "image/webp", "audio/wave"}
	if !slices.Contains(allowedTypes, contentType) {
		return nil, fmt.Errorf("invalid file type: %s", contentType)
	}

	info, err := file.Stat()
	if err != nil {
		return nil, err
	}

	var maxSize int64 = 100 * 1024 * 1024 // 100MB
	if info.Size() > maxSize {
		return nil, errors.New("file size exceeds maximum limit (100MB)")
	}

	buf = make([]byte, info.Size())
	_, err = file.Seek(0, 0)
	if err != nil {
		return nil, err
	}

	_, err = io.ReadFull(file, buf)
	if err != nil {
		return nil, err
	}

	return buf, nil
}