mirror of
https://github.com/ollama/ollama.git
synced 2026-04-20 15:55:46 +02:00
Compare commits
3 Commits
launch-cop
...
hoyyeva/op
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7a3ed0a1b4 | ||
|
|
03f9e57274 | ||
|
|
30d9100fff |
@@ -1,16 +1,21 @@
|
||||
package launch
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/cmd/internal/fileutil"
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
modeltype "github.com/ollama/ollama/types/model"
|
||||
)
|
||||
|
||||
// OpenCode implements Runner and Editor for OpenCode integration.
|
||||
@@ -229,6 +234,9 @@ func readModelJSONModels() []string {
|
||||
}
|
||||
|
||||
func buildModelEntries(modelList []string) map[string]any {
|
||||
client := api.NewClient(envconfig.Host(), http.DefaultClient)
|
||||
ctx := context.Background()
|
||||
|
||||
models := make(map[string]any)
|
||||
for _, model := range modelList {
|
||||
entry := map[string]any{
|
||||
@@ -242,7 +250,49 @@ func buildModelEntries(modelList []string) map[string]any {
|
||||
}
|
||||
}
|
||||
}
|
||||
applyOpenCodeReasoning(ctx, client, model, entry)
|
||||
models[model] = entry
|
||||
}
|
||||
return models
|
||||
}
|
||||
|
||||
// applyOpenCodeReasoning detects thinking capability and sets reasoning config
|
||||
// on the model entry. When the model supports thinking, it sets "reasoning": true
|
||||
// and configures variants for the OpenCode TUI:
|
||||
// - GPT-OSS: supports variable effort levels (low/medium/high) and defaults to
|
||||
// medium via options. Thinking cannot be turned off.
|
||||
// - Other models: only support on/off. Disables built-in low/medium/high variants
|
||||
// and adds a "none" variant so users can toggle thinking off via Ctrl+T.
|
||||
//
|
||||
// When the model does not support thinking, no reasoning config is set.
|
||||
func applyOpenCodeReasoning(ctx context.Context, client *api.Client, modelName string, entry map[string]any) {
|
||||
resp, err := client.Show(ctx, &api.ShowRequest{Model: modelName})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if slices.Contains(resp.Capabilities, modeltype.CapabilityThinking) {
|
||||
entry["reasoning"] = true
|
||||
|
||||
if strings.Contains(modelName, "gpt-oss") {
|
||||
// GPT-OSS models support variable thinking effort levels
|
||||
// and cannot turn thinking off. Keep the built-in
|
||||
// low/medium/high variants as-is and default to medium.
|
||||
options, ok := entry["options"].(map[string]any)
|
||||
if !ok {
|
||||
options = make(map[string]any)
|
||||
}
|
||||
options["reasoningEffort"] = "medium"
|
||||
entry["options"] = options
|
||||
} else {
|
||||
// Most models only support thinking on or off.
|
||||
// Disable the built-in low/medium/high variants and add none.
|
||||
entry["variants"] = map[string]any{
|
||||
"none": map[string]any{"reasoningEffort": "none"},
|
||||
"low": map[string]any{"disabled": true},
|
||||
"medium": map[string]any{"disabled": true},
|
||||
"high": map[string]any{"disabled": true},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@ package launch
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
@@ -241,6 +243,133 @@ func TestLookupCloudModelLimit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// inlineConfigModel parses the inline config content as JSON and returns the
// entry for the given model under provider.ollama.models, failing the test
// when the content is invalid or the model is absent.
func inlineConfigModel(t *testing.T, content, model string) map[string]any {
	t.Helper()

	var cfg map[string]any
	if err := json.Unmarshal([]byte(content), &cfg); err != nil {
		t.Fatalf("configContent is not valid JSON: %v", err)
	}

	// Walk provider -> ollama -> models. Reading from a nil map is safe and
	// yields nil, so a missing level simply falls through to the final check.
	node := cfg
	for _, key := range []string{"provider", "ollama", "models"} {
		node, _ = node[key].(map[string]any)
	}

	entry, ok := node[model].(map[string]any)
	if !ok {
		t.Fatalf("model %s not found in inline config", model)
	}
	return entry
}
|
||||
|
||||
func TestOpenCodeEdit_ReasoningOnThinkingModel(t *testing.T) {
|
||||
setTestHome(t, t.TempDir())
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/api/show" {
|
||||
fmt.Fprintf(w, `{"capabilities":["thinking"],"model_info":{}}`)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer srv.Close()
|
||||
t.Setenv("OLLAMA_HOST", srv.URL)
|
||||
|
||||
o := &OpenCode{}
|
||||
if err := o.Edit([]string{"qwq"}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
entry := inlineConfigModel(t, o.configContent, "qwq")
|
||||
if entry["reasoning"] != true {
|
||||
t.Error("expected reasoning = true for thinking model")
|
||||
}
|
||||
variants, ok := entry["variants"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected variants to be set")
|
||||
}
|
||||
none, ok := variants["none"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected none variant to be set")
|
||||
}
|
||||
if none["reasoningEffort"] != "none" {
|
||||
t.Errorf("none variant reasoningEffort = %v, want none", none["reasoningEffort"])
|
||||
}
|
||||
// Built-in low/medium/high should be disabled
|
||||
for _, level := range []string{"low", "medium", "high"} {
|
||||
v, ok := variants[level].(map[string]any)
|
||||
if !ok {
|
||||
t.Errorf("expected %s variant to exist", level)
|
||||
continue
|
||||
}
|
||||
if v["disabled"] != true {
|
||||
t.Errorf("expected %s variant to be disabled", level)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpenCodeEdit_ReasoningLevelsOnGptOss(t *testing.T) {
|
||||
setTestHome(t, t.TempDir())
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/api/show" {
|
||||
fmt.Fprintf(w, `{"capabilities":["thinking"],"model_info":{}}`)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer srv.Close()
|
||||
t.Setenv("OLLAMA_HOST", srv.URL)
|
||||
|
||||
o := &OpenCode{}
|
||||
if err := o.Edit([]string{"gpt-oss:120b-cloud"}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
entry := inlineConfigModel(t, o.configContent, "gpt-oss:120b-cloud")
|
||||
if entry["reasoning"] != true {
|
||||
t.Error("expected reasoning = true")
|
||||
}
|
||||
// GPT-OSS cannot turn thinking off and supports levels,
|
||||
// so no custom variants should be written.
|
||||
if entry["variants"] != nil {
|
||||
t.Errorf("expected no variants for gpt-oss, got %v", entry["variants"])
|
||||
}
|
||||
// Should default to medium reasoning effort
|
||||
opts, ok := entry["options"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("expected options to be set for gpt-oss")
|
||||
}
|
||||
if opts["reasoningEffort"] != "medium" {
|
||||
t.Errorf("reasoningEffort = %v, want medium", opts["reasoningEffort"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpenCodeEdit_NoReasoningOnNonThinkingModel(t *testing.T) {
|
||||
setTestHome(t, t.TempDir())
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/api/show" {
|
||||
fmt.Fprintf(w, `{"capabilities":[],"model_info":{}}`)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}))
|
||||
defer srv.Close()
|
||||
t.Setenv("OLLAMA_HOST", srv.URL)
|
||||
|
||||
o := &OpenCode{}
|
||||
if err := o.Edit([]string{"llama3.2"}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
entry := inlineConfigModel(t, o.configContent, "llama3.2")
|
||||
if entry["reasoning"] != nil {
|
||||
t.Errorf("expected no reasoning for non-thinking model, got %v", entry["reasoning"])
|
||||
}
|
||||
if entry["variants"] != nil {
|
||||
t.Errorf("expected no variants for non-thinking model, got %v", entry["variants"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindOpenCode(t *testing.T) {
|
||||
t.Run("fallback to ~/.opencode/bin", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
Reference in New Issue
Block a user