Compare commits

...

7 Commits

Author  SHA1  Message  Date
Eva Ho  7a3ed0a1b4  adding test  2026-04-14 15:28:40 -07:00
Eva Ho  03f9e57274  add test  2026-04-14 15:28:40 -07:00
Eva Ho  30d9100fff  launch: add thinking capability detection to opencode  2026-04-14 15:28:40 -07:00
Eva H  698e04a14b  launch: OpenCode inline config (#15586)  2026-04-14 15:08:42 -07:00
Eva H  1d9537bc33  launch/openclaw: fix --yes flag behaviour to skip channels configuration (#15589)  2026-04-14 13:57:35 -07:00
Eva H  120424d832  Revert "launch/opencode: use inline config (#15462)" (#15568)  2026-04-13 18:40:17 -07:00
Eva H  5818001610  launch: skip unchanged integration rewrite configration (#15491)  2026-04-13 17:18:56 -07:00
7 changed files with 288 additions and 77 deletions

View File

@@ -14,6 +14,7 @@ import (
	"github.com/google/go-cmp/cmp"
	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/cmd/config"
)

type stubEditorRunner struct {
@@ -722,6 +723,59 @@ func TestLauncherClientFilterDisabledCloudModels_ChecksStatusOncePerInvocation(t
	}
}

+func TestSavedMatchesModels(t *testing.T) {
+	tests := []struct {
+		name   string
+		saved  *config.IntegrationConfig
+		models []string
+		want   bool
+	}{
+		{
+			name:   "nil saved",
+			saved:  nil,
+			models: []string{"llama3.2"},
+			want:   false,
+		},
+		{
+			name:   "identical order",
+			saved:  &config.IntegrationConfig{Models: []string{"llama3.2", "qwen3:8b"}},
+			models: []string{"llama3.2", "qwen3:8b"},
+			want:   true,
+		},
+		{
+			name:   "different order",
+			saved:  &config.IntegrationConfig{Models: []string{"llama3.2", "qwen3:8b"}},
+			models: []string{"qwen3:8b", "llama3.2"},
+			want:   false,
+		},
+		{
+			name:   "subset",
+			saved:  &config.IntegrationConfig{Models: []string{"llama3.2", "qwen3:8b"}},
+			models: []string{"llama3.2"},
+			want:   false,
+		},
+		{
+			name:   "nil models in saved with non-nil models",
+			saved:  &config.IntegrationConfig{Models: nil},
+			models: []string{"llama3.2"},
+			want:   false,
+		},
+		{
+			name:   "empty both",
+			saved:  &config.IntegrationConfig{Models: nil},
+			models: nil,
+			want:   true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := savedMatchesModels(tt.saved, tt.models); got != tt.want {
+				t.Fatalf("savedMatchesModels = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
func TestPrepareEditorIntegration_SavesOnlyAfterSuccessfulEdit(t *testing.T) {
	tmpDir := t.TempDir()
	setTestHome(t, tmpDir)
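To exercise just the new table test locally, something like the following should work (the `./cmd/...` package pattern is an assumption based on the `ollama/cmd/config` import above):

```
go test ./cmd/... -run 'TestSavedMatchesModels' -v
```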

View File

@@ -6,6 +6,7 @@ import (
	"fmt"
	"net/http"
	"os"
+	"slices"
	"strings"

	"github.com/ollama/ollama/api"
@@ -500,7 +501,7 @@ func (c *launcherClient) launchEditorIntegration(ctx context.Context, name strin
		return nil
	}

-	if needsConfigure || req.ModelOverride != "" {
+	if (needsConfigure || req.ModelOverride != "") && !savedMatchesModels(saved, models) {
		if err := prepareEditorIntegration(name, runner, editor, models); err != nil {
			return err
		}
@@ -846,6 +847,13 @@ func firstModel(models []string) string {
	return models[0]
}

+func savedMatchesModels(saved *config.IntegrationConfig, models []string) bool {
+	if saved == nil {
+		return false
+	}
+	return slices.Equal(saved.Models, models)
+}
+
func editorPreCheckedModels(saved *config.IntegrationConfig, override string) []string {
	if override == "" {
		if saved == nil {
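As context for the guard above: `slices.Equal` is order-sensitive and treats a nil slice as equal to an empty one, which is exactly the behavior the new `TestSavedMatchesModels` cases pin down. A minimal standalone sketch (not part of the change) illustrating those semantics:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	saved := []string{"llama3.2", "qwen3:8b"}

	fmt.Println(slices.Equal(saved, []string{"llama3.2", "qwen3:8b"})) // true: identical order
	fmt.Println(slices.Equal(saved, []string{"qwen3:8b", "llama3.2"})) // false: order matters
	fmt.Println(slices.Equal(saved, []string{"llama3.2"}))             // false: subset

	var nilModels []string // e.g. a saved config with Models: nil
	fmt.Println(slices.Equal(nilModels, []string{})) // true: nil equals empty
}
```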

View File

@@ -186,6 +186,11 @@ func (c *Openclaw) runChannelSetupPreflight(bin string) error {
	if !isInteractiveSession() {
		return nil
	}
+	// --yes is headless; channel setup spawns an interactive picker we can't
+	// auto-answer, so skip it. Users can run `openclaw channels add` later.
+	if currentLaunchConfirmPolicy.yes {
+		return nil
+	}

	for {
		if c.channelsConfigured() {
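In CLI terms, the workflow the comment describes would look roughly like this (command forms inferred from the flag and subcommand named in the diff, not taken verbatim from the docs):

```
ollama launch openclaw --yes   # headless launch; channel-setup preflight is skipped
openclaw channels add          # configure channels interactively later
```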

View File

@@ -1304,6 +1304,46 @@ func TestOpenclawChannelSetupPreflight(t *testing.T) {
		}
	})

+	t.Run("--yes skips preflight without channels configured", func(t *testing.T) {
+		tmpDir := t.TempDir()
+		setTestHome(t, tmpDir)
+		t.Setenv("PATH", tmpDir)
+		configDir := filepath.Join(tmpDir, ".openclaw")
+		if err := os.MkdirAll(configDir, 0o755); err != nil {
+			t.Fatal(err)
+		}
+		// Empty config = no channels configured. Without the --yes skip, the
+		// preflight would prompt and (on confirm) spawn `openclaw channels add`.
+		if err := os.WriteFile(filepath.Join(configDir, "openclaw.json"), []byte(`{}`), 0o644); err != nil {
+			t.Fatal(err)
+		}
+		bin := filepath.Join(tmpDir, "openclaw")
+		if err := os.WriteFile(bin, []byte("#!/bin/sh\nprintf '%s\\n' \"$*\" >> \"$HOME/invocations.log\"\n"), 0o755); err != nil {
+			t.Fatal(err)
+		}
+
+		oldInteractive := isInteractiveSession
+		isInteractiveSession = func() bool { return true }
+		defer func() { isInteractiveSession = oldInteractive }()
+
+		restore := withLaunchConfirmPolicy(launchConfirmPolicy{yes: true})
+		defer restore()
+
+		oldConfirmPrompt := DefaultConfirmPrompt
+		DefaultConfirmPrompt = func(prompt string, options ConfirmOptions) (bool, error) {
+			t.Fatalf("did not expect prompt in --yes mode: %s", prompt)
+			return false, nil
+		}
+		defer func() { DefaultConfirmPrompt = oldConfirmPrompt }()
+
+		if err := c.runChannelSetupPreflight("openclaw"); err != nil {
+			t.Fatalf("runChannelSetupPreflight() error = %v", err)
+		}
+		if _, err := os.Stat(filepath.Join(tmpDir, "invocations.log")); !os.IsNotExist(err) {
+			t.Fatalf("expected no channels add invocation in --yes mode, got err=%v", err)
+		}
+	})
+
	t.Run("set up later prompts once and exits", func(t *testing.T) {
		tmpDir := t.TempDir()
		setTestHome(t, tmpDir)

View File

@@ -1,16 +1,21 @@
package launch

import (
+	"context"
	"encoding/json"
	"fmt"
+	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"slices"
+	"strings"

+	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/cmd/internal/fileutil"
	"github.com/ollama/ollama/envconfig"
+	modeltype "github.com/ollama/ollama/types/model"
)

// OpenCode implements Runner and Editor for OpenCode integration.
@@ -229,6 +234,9 @@ func readModelJSONModels() []string {
}

func buildModelEntries(modelList []string) map[string]any {
+	client := api.NewClient(envconfig.Host(), http.DefaultClient)
+	ctx := context.Background()
+
	models := make(map[string]any)
	for _, model := range modelList {
		entry := map[string]any{
@@ -242,7 +250,49 @@ func buildModelEntries(modelList []string) map[string]any {
				}
			}
		}

+		applyOpenCodeReasoning(ctx, client, model, entry)
		models[model] = entry
	}

	return models
}
+
+// applyOpenCodeReasoning detects thinking capability and sets reasoning config
+// on the model entry. When the model supports thinking, it sets "reasoning": true
+// and configures variants for the OpenCode TUI:
+//   - GPT-OSS: supports variable effort levels (low/medium/high) and defaults to
+//     medium via options. Thinking cannot be turned off.
+//   - Other models: only support on/off. Disables built-in low/medium/high variants
+//     and adds a "none" variant so users can toggle thinking off via Ctrl+T.
+//
+// When the model does not support thinking, no reasoning config is set.
+func applyOpenCodeReasoning(ctx context.Context, client *api.Client, modelName string, entry map[string]any) {
+	resp, err := client.Show(ctx, &api.ShowRequest{Model: modelName})
+	if err != nil {
+		return
+	}
+	if slices.Contains(resp.Capabilities, modeltype.CapabilityThinking) {
+		entry["reasoning"] = true
+		if strings.Contains(modelName, "gpt-oss") {
+			// GPT-OSS models support variable thinking effort levels
+			// and cannot turn thinking off. Keep the built-in
+			// low/medium/high variants as-is and default to medium.
+			options, ok := entry["options"].(map[string]any)
+			if !ok {
+				options = make(map[string]any)
+			}
+			options["reasoningEffort"] = "medium"
+			entry["options"] = options
+		} else {
+			// Most models only support thinking on or off.
+			// Disable the built-in low/medium/high variants and add none.
+			entry["variants"] = map[string]any{
+				"none":   map[string]any{"reasoningEffort": "none"},
+				"low":    map[string]any{"disabled": true},
+				"medium": map[string]any{"disabled": true},
+				"high":   map[string]any{"disabled": true},
+			}
+		}
+	}
+}
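To make the two branches concrete, here is roughly the shape of the inline-config model entries this function would produce, based on the keys set above (the `name` field and the two model names are assumptions drawn from the tests and docs elsewhere in this diff):

```json
{
  "qwq": {
    "name": "qwq",
    "reasoning": true,
    "variants": {
      "none": { "reasoningEffort": "none" },
      "low": { "disabled": true },
      "medium": { "disabled": true },
      "high": { "disabled": true }
    }
  },
  "gpt-oss:120b-cloud": {
    "name": "gpt-oss:120b-cloud",
    "reasoning": true,
    "options": { "reasoningEffort": "medium" }
  }
}
```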

View File

@@ -3,6 +3,8 @@ package launch
import (
	"encoding/json"
	"fmt"
+	"net/http"
+	"net/http/httptest"
	"os"
	"path/filepath"
	"runtime"
@@ -241,6 +243,133 @@ func TestLookupCloudModelLimit(t *testing.T) {
	}
}

+// inlineConfigModel extracts a model entry from the inline config content.
+func inlineConfigModel(t *testing.T, content, model string) map[string]any {
+	t.Helper()
+	var cfg map[string]any
+	if err := json.Unmarshal([]byte(content), &cfg); err != nil {
+		t.Fatalf("configContent is not valid JSON: %v", err)
+	}
+	provider, _ := cfg["provider"].(map[string]any)
+	ollama, _ := provider["ollama"].(map[string]any)
+	models, _ := ollama["models"].(map[string]any)
+	entry, ok := models[model].(map[string]any)
+	if !ok {
+		t.Fatalf("model %s not found in inline config", model)
+	}
+	return entry
+}
+
+func TestOpenCodeEdit_ReasoningOnThinkingModel(t *testing.T) {
+	setTestHome(t, t.TempDir())
+	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/api/show" {
+			fmt.Fprintf(w, `{"capabilities":["thinking"],"model_info":{}}`)
+			return
+		}
+		w.WriteHeader(http.StatusNotFound)
+	}))
+	defer srv.Close()
+	t.Setenv("OLLAMA_HOST", srv.URL)
+
+	o := &OpenCode{}
+	if err := o.Edit([]string{"qwq"}); err != nil {
+		t.Fatal(err)
+	}
+
+	entry := inlineConfigModel(t, o.configContent, "qwq")
+	if entry["reasoning"] != true {
+		t.Error("expected reasoning = true for thinking model")
+	}
+	variants, ok := entry["variants"].(map[string]any)
+	if !ok {
+		t.Fatal("expected variants to be set")
+	}
+	none, ok := variants["none"].(map[string]any)
+	if !ok {
+		t.Fatal("expected none variant to be set")
+	}
+	if none["reasoningEffort"] != "none" {
+		t.Errorf("none variant reasoningEffort = %v, want none", none["reasoningEffort"])
+	}
+	// Built-in low/medium/high should be disabled
+	for _, level := range []string{"low", "medium", "high"} {
+		v, ok := variants[level].(map[string]any)
+		if !ok {
+			t.Errorf("expected %s variant to exist", level)
+			continue
+		}
+		if v["disabled"] != true {
+			t.Errorf("expected %s variant to be disabled", level)
+		}
+	}
+}
+
+func TestOpenCodeEdit_ReasoningLevelsOnGptOss(t *testing.T) {
+	setTestHome(t, t.TempDir())
+	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/api/show" {
+			fmt.Fprintf(w, `{"capabilities":["thinking"],"model_info":{}}`)
+			return
+		}
+		w.WriteHeader(http.StatusNotFound)
+	}))
+	defer srv.Close()
+	t.Setenv("OLLAMA_HOST", srv.URL)
+
+	o := &OpenCode{}
+	if err := o.Edit([]string{"gpt-oss:120b-cloud"}); err != nil {
+		t.Fatal(err)
+	}
+
+	entry := inlineConfigModel(t, o.configContent, "gpt-oss:120b-cloud")
+	if entry["reasoning"] != true {
+		t.Error("expected reasoning = true")
+	}
+	// GPT-OSS cannot turn thinking off and supports levels,
+	// so no custom variants should be written.
+	if entry["variants"] != nil {
+		t.Errorf("expected no variants for gpt-oss, got %v", entry["variants"])
+	}
+	// Should default to medium reasoning effort
+	opts, ok := entry["options"].(map[string]any)
+	if !ok {
+		t.Fatal("expected options to be set for gpt-oss")
+	}
+	if opts["reasoningEffort"] != "medium" {
+		t.Errorf("reasoningEffort = %v, want medium", opts["reasoningEffort"])
+	}
+}
+
+func TestOpenCodeEdit_NoReasoningOnNonThinkingModel(t *testing.T) {
+	setTestHome(t, t.TempDir())
+	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/api/show" {
+			fmt.Fprintf(w, `{"capabilities":[],"model_info":{}}`)
+			return
+		}
+		w.WriteHeader(http.StatusNotFound)
+	}))
+	defer srv.Close()
+	t.Setenv("OLLAMA_HOST", srv.URL)
+
+	o := &OpenCode{}
+	if err := o.Edit([]string{"llama3.2"}); err != nil {
+		t.Fatal(err)
+	}
+
+	entry := inlineConfigModel(t, o.configContent, "llama3.2")
+	if entry["reasoning"] != nil {
+		t.Errorf("expected no reasoning for non-thinking model, got %v", entry["reasoning"])
+	}
+	if entry["variants"] != nil {
+		t.Errorf("expected no variants for non-thinking model, got %v", entry["variants"])
+	}
+}
+
func TestFindOpenCode(t *testing.T) {
	t.Run("fallback to ~/.opencode/bin", func(t *testing.T) {
		tmpDir := t.TempDir()

View File

@@ -28,79 +28,4 @@ To configure without launching:
ollama launch opencode --config
```

-### Manual setup
-
-Add a configuration block to `~/.config/opencode/opencode.json`:
-
-```json
-{
-  "$schema": "https://opencode.ai/config.json",
-  "provider": {
-    "ollama": {
-      "npm": "@ai-sdk/openai-compatible",
-      "name": "Ollama",
-      "options": {
-        "baseURL": "http://localhost:11434/v1"
-      },
-      "models": {
-        "qwen3-coder": {
-          "name": "qwen3-coder"
-        }
-      }
-    }
-  }
-}
-```
-
-## Cloud Models
-
-`glm-4.7:cloud` is the recommended model for use with OpenCode.
-
-Add the cloud configuration to `~/.config/opencode/opencode.json`:
-
-```json
-{
-  "$schema": "https://opencode.ai/config.json",
-  "provider": {
-    "ollama": {
-      "npm": "@ai-sdk/openai-compatible",
-      "name": "Ollama",
-      "options": {
-        "baseURL": "http://localhost:11434/v1"
-      },
-      "models": {
-        "glm-4.7:cloud": {
-          "name": "glm-4.7:cloud"
-        }
-      }
-    }
-  }
-}
-```
-
-## Connecting to ollama.com
-
-1. Create an [API key](https://ollama.com/settings/keys) from ollama.com and export it as `OLLAMA_API_KEY`.
-2. Update `~/.config/opencode/opencode.json` to point to ollama.com:
-
-```json
-{
-  "$schema": "https://opencode.ai/config.json",
-  "provider": {
-    "ollama": {
-      "npm": "@ai-sdk/openai-compatible",
-      "name": "Ollama Cloud",
-      "options": {
-        "baseURL": "https://ollama.com/v1"
-      },
-      "models": {
-        "glm-4.7:cloud": {
-          "name": "glm-4.7:cloud"
-        }
-      }
-    }
-  }
-}
-```
-
-Run `opencode` in a new terminal to load the new settings.
+<Note>`ollama launch opencode` passes its configuration to OpenCode inline via the `OPENCODE_CONFIG_CONTENT` environment variable. OpenCode deep-merges its config sources on startup, so anything you declare in `~/.config/opencode/opencode.json` is still respected and available inside OpenCode. Models declared only in `opencode.json` won't appear in `ollama launch`'s model-selection menu.</Note>