Compare commits

...

2 Commits

Author SHA1 Message Date
Eva Ho 0cd8a0a442 launch: add codex model metadata catalog 2026-04-23 17:09:41 -04:00
Eva H 85ff8e4a21 launch: keep launch recommended models in a fixed canonical order (#15750) 2026-04-23 16:33:00 -04:00
5 changed files with 403 additions and 43 deletions

View File

@@ -1,13 +1,20 @@
package launch
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
"path/filepath"
"slices"
"strconv"
"strings"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/types/model"
"golang.org/x/mod/semver"
)
@@ -32,7 +39,7 @@ func (c *Codex) Run(model string, args []string) error {
return err
}
if err := ensureCodexConfig(); err != nil {
if err := ensureCodexConfig(model); err != nil {
return fmt.Errorf("failed to configure codex: %w", err)
}
@@ -46,9 +53,9 @@ func (c *Codex) Run(model string, args []string) error {
return cmd.Run()
}
// ensureCodexConfig writes a [profiles.ollama-launch] section to ~/.codex/config.toml
// with openai_base_url pointing to the local Ollama server.
func ensureCodexConfig() error {
// ensureCodexConfig writes a Codex profile and model catalog so Codex uses the
// local Ollama server and has model metadata available.
func ensureCodexConfig(modelName string) error {
home, err := os.UserHomeDir()
if err != nil {
return err
@@ -59,13 +66,18 @@ func ensureCodexConfig() error {
return err
}
catalogPath := filepath.Join(codexDir, "model.json")
if err := writeCodexModelCatalog(catalogPath, modelName); err != nil {
return err
}
configPath := filepath.Join(codexDir, "config.toml")
return writeCodexProfile(configPath)
return writeCodexProfile(configPath, catalogPath)
}
// writeCodexProfile ensures ~/.codex/config.toml has the ollama-launch profile
// and model provider sections with the correct base URL.
func writeCodexProfile(configPath string) error {
func writeCodexProfile(configPath, catalogPath string) error {
baseURL := envconfig.Host().String() + "/v1/"
sections := []struct {
@@ -78,6 +90,7 @@ func writeCodexProfile(configPath string) error {
fmt.Sprintf("openai_base_url = %q", baseURL),
`forced_login_method = "api"`,
fmt.Sprintf("model_provider = %q", codexProfileName),
fmt.Sprintf("model_catalog_json = %q", catalogPath),
},
},
{
@@ -121,6 +134,110 @@ func writeCodexProfile(configPath string) error {
return os.WriteFile(configPath, []byte(text), 0o644)
}
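
For orientation, here is a minimal sketch of the profile section these keys assemble. The profile name "ollama-launch" is confirmed by the tests below, while the base URL and catalog path are illustrative stand-ins for envconfig.Host() and the computed ~/.codex/model.json path.

package main

import (
	"fmt"
	"strings"
)

// Sketch of the [profiles.ollama-launch] block writeCodexProfile assembles.
// baseURL and catalogPath are illustrative, not guaranteed values.
func main() {
	baseURL := "http://127.0.0.1:11434/v1/"
	catalogPath := "/home/user/.codex/model.json"
	section := []string{
		"[profiles.ollama-launch]",
		fmt.Sprintf("openai_base_url = %q", baseURL),
		`forced_login_method = "api"`,
		`model_provider = "ollama-launch"`,
		fmt.Sprintf("model_catalog_json = %q", catalogPath),
	}
	fmt.Println(strings.Join(section, "\n"))
}
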
func writeCodexModelCatalog(catalogPath, modelName string) error {
entry := buildCodexModelEntry(modelName)
catalog := map[string]any{
"models": []any{entry},
}
data, err := json.MarshalIndent(catalog, "", " ")
if err != nil {
return err
}
return os.WriteFile(catalogPath, data, 0o644)
}
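
The catalog file itself is plain JSON with a single top-level "models" array. A minimal sketch of the layout, showing only a few illustrative fields of the entry (the full entry comes from buildCodexModelEntry below):

package main

import (
	"encoding/json"
	"fmt"
)

// Prints a miniature version of the model.json layout that
// writeCodexModelCatalog emits: {"models": [ ...one entry... ]}.
func main() {
	catalog := map[string]any{
		"models": []any{
			map[string]any{
				"slug":           "llama3.2",
				"display_name":   "llama3.2",
				"context_window": 131072,
			},
		},
	}
	data, _ := json.MarshalIndent(catalog, "", "  ")
	fmt.Println(string(data))
}
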
func buildCodexModelEntry(modelName string) map[string]any {
contextWindow := 0
hasVision := false
hasThinking := false
systemPrompt := ""
if l, ok := lookupCloudModelLimit(modelName); ok {
contextWindow = l.Context
}
client := api.NewClient(envconfig.Host(), http.DefaultClient)
resp, err := client.Show(context.Background(), &api.ShowRequest{Model: modelName})
if err == nil {
systemPrompt = resp.System
if slices.Contains(resp.Capabilities, model.CapabilityVision) {
hasVision = true
}
if slices.Contains(resp.Capabilities, model.CapabilityThinking) {
hasThinking = true
}
if !isCloudModelName(modelName) {
if n, ok := modelInfoContextLength(resp.ModelInfo); ok {
contextWindow = n
}
if resp.Details.Format != "safetensors" {
if ctxLen := envconfig.ContextLength(); ctxLen > 0 {
contextWindow = int(ctxLen)
}
if numCtx := parseNumCtx(resp.Parameters); numCtx > 0 {
contextWindow = numCtx
}
}
}
}
modalities := []string{"text"}
if hasVision {
modalities = append(modalities, "image")
}
reasoningLevels := []any{}
if hasThinking {
reasoningLevels = []any{
map[string]any{"effort": "low", "description": "Fast responses with lighter reasoning"},
map[string]any{"effort": "medium", "description": "Balances speed and reasoning depth"},
map[string]any{"effort": "high", "description": "Greater reasoning depth for complex problems"},
}
}
truncationMode := "bytes"
if isCloudModelName(modelName) {
truncationMode = "tokens"
}
return map[string]any{
"slug": modelName,
"display_name": modelName,
"context_window": contextWindow,
"apply_patch_tool_type": "function",
"shell_type": "default",
"visibility": "list",
"supported_in_api": true,
"priority": 0,
"truncation_policy": map[string]any{"mode": truncationMode, "limit": 10000},
"input_modalities": modalities,
"base_instructions": systemPrompt,
"support_verbosity": true,
"default_verbosity": "low",
"supports_parallel_tool_calls": false,
"supports_reasoning_summaries": hasThinking,
"supported_reasoning_levels": reasoningLevels,
"experimental_supported_tools": []any{},
}
}
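
The context-window selection woven through this function follows a strict precedence. As a hedged restatement, here is a hypothetical standalone helper; the signature is invented for illustration, and the numbers in the comments come from the tests later in this diff.

// resolveContextWindow restates the precedence implemented inline above:
// cloud limits win outright; otherwise the architectural context_length is
// the baseline, and for non-safetensors models OLLAMA_CONTEXT_LENGTH and
// then num_ctx override it.
func resolveContextWindow(arch, envCtx, numCtx int, safetensors, cloud bool, cloudLimit int) int {
	if cloud {
		return cloudLimit // e.g. 262144 for qwen3.5:cloud in the tests
	}
	ctx := arch // e.g. llama.context_length = 131072
	if !safetensors {
		if envCtx > 0 {
			ctx = envCtx // OLLAMA_CONTEXT_LENGTH
		}
		if numCtx > 0 {
			ctx = numCtx // the num_ctx parameter wins last
		}
	}
	return ctx
}
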
func parseNumCtx(parameters string) int {
for _, line := range strings.Split(parameters, "\n") {
fields := strings.Fields(line)
if len(fields) == 2 && fields[0] == "num_ctx" {
if v, err := strconv.ParseFloat(fields[1], 64); err == nil {
return int(v)
}
}
}
return 0
}
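
A hypothetical Go Example test for this helper, mirroring the TestParseNumCtx cases later in this diff; the outputs follow the ParseFloat-then-truncate behavior above:

func ExampleParseNumCtx() {
	fmt.Println(parseNumCtx("temperature 0.7\nnum_ctx 4096\ntop_p 0.9"))
	fmt.Println(parseNumCtx("num_ctx 8192.0")) // float values are truncated
	fmt.Println(parseNumCtx("num_ctx abc"))    // unparseable values are ignored
	// Output:
	// 4096
	// 8192
	// 0
}
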
func checkCodexVersion() error {
if _, err := exec.LookPath("codex"); err != nil {
return fmt.Errorf("codex is not installed, install with: npm install -g @openai/codex")

View File

@@ -1,6 +1,10 @@
package launch
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"slices"
@@ -37,8 +41,9 @@ func TestWriteCodexProfile(t *testing.T) {
t.Run("creates new file when none exists", func(t *testing.T) {
tmpDir := t.TempDir()
configPath := filepath.Join(tmpDir, "config.toml")
catalogPath := filepath.Join(tmpDir, "model.json")
if err := writeCodexProfile(configPath); err != nil {
if err := writeCodexProfile(configPath, catalogPath); err != nil {
t.Fatal(err)
}
@@ -63,6 +68,9 @@ func TestWriteCodexProfile(t *testing.T) {
if !strings.Contains(content, `model_provider = "ollama-launch"`) {
t.Error("missing model_provider key")
}
if !strings.Contains(content, fmt.Sprintf("model_catalog_json = %q", catalogPath)) {
t.Error("missing model_catalog_json key")
}
if !strings.Contains(content, "[model_providers.ollama-launch]") {
t.Error("missing [model_providers.ollama-launch] section")
}
@@ -74,10 +82,11 @@ func TestWriteCodexProfile(t *testing.T) {
t.Run("appends profile to existing file without profile", func(t *testing.T) {
tmpDir := t.TempDir()
configPath := filepath.Join(tmpDir, "config.toml")
catalogPath := filepath.Join(tmpDir, "model.json")
existing := "[some_other_section]\nkey = \"value\"\n"
os.WriteFile(configPath, []byte(existing), 0o644)
if err := writeCodexProfile(configPath); err != nil {
if err := writeCodexProfile(configPath, catalogPath); err != nil {
t.Fatal(err)
}
@@ -95,10 +104,11 @@ func TestWriteCodexProfile(t *testing.T) {
t.Run("replaces existing profile section", func(t *testing.T) {
tmpDir := t.TempDir()
configPath := filepath.Join(tmpDir, "config.toml")
catalogPath := filepath.Join(tmpDir, "model.json")
existing := "[profiles.ollama-launch]\nopenai_base_url = \"http://old:1234/v1/\"\n\n[model_providers.ollama-launch]\nname = \"Ollama\"\nbase_url = \"http://old:1234/v1/\"\n"
os.WriteFile(configPath, []byte(existing), 0o644)
if err := writeCodexProfile(configPath); err != nil {
if err := writeCodexProfile(configPath, catalogPath); err != nil {
t.Fatal(err)
}
@@ -119,10 +129,11 @@ func TestWriteCodexProfile(t *testing.T) {
t.Run("replaces profile while preserving following sections", func(t *testing.T) {
tmpDir := t.TempDir()
configPath := filepath.Join(tmpDir, "config.toml")
catalogPath := filepath.Join(tmpDir, "model.json")
existing := "[profiles.ollama-launch]\nopenai_base_url = \"http://old:1234/v1/\"\n[another_section]\nfoo = \"bar\"\n"
os.WriteFile(configPath, []byte(existing), 0o644)
if err := writeCodexProfile(configPath); err != nil {
if err := writeCodexProfile(configPath, catalogPath); err != nil {
t.Fatal(err)
}
@@ -143,10 +154,11 @@ func TestWriteCodexProfile(t *testing.T) {
t.Run("appends newline to file not ending with newline", func(t *testing.T) {
tmpDir := t.TempDir()
configPath := filepath.Join(tmpDir, "config.toml")
catalogPath := filepath.Join(tmpDir, "model.json")
existing := "[other]\nkey = \"val\""
os.WriteFile(configPath, []byte(existing), 0o644)
if err := writeCodexProfile(configPath); err != nil {
if err := writeCodexProfile(configPath, catalogPath); err != nil {
t.Fatal(err)
}
@@ -166,8 +178,9 @@ func TestWriteCodexProfile(t *testing.T) {
t.Setenv("OLLAMA_HOST", "http://myhost:9999")
tmpDir := t.TempDir()
configPath := filepath.Join(tmpDir, "config.toml")
catalogPath := filepath.Join(tmpDir, "model.json")
if err := writeCodexProfile(configPath); err != nil {
if err := writeCodexProfile(configPath, catalogPath); err != nil {
t.Fatal(err)
}
@@ -185,7 +198,7 @@ func TestEnsureCodexConfig(t *testing.T) {
tmpDir := t.TempDir()
setTestHome(t, tmpDir)
if err := ensureCodexConfig(); err != nil {
if err := ensureCodexConfig("llama3.2"); err != nil {
t.Fatal(err)
}
@@ -202,16 +215,25 @@ func TestEnsureCodexConfig(t *testing.T) {
if !strings.Contains(content, "openai_base_url") {
t.Error("missing openai_base_url key")
}
catalogPath := filepath.Join(tmpDir, ".codex", "model.json")
data, err = os.ReadFile(catalogPath)
if err != nil {
t.Fatalf("model.json not created: %v", err)
}
if !strings.Contains(string(data), `"slug": "llama3.2"`) {
t.Error("missing model catalog entry for selected model")
}
})
t.Run("is idempotent", func(t *testing.T) {
tmpDir := t.TempDir()
setTestHome(t, tmpDir)
if err := ensureCodexConfig(); err != nil {
if err := ensureCodexConfig("llama3.2"); err != nil {
t.Fatal(err)
}
if err := ensureCodexConfig(); err != nil {
if err := ensureCodexConfig("llama3.2"); err != nil {
t.Fatal(err)
}
@@ -227,3 +249,204 @@ func TestEnsureCodexConfig(t *testing.T) {
}
})
}
func TestParseNumCtx(t *testing.T) {
tests := []struct {
name string
parameters string
want int
}{
{"num_ctx set", "num_ctx 8192", 8192},
{"num_ctx with other params", "temperature 0.7\nnum_ctx 4096\ntop_p 0.9", 4096},
{"no num_ctx", "temperature 0.7\ntop_p 0.9", 0},
{"empty string", "", 0},
{"malformed value", "num_ctx abc", 0},
{"float value", "num_ctx 8192.0", 8192},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := parseNumCtx(tt.parameters); got != tt.want {
t.Errorf("parseNumCtx(%q) = %d, want %d", tt.parameters, got, tt.want)
}
})
}
}
func TestModelInfoContextLength(t *testing.T) {
tests := []struct {
name string
modelInfo map[string]any
want int
}{
{"float64 value", map[string]any{"qwen3_5_moe.context_length": float64(262144)}, 262144},
{"int value", map[string]any{"llama.context_length": 131072}, 131072},
{"no context_length key", map[string]any{"llama.embedding_length": float64(4096)}, 0},
{"empty map", map[string]any{}, 0},
{"nil map", nil, 0},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, _ := modelInfoContextLength(tt.modelInfo)
if got != tt.want {
t.Errorf("modelInfoContextLength() = %d, want %d", got, tt.want)
}
})
}
}
func TestBuildCodexModelEntryContextWindow(t *testing.T) {
tests := []struct {
name string
modelName string
showResponse string
envContextLen string
wantContext int
}{
{
name: "architectural context length as fallback",
modelName: "llama3.2",
showResponse: `{
"model_info": {"llama.context_length": 131072},
"details": {"format": "gguf"}
}`,
wantContext: 131072,
},
{
name: "OLLAMA_CONTEXT_LENGTH overrides architectural",
modelName: "llama3.2",
showResponse: `{
"model_info": {"llama.context_length": 131072},
"details": {"format": "gguf"}
}`,
envContextLen: "64000",
wantContext: 64000,
},
{
name: "num_ctx overrides OLLAMA_CONTEXT_LENGTH",
modelName: "llama3.2",
showResponse: `{
"model_info": {"llama.context_length": 131072},
"parameters": "num_ctx 8192",
"details": {"format": "gguf"}
}`,
envContextLen: "64000",
wantContext: 8192,
},
{
name: "num_ctx overrides architectural",
modelName: "llama3.2",
showResponse: `{
"model_info": {"llama.context_length": 131072},
"parameters": "num_ctx 32768",
"details": {"format": "gguf"}
}`,
wantContext: 32768,
},
{
name: "safetensors uses architectural context only",
modelName: "llama3.2",
showResponse: `{
"model_info": {"llama.context_length": 131072},
"parameters": "num_ctx 8192",
"details": {"format": "safetensors"}
}`,
envContextLen: "64000",
wantContext: 131072,
},
{
name: "cloud model uses hardcoded limits",
modelName: "qwen3.5:cloud",
showResponse: `{
"model_info": {"qwen3_5_moe.context_length": 131072},
"details": {"format": "gguf"}
}`,
envContextLen: "64000",
wantContext: 262144,
},
{
name: "vision and thinking capabilities",
modelName: "llama3.2",
showResponse: `{
"model_info": {"llama.context_length": 131072},
"details": {"format": "gguf"},
"capabilities": ["vision", "thinking"]
}`,
wantContext: 131072,
},
{
name: "system prompt passed through",
modelName: "llama3.2",
showResponse: `{
"model_info": {"llama.context_length": 131072},
"details": {"format": "gguf"},
"system": "You are a helpful assistant."
}`,
wantContext: 131072,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/api/show":
fmt.Fprint(w, tt.showResponse)
default:
http.NotFound(w, r)
}
}))
defer srv.Close()
t.Setenv("OLLAMA_HOST", srv.URL)
if tt.envContextLen != "" {
t.Setenv("OLLAMA_CONTEXT_LENGTH", tt.envContextLen)
} else {
t.Setenv("OLLAMA_CONTEXT_LENGTH", "")
}
entry := buildCodexModelEntry(tt.modelName)
gotContext, _ := entry["context_window"].(int)
if gotContext != tt.wantContext {
t.Errorf("context_window = %d, want %d", gotContext, tt.wantContext)
}
if tt.name == "vision and thinking capabilities" {
modalities, _ := entry["input_modalities"].([]string)
if !slices.Contains(modalities, "image") {
t.Error("expected image in input_modalities")
}
levels, _ := entry["supported_reasoning_levels"].([]any)
if len(levels) == 0 {
t.Error("expected non-empty supported_reasoning_levels")
}
}
if tt.name == "system prompt passed through" {
if got, _ := entry["base_instructions"].(string); got != "You are a helpful assistant." {
t.Errorf("base_instructions = %q, want %q", got, "You are a helpful assistant.")
}
}
if tt.name == "cloud model uses hardcoded limits" {
truncationPolicy, _ := entry["truncation_policy"].(map[string]any)
if mode, _ := truncationPolicy["mode"].(string); mode != "tokens" {
t.Errorf("truncation_policy mode = %q, want %q", mode, "tokens")
}
}
requiredKeys := []string{"slug", "display_name", "apply_patch_tool_type", "shell_type"}
for _, key := range requiredKeys {
if _, ok := entry[key]; !ok {
t.Errorf("missing required key %q", key)
}
}
if _, err := json.Marshal(entry); err != nil {
t.Errorf("entry is not JSON serializable: %v", err)
}
})
}
}

View File

@@ -318,10 +318,18 @@ func names(items []ModelItem) []string {
return out
}
func recommendedNames(extra ...string) []string {
out := make([]string, 0, len(recommendedModels)+len(extra))
for _, item := range recommendedModels {
out = append(out, item.Name)
}
return append(out, extra...)
}
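
A hypothetical companion test sketching how the helper composes; the expected slice matches the literal lists these rewritten tests replace, so it also pins down the canonical recommended order:

func TestRecommendedNamesComposition(t *testing.T) {
	// Extras are simply appended after the fixed recommended block.
	got := recommendedNames("llama3.2", "qwen2.5")
	want := []string{
		"kimi-k2.6:cloud", "qwen3.5:cloud", "glm-5.1:cloud",
		"minimax-m2.7:cloud", "gemma4", "qwen3.5",
		"llama3.2", "qwen2.5",
	}
	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("recommendedNames composition (-want +got):\n%s", diff)
	}
}
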
func TestBuildModelList_NoExistingModels(t *testing.T) {
items, _, _, _ := buildModelList(nil, nil, "")
want := []string{"kimi-k2.6:cloud", "qwen3.5:cloud", "glm-5.1:cloud", "minimax-m2.7:cloud", "gemma4", "qwen3.5"}
want := recommendedNames()
if diff := cmp.Diff(want, names(items)); diff != "" {
t.Errorf("with no existing models, items should be recommended in order (-want +got):\n%s", diff)
}
@@ -350,7 +358,7 @@ func TestBuildModelList_OnlyLocalModels_CloudRecsStillFirst(t *testing.T) {
// Cloud recs always come first among recommended, regardless of installed inventory.
// Cloud disablement is handled upstream in loadSelectableModels via filterCloudItems.
want := []string{"kimi-k2.6:cloud", "qwen3.5:cloud", "glm-5.1:cloud", "minimax-m2.7:cloud", "gemma4", "qwen3.5", "llama3.2", "qwen2.5"}
want := recommendedNames("llama3.2", "qwen2.5")
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("cloud recs pinned first even when no cloud models installed (-want +got):\n%s", diff)
}
@@ -366,13 +374,13 @@ func TestBuildModelList_BothCloudAndLocal_RegularSort(t *testing.T) {
got := names(items)
// All recs pinned at top (cloud before local in mixed case), then non-recs
want := []string{"kimi-k2.6:cloud", "qwen3.5:cloud", "glm-5.1:cloud", "minimax-m2.7:cloud", "gemma4", "qwen3.5", "llama3.2"}
want := recommendedNames("llama3.2")
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("recs pinned at top, cloud recs first in mixed case (-want +got):\n%s", diff)
}
}
func TestBuildModelList_PreCheckedFirst(t *testing.T) {
func TestBuildModelList_PreCheckedNonRecommendedFirstInMore(t *testing.T) {
existing := []modelInfo{
{Name: "llama3.2:latest", Remote: false},
{Name: "glm-5.1:cloud", Remote: true},
@@ -381,8 +389,9 @@ func TestBuildModelList_PreCheckedFirst(t *testing.T) {
items, _, _, _ := buildModelList(existing, []string{"llama3.2"}, "")
got := names(items)
if got[0] != "llama3.2" {
t.Errorf("pre-checked model should be first, got %v", got)
want := recommendedNames("llama3.2")
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("recommended block should stay fixed while checked non-recommended models lead More (-want +got):\n%s", diff)
}
}
@@ -457,7 +466,7 @@ func TestBuildModelList_ExistingCloudModelsNotPushedToBottom(t *testing.T) {
// gemma4 and glm-5.1:cloud are installed so they sort normally;
// qwen3.5:cloud and qwen3.5 are not installed so they go to the bottom
// All recs: cloud first in mixed case, then local, in rec order within each
want := []string{"kimi-k2.6:cloud", "qwen3.5:cloud", "glm-5.1:cloud", "minimax-m2.7:cloud", "gemma4", "qwen3.5"}
want := recommendedNames()
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("all recs, cloud first in mixed case (-want +got):\n%s", diff)
}
@@ -475,7 +484,7 @@ func TestBuildModelList_HasRecommendedCloudModel_OnlyNonInstalledAtBottom(t *tes
// kimi-k2.6:cloud is installed so it sorts normally;
// the rest of the recommendations are not installed so they go to the bottom
// All recs pinned at top (cloud first in mixed case), then non-recs
want := []string{"kimi-k2.6:cloud", "qwen3.5:cloud", "glm-5.1:cloud", "minimax-m2.7:cloud", "gemma4", "qwen3.5", "llama3.2"}
want := recommendedNames("llama3.2")
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("recs pinned at top, cloud first in mixed case (-want +got):\n%s", diff)
}
@@ -641,17 +650,32 @@ func TestBuildModelList_RecsAboveNonRecs(t *testing.T) {
}
}
func TestBuildModelList_CheckedBeforeRecs(t *testing.T) {
func TestBuildModelList_CheckedRecommendedDoesNotReshuffleRecommendedOrder(t *testing.T) {
existing := []modelInfo{
{Name: "llama3.2:latest", Remote: false},
{Name: "glm-5.1:cloud", Remote: true},
}
items, _, _, _ := buildModelList(existing, []string{"llama3.2"}, "")
items, _, _, _ := buildModelList(existing, []string{"qwen3.5:cloud", "glm-5.1:cloud"}, "")
got := names(items)
if got[0] != "llama3.2" {
t.Errorf("checked model should be first even before recs, got %v", got)
want := recommendedNames("llama3.2")
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("checked recommended models should not reshuffle the fixed recommended order (-want +got):\n%s", diff)
}
}
func TestBuildModelList_StaleSavedKimiK25DoesNotReshuffleRecommendedOrder(t *testing.T) {
existing := []modelInfo{
{Name: "kimi-k2.5:cloud", Remote: true},
}
items, _, _, _ := buildModelList(existing, []string{"kimi-k2.5:cloud", "qwen3.5:cloud", "glm-5.1:cloud", "minimax-m2.7:cloud"}, "kimi-k2.5:cloud")
got := names(items)
want := recommendedNames("kimi-k2.5:cloud")
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("stale saved kimi-k2.5 should stay in More without reshuffling the fixed recommended order (-want +got):\n%s", diff)
}
}

View File

@@ -13,6 +13,7 @@ import (
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/ollama/ollama/cmd/config"
)
@@ -1219,8 +1220,9 @@ func TestLaunchIntegration_EditorForceConfigure_FloatsCheckedModelsInPicker(t *t
if len(gotItems) == 0 {
t.Fatal("expected multi selector to receive items")
}
if gotItems[0] != "qwen3.5:cloud" {
t.Fatalf("expected checked models floated to top with qwen3.5:cloud first, got %v", gotItems)
wantItems := recommendedNames()
if diff := cmp.Diff(wantItems, gotItems); diff != "" {
t.Fatalf("expected fixed recommended order in selector items (-want +got):\n%s", diff)
}
if len(gotPreChecked) < 2 {
t.Fatalf("expected prechecked models to be preserved, got %v", gotPreChecked)

View File

@@ -361,18 +361,12 @@ func buildModelList(existing []modelInfo, preChecked []string, current string) (
}
if hasLocalModel || hasCloudModel {
// Keep the Recommended section pinned to recommendedModels order. Checked
// and default-model priority only apply within the More section.
slices.SortStableFunc(items, func(a, b ModelItem) int {
ac, bc := checked[a.Name], checked[b.Name]
aNew, bNew := notInstalled[a.Name], notInstalled[b.Name]
aRec, bRec := recRank[a.Name] > 0, recRank[b.Name] > 0
aCloud, bCloud := cloudModels[a.Name], cloudModels[b.Name]
if ac != bc {
if ac {
return -1
}
return 1
}
if aRec != bRec {
if aRec {
return -1
@@ -380,14 +374,14 @@ func buildModelList(existing []modelInfo, preChecked []string, current string) (
return 1
}
if aRec && bRec {
if aCloud != bCloud {
if aCloud {
return -1
}
return 1
}
return recRank[a.Name] - recRank[b.Name]
}
if ac != bc {
if ac {
return -1
}
return 1
}
// Among checked non-recommended items, put the default first
if ac && !aRec && current != "" {
aCurrent := a.Name == current