diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000000..d10e7b1c8c8f --- /dev/null +++ b/.gitmodules @@ -0,0 +1,15 @@ +[submodule "references/codex"] + path = references/codex + url = https://github.com/openai/codex +[submodule "references/cline"] + path = references/cline + url = https://github.com/cline/cline +[submodule "references/awesome-opencode"] + path = references/awesome-opencode + url = https://github.com/awesome-opencode/awesome-opencode +[submodule "references/OpenGUI"] + path = references/OpenGUI + url = https://github.com/akemmanuel/OpenGUI +[submodule "references/AionUi"] + path = references/AionUi + url = https://github.com/iOfficeAI/AionUi diff --git a/AI_REVIEW.md b/AI_REVIEW.md new file mode 100644 index 000000000000..bc3462933800 --- /dev/null +++ b/AI_REVIEW.md @@ -0,0 +1,1428 @@ +# 🔍 Code Review - 2/26/2026, 4:03:24 PM + +**Project:** AI Visual Code Review +**Generated by:** AI Visual Code Review v2.0 + +## 📊 Change Summary + +``` +docs/09-temp/codex-queue-steer-architecture.md | 319 +++++++++++++++++++++ + packages/app/src/components/prompt-input.tsx | 256 ++++++++++++++--- + .../app/src/context/global-sync/child-store.ts | 1 + + .../src/context/global-sync/event-reducer.test.ts | 1 + + .../app/src/context/global-sync/event-reducer.ts | 9 +- + packages/app/src/context/global-sync/types.ts | 3 + + packages/opencode/src/server/routes/session.ts | 117 ++++++++ + packages/opencode/src/session/prompt.ts | 51 ++++ + packages/opencode/src/session/steer.ts | 127 ++++++++ + packages/opencode/test/session/steer.test.ts | 235 +++++++++++++++ + 10 files changed, 1084 insertions(+), 35 deletions(-) +``` + +## 📝 Files Changed (10 selected) + + +### ✨ `docs/09-temp/codex-queue-steer-architecture.md` **[ADDED]** + +**Status:** ✅ **NEW FILE** - This file has been newly created + +**Type:** Documentation 📖 + +```diff +@@ -0,0 +1,319 @@ + 1 +# Codex Queue/Steer Architecture Analysis + 2 + + 3 +> Deep-dive into OpenAI 
Codex CLI's queue/steer mechanism for mid-turn user interaction. + 4 +> Source: `references/codex/` submodule + 5 + + 6 +--- + 7 + + 8 +## Overview + 9 + + 10 +Codex implements a **dual-input model** that lets users interact with the agent **during** an active turn, not just between turns: + 11 + + 12 +| Action | Keybinding | Behavior | When Turn Active | + 13 +|--------|-----------|----------|-----------------| + 14 +| **Queue** | `Enter` | Enqueue message for next turn boundary | Message waits in queue, displayed in UI | + 15 +| **Steer** | `⌘Enter` / `Enter` (steer-mode) | Inject input into active turn immediately | Message sent to model in current context | + 16 + + 17 +--- + 18 + + 19 +## Architecture Layers + 20 + + 21 +``` + 22 +┌─────────────────────────────────────────────────┐ + 23 +│ TUI Layer (tui/src/) │ + 24 +│ ┌─────────────────────────────────────────┐ │ + 25 +│ │ ChatComposer │ │ + 26 +│ │ Enter → InputResult::Submitted (steer) │ │ + 27 +│ │ Tab → InputResult::Queued │ │ + 28 +│ └─────────────────┬───────────────────────┘ │ + 29 +│ │ │ + 30 +│ ┌─────────────────▼───────────────────────┐ │ + 31 +│ │ QueuedUserMessages widget │ │ + 32 +│ │ Shows queued messages with "↳" prefix │ │ + 33 +│ │ Alt+Up to pop back into composer │ │ + 34 +│ └─────────────────────────────────────────┘ │ + 35 +└────────────────────┬────────────────────────────┘ + 36 + │ + 37 +┌────────────────────▼────────────────────────────┐ + 38 +│ App Server Protocol (app-server-protocol/) │ + 39 +│ │ + 40 +│ turn/start → TurnStartParams (new turn) │ + 41 +│ turn/steer → TurnSteerParams (mid-turn) │ + 42 +│ │ + 43 +│ TurnSteerParams { │ + 44 +│ thread_id: String, │ + 45 +│ input: Vec, │ + 46 +│ expected_turn_id: String, // guard │ + 47 +│ } │ + 48 +│ │ + 49 +│ TurnSteerResponse { │ + 50 +│ turn_id: String, // confirms active turn │ + 51 +│ } │ + 52 +└────────────────────┬────────────────────────────┘ + 53 + │ + 54 +┌────────────────────▼────────────────────────────┐ + 55 +│ App Server 
(app-server/src/) │ + 56 +│ codex_message_processor.rs │ + 57 +│ │ + 58 +│ async fn turn_steer(&self, req_id, params) { │ + 59 +│ let thread = load_thread(params.thread_id); │ + 60 +│ thread.steer_input( │ + 61 +│ mapped_items, │ + 62 +│ Some(¶ms.expected_turn_id) │ + 63 +│ ); │ + 64 +│ // Returns turn_id or error: │ + 65 +│ // "no active turn to steer" │ + 66 +│ } │ + 67 +└────────────────────┬────────────────────────────┘ + 68 + │ + 69 +┌────────────────────▼────────────────────────────┐ + 70 +│ Core Engine (core/src/codex.rs) │ + 71 +│ │ + 72 +│ Session::steer_input(input, expected_turn_id) │ + 73 +│ 1. Validate input not empty │ + 74 +│ 2. Lock active_turn mutex │ + 75 +│ 3. Verify active turn exists │ + 76 +│ 4. Check expected_turn_id matches │ + 77 +│ 5. Lock turn_state │ + 78 +│ 6. push_pending_input(input) ← KEY STEP │ + 79 +│ 7. Return active turn_id │ + 80 +└────────────────────┬────────────────────────────┘ + 81 + │ + 82 +┌────────────────────▼────────────────────────────┐ + 83 +│ Turn State (core/src/state/turn.rs) │ + 84 +│ │ + 85 +│ struct TurnState { │ + 86 +│ pending_input: Vec, │ + 87 +│ } │ + 88 +│ │ + 89 +│ push_pending_input(item) → appends to vec │ + 90 +│ take_pending_input() → drains vec │ + 91 +│ has_pending_input() → checks non-empty │ + 92 +└────────────────────┬────────────────────────────┘ + 93 + │ + 94 +┌────────────────────▼────────────────────────────┐ + 95 +│ Task Loop (core/src/codex.rs ~L4970) │ + 96 +│ │ + 97 +│ loop { │ + 98 +│ // At each iteration, drain pending input │ + 99 +│ let pending = sess.get_pending_input().await; │ + 100 +│ if !pending.is_empty() { │ + 101 +│ // Record as conversation items │ + 102 +│ // → injected into model context │ + 103 +│ for item in pending { │ + 104 +│ record_user_prompt_and_emit_turn_item(); │ + 105 +│ } │ + 106 +│ } │ + 107 +│ // ... send to model, process response ... 
│ + 108 +│ // On ResponseEvent::Completed: │ + 109 +│ needs_follow_up |= has_pending_input(); │ + 110 +│ // If follow_up needed → loop continues │ + 111 +│ } │ + 112 +└─────────────────────────────────────────────────┘ + 113 +``` + 114 + + 115 +--- + 116 + + 117 +## Core Mechanism: `steer_input` + 118 + + 119 +The heart of steer is `Session::steer_input()` in `core/src/codex.rs`: + 120 + + 121 +```rust + 122 +pub async fn steer_input( + 123 + &self, + 124 + input: Vec, + 125 + expected_turn_id: Option<&str>, + 126 +) -> Result { + 127 + if input.is_empty() { + 128 + return Err(SteerInputError::EmptyInput); + 129 + } + 130 + let mut active = self.active_turn.lock().await; + 131 + let Some(active_turn) = active.as_mut() else { + 132 + return Err(SteerInputError::NoActiveTurn(input)); + 133 + }; + 134 + let Some((active_turn_id, _)) = active_turn.tasks.first() else { + 135 + return Err(SteerInputError::NoActiveTurn(input)); + 136 + }; + 137 + if let Some(expected_turn_id) = expected_turn_id + 138 + && expected_turn_id != active_turn_id + 139 + { + 140 + return Err(SteerInputError::ExpectedTurnMismatch { + 141 + expected: expected_turn_id.to_string(), + 142 + actual: active_turn_id.clone(), + 143 + }); + 144 + } + 145 + let mut turn_state = active_turn.turn_state.lock().await; + 146 + turn_state.push_pending_input(input.into()); + 147 + Ok(active_turn_id.clone()) + 148 +} + 149 +``` + 150 + + 151 +### Key Design Decisions + 152 + + 153 +1. **Non-blocking injection**: `steer_input` just pushes to a `Vec` — it doesn't interrupt or cancel the model. The model's active response completes naturally. + 154 + + 155 +2. **Consumption at loop boundary**: The task loop checks `pending_input` at the **top of each iteration**. After the model finishes a response, if pending input exists, it gets recorded as conversation items and the model is called again with the updated context. + 156 + + 157 +3. 
**`needs_follow_up` flag**: When a model response completes (`ResponseEvent::Completed`), if there's pending input, the loop sets `needs_follow_up = true` and continues instead of ending the turn. + 158 + + 159 +4. **Turn ID validation**: The `expected_turn_id` field prevents race conditions — the steer request fails if the turn has changed between the user pressing Enter and the server processing the request. + 160 + + 161 +--- + 162 + + 163 +## Error Types + 164 + + 165 +```rust + 166 +pub enum SteerInputError { + 167 + NoActiveTurn(Vec), // No model turn running + 168 + ExpectedTurnMismatch { // Turn changed since request + 169 + expected: String, + 170 + actual: String, + 171 + }, + 172 + EmptyInput, // Nothing to inject + 173 +} + 174 +``` + 175 + + 176 +When `NoActiveTurn` occurs, the app-server falls back — the input that failed to steer gets queued for the next `turn/start`. + 177 + + 178 +--- + 179 + + 180 +## Queue vs Steer: Detailed Comparison + 181 + + 182 +### Queue (Tab / Enter in legacy mode) + 183 + + 184 +1. User types message, presses Tab (or Enter in non-steer mode) + 185 +2. TUI returns `InputResult::Queued { text, text_elements }` + 186 +3. Message stored in `QueuedUserMessages.messages: Vec` + 187 +4. Rendered in UI with `↳` prefix, dimmed/italic + 188 +5. User can pop with Alt+Up to edit + 189 +6. When current turn completes → queued messages become the next `turn/start` + 190 + + 191 +### Steer (Enter in steer mode / ⌘Enter) + 192 + + 193 +1. User types message, presses Enter + 194 +2. TUI returns `InputResult::Submitted { text, text_elements }` + 195 +3. App sends `turn/steer` RPC to server + 196 +4. Server calls `thread.steer_input()` → pushes to `pending_input` + 197 +5. Model's current response continues to completion + 198 +6. At next task loop iteration, pending input is drained and recorded + 199 +7. Model sees the user's steer message in context → generates follow-up + 200 +8. 
**All within the same turn** — no new turn boundary + 201 + + 202 +### Critical Difference + 203 + + 204 +| Aspect | Queue | Steer | + 205 +|--------|-------|-------| + 206 +| **Timing** | After turn ends | During active turn | + 207 +| **Turn boundary** | Creates new turn | Same turn continues | + 208 +| **Model sees it** | On next turn start | At next loop iteration | + 209 +| **Cancels response** | No (waits) | No (appends to context) | + 210 +| **UI display** | Queued messages widget | Injected into chat transcript | + 211 +| **Fallback** | N/A | Falls back to queue if no active turn | + 212 + + 213 +--- + 214 + + 215 +## Turn Lifecycle with Steer + 216 + + 217 +``` + 218 +Turn Start (user submits prompt) + 219 + │ + 220 + ├─→ Model generates response... + 221 + │ │ + 222 + │ │ ← User presses Enter (steer) + 223 + │ │ → steer_input() pushes to pending_input + 224 + │ │ + 225 + │ ▼ + 226 + │ Response completes + 227 + │ │ + 228 + │ ├─→ has_pending_input()? YES + 229 + │ │ → needs_follow_up = true + 230 + │ │ + 231 + │ ▼ + 232 + │ Loop continues → drain pending_input + 233 + │ → Record steered message as conversation item + 234 + │ → Model sees: [original prompt, response, steered message] + 235 + │ → Model generates new response with full context + 236 + │ │ + 237 + │ ├─→ has_pending_input()? NO + 238 + │ │ → needs_follow_up = false + 239 + │ ▼ + 240 + │ Turn Complete + 241 + │ + 242 + └─→ Queued messages (if any) → next turn/start + 243 +``` + 244 + + 245 +--- + 246 + + 247 +## Turn Completion & Leftover Input + 248 + + 249 +When a task finishes (`task_finished()` in `core/src/tasks/mod.rs`): + 250 + + 251 +```rust + 252 +// 1. Lock active turn + 253 +let mut active = self.active_turn.lock().await; + 254 +// 2. Take any remaining pending input + 255 +let pending_input = ts.take_pending_input(); + 256 +// 3. Clear active turn + 257 +*active = None; + 258 +// 4. 
Record leftover input as conversation items + 259 +if !pending_input.is_empty() { + 260 + record_conversation_items(&turn_context, &pending_response_items); + 261 +} + 262 +// 5. Emit TurnComplete event + 263 +``` + 264 + + 265 +This ensures steered input is **never lost** — even if the turn ends before the pending input could be consumed by the model loop. + 266 + + 267 +--- + 268 + + 269 +## Feature Flag: `steer_enabled` + 270 + + 271 +Steer is gated behind `Feature::Steer` in the TUI: + 272 + + 273 +```rust + 274 +// When steer_enabled == true: + 275 +// Enter → Submitted (steer immediately) + 276 +// Tab → Queued (wait for turn end) + 277 +// + 278 +// When steer_enabled == false (legacy): + 279 +// Enter → Queued + 280 +// Tab → Queued + 281 +``` + 282 + + 283 +--- + 284 + + 285 +## Implications for OpenCode + 286 + + 287 +### What OpenCode Currently Has + 288 +- Session/turn model with `processor.ts` handling model interaction + 289 +- Parallel agents via `task.ts` tool + 290 +- No mid-turn input injection + 291 + + 292 +### What Queue/Steer Would Add + 293 +1. **Pending input buffer** on the session/turn state + 294 +2. **Steer RPC** that pushes to the buffer while model is running + 295 +3. **Loop-boundary drain** that checks for pending input after each model response + 296 +4. **Follow-up continuation** instead of ending the turn when input is pending + 297 +5. **UI queue widget** showing messages waiting for the current turn to finish + 298 +6. 
**Fallback path**: steer → queue if no active turn + 299 + + 300 +### Key Implementation Points + 301 +- `steer_input()` is a **lock-based, non-cancelling** approach — it doesn't abort the model stream + 302 +- Pending input is consumed at the **top of the agentic loop**, not mid-stream + 303 +- The model sees steered input as additional conversation items on its next iteration + 304 +- `expected_turn_id` prevents stale steer requests from affecting wrong turns + 305 +- Queued messages are a purely UI-side concept until they become a `turn/start` + 306 + + 307 +--- + 308 + + 309 +## References + 310 + + 311 +- Protocol types: `codex-rs/app-server-protocol/src/protocol/v2.rs` + 312 +- Core steer: `codex-rs/core/src/codex.rs` (L3377-3406) + 313 +- Turn state: `codex-rs/core/src/state/turn.rs` (L77-163) + 314 +- Task loop drain: `codex-rs/core/src/codex.rs` (L4970-5000) + 315 +- Follow-up flag: `codex-rs/core/src/codex.rs` (L6364) + 316 +- Task completion: `codex-rs/core/src/tasks/mod.rs` (L190-230) + 317 +- App server handler: `codex-rs/app-server/src/codex_message_processor.rs` + 318 +- TUI queue widget: `codex-rs/tui/src/bottom_pane/queued_user_messages.rs` + 319 +- TUI composer: `codex-rs/tui/src/public_widgets/composer_input.rs` + 320 + +``` + + +### 📄 `packages/app/src/components/prompt-input.tsx` + +**Type:** TypeScript React Component ⚛️ + +```diff +@@ -1,6 +1,6 @@ + 1 1 import { useFilteredList } from "@opencode-ai/ui/hooks" + 2 2 import { showToast } from "@opencode-ai/ui/toast" + 3 -import { createEffect, on, Component, Show, onCleanup, Switch, Match, createMemo, createSignal } from "solid-js" + 3 +import { createEffect, on, Component, Show, For, onCleanup, Switch, Match, createMemo, createSignal } from "solid-js" + 4 4 import { createStore } from "solid-js/store" + 5 5 import { createFocusSignal } from "@solid-primitives/active-element" + 6 6 import { useLocal } from "@/context/local" +@@ -215,6 +215,7 @@ export const PromptInput: Component = (props) => { 
+215 215 }, +216 216 ) +217 217 const working = createMemo(() => status()?.type !== "idle") + 218 + const steerQueue = createMemo(() => sync.data.steer_queue[params.id ?? ""] ?? []) +218 219 const imageAttachments = createMemo(() => +219 220 prompt.current().filter((part): part is ImageAttachmentPart => part.type === "image"), +220 221 ) +@@ -987,9 +988,37 @@ export const PromptInput: Component = (props) => { +987 988 } +988 989 } +989 990 +990 - // Handle Shift+Enter BEFORE IME check - Shift+Enter is never used for IME input +991 - // and should always insert a newline regardless of composition state + 991 + // Handle Shift+Enter: when working with text, send as "steer" (inject mid-turn) +992 992 if (event.key === "Enter" && event.shiftKey) { + 993 + if (working() && params.id && store.mode === "normal") { + 994 + const text = prompt + 995 + .current() + 996 + .map((p) => ("content" in p ? p.content : "")) + 997 + .join("") + 998 + .trim() + 999 + if (text.length > 0) { + 1000 + event.preventDefault() + 1001 + const sessionID = params.id + 1002 + fetch(`${sdk.url}/session/${sessionID}/steer`, { + 1003 + method: "POST", + 1004 + headers: { "Content-Type": "application/json" }, + 1005 + body: JSON.stringify({ text, mode: "steer" }), + 1006 + }).catch(() => { + 1007 + showToast({ + 1008 + title: "Failed to steer", + 1009 + description: "Could not inject message into current turn", + 1010 + }) + 1011 + }) + 1012 + prompt.reset() + 1013 + clearEditor() + 1014 + showToast({ + 1015 + title: "Steering", + 1016 + description: "Will be injected at the next step of the current turn", + 1017 + }) + 1018 + return + 1019 + } + 1020 + } + 1021 + // Default: insert newline when not working +993 1022 addPart({ type: "text", content: "\n", start: 0, end: 0 }) +994 1023 event.preventDefault() +995 1024 return +@@ -1056,6 +1085,38 @@ export const PromptInput: Component = (props) => { +1056 1085 +1057 1086 // Note: Shift+Enter is handled earlier, before IME check +1058 1087 if 
(event.key === "Enter" && !event.shiftKey) { + 1088 + // When busy: Enter queues a steer message instead of normal submit + 1089 + if (working() && params.id && store.mode === "normal") { + 1090 + const text = prompt + 1091 + .current() + 1092 + .map((p) => ("content" in p ? p.content : "")) + 1093 + .join("") + 1094 + .trim() + 1095 + if (text.length > 0) { + 1096 + event.preventDefault() + 1097 + const sessionID = params.id + 1098 + fetch(`${sdk.url}/session/${sessionID}/steer`, { + 1099 + method: "POST", + 1100 + headers: { "Content-Type": "application/json" }, + 1101 + body: JSON.stringify({ text }), + 1102 + }).catch(() => { + 1103 + showToast({ + 1104 + title: "Failed to queue message", + 1105 + description: "Could not steer the session", + 1106 + }) + 1107 + }) + 1108 + prompt.reset() + 1109 + clearEditor() + 1110 + showToast({ + 1111 + title: "Message queued", + 1112 + description: "Will be injected when the model finishes its current step", + 1113 + }) + 1114 + return + 1115 + } + 1116 + // Empty text while working → abort + 1117 + abort() + 1118 + return + 1119 + } +1059 1120 handleSubmit(event) +1060 1121 } +1061 1122 } +@@ -1113,6 +1174,35 @@ export const PromptInput: Component = (props) => { +1113 1174 onRemove={removeImageAttachment} +1114 1175 removeLabel={language.t("prompt.attachment.remove")} +1115 1176 /> + 1177 + 0}> + 1178 +
+ 1179 +
+ 1180 + + 1181 + Queued ({steerQueue().length}) + 1182 +
+ 1183 + + 1184 + {(item) => ( + 1185 +
+ 1186 + {item.text} + 1187 + + 1201 +
+ 1202 + )} + 1203 +
+ 1204 +
+ 1205 +
+1116 1206
{ +@@ -1206,37 +1296,135 @@ export const PromptInput: Component = (props) => { +1206 1296 +1207 1297 +1208 1298 +1209 - +1214 - +1215 -
+1216 - {language.t("prompt.action.stop")} +1217 - {language.t("common.key.esc")} +1218 -
+1219 -
+1220 - +1221 -
+1222 - {language.t("prompt.action.send")} +1223 - +1224 -
+1225 -
+1226 - +1227 - } +1228 - > +1229 - +1239 -
+ 1299 + + 1300 + + 1301 +
+ 1302 + + 1306 +
+ 1307 + Queue + 1308 + + 1309 +
+ 1310 + Send after current response finishes + 1311 +
+ 1312 + } + 1313 + > + 1314 + { + 1322 + const text = prompt + 1323 + .current() + 1324 + .map((p) => ("content" in p ? p.content : "")) + 1325 + .join("") + 1326 + .trim() + 1327 + if (!text || !params.id) return + 1328 + fetch(`${sdk.url}/session/${params.id}/steer`, { + 1329 + method: "POST", + 1330 + headers: { "Content-Type": "application/json" }, + 1331 + body: JSON.stringify({ text, mode: "queue" }), + 1332 + }).catch(() => { + 1333 + showToast({ + 1334 + title: "Failed to queue message", + 1335 + description: "Could not queue the message", + 1336 + }) + 1337 + }) + 1338 + prompt.reset() + 1339 + clearEditor() + 1340 + showToast({ + 1341 + title: "Message queued", + 1342 + description: "Will be sent when the model finishes its current response", + 1343 + }) + 1344 + }} + 1345 + /> + 1346 + + 1347 + + 1351 +
+ 1352 + Steer + 1353 + ⇧⏎ + 1354 +
+ 1355 + Inject into current turn at next step + 1356 +
+ 1357 + } + 1358 + > + 1359 + { + 1367 + const text = prompt + 1368 + .current() + 1369 + .map((p) => ("content" in p ? p.content : "")) + 1370 + .join("") + 1371 + .trim() + 1372 + if (!text || !params.id) return + 1373 + fetch(`${sdk.url}/session/${params.id}/steer`, { + 1374 + method: "POST", + 1375 + headers: { "Content-Type": "application/json" }, + 1376 + body: JSON.stringify({ text, mode: "steer" }), + 1377 + }).catch(() => { + 1378 + showToast({ + 1379 + title: "Failed to steer", + 1380 + description: "Could not inject message into current turn", + 1381 + }) + 1382 + }) + 1383 + prompt.reset() + 1384 + clearEditor() + 1385 + showToast({ + 1386 + title: "Steering", + 1387 + description: "Will be injected at the next step of the current turn", + 1388 + }) + 1389 + }} + 1390 + /> + 1391 + + 1392 + + 1393 + + 1394 + + 1395 + + 1400 + + 1401 +
+ 1402 + {language.t("prompt.action.stop")} + 1403 + {language.t("common.key.esc")} + 1404 +
+ 1405 +
+ 1406 + + 1407 +
+ 1408 + {language.t("prompt.action.send")} + 1409 + + 1410 +
+ 1411 +
+ 1412 + + 1413 + } + 1414 + > + 1415 + + 1425 +
+ 1426 +
+ 1427 + +1240 1428 +1241 1429 +1242 1430 +1243 1431 + +``` + + +### 📄 `packages/app/src/context/global-sync/child-store.ts` + +**Type:** TypeScript Source File 📘 + +```diff +@@ -167,6 +167,7 @@ export function createChildStoreManager(input: { +167 167 session: [], +168 168 sessionTotal: 0, +169 169 session_status: {}, + 170 + steer_queue: {}, +170 171 session_diff: {}, +171 172 todo: {}, +172 173 permission: {}, +173 174 + +``` + + +### 📄 `packages/app/src/context/global-sync/event-reducer.test.ts` + +**Type:** TypeScript Source File 📘 + +```diff +@@ -75,6 +75,7 @@ const baseState = (input: Partial = {}) => + 75 75 todo: {}, + 76 76 permission: {}, + 77 77 question: {}, + 78 + steer_queue: {}, + 78 79 mcp: {}, + 79 80 lsp: [], + 80 81 vcs: undefined, + 81 82 + +``` + + +### 📄 `packages/app/src/context/global-sync/event-reducer.ts` + +**Type:** TypeScript Source File 📘 + +```diff +@@ -52,7 +52,8 @@ function cleanupSessionCaches( + 52 52 store.todo[sessionID] !== undefined || + 53 53 store.permission[sessionID] !== undefined || + 54 54 store.question[sessionID] !== undefined || + 55 - store.session_status[sessionID] !== undefined + 55 + store.session_status[sessionID] !== undefined || + 56 + store.steer_queue[sessionID] !== undefined + 56 57 setSessionTodo?.(sessionID, undefined) + 57 58 if (!hasAny) return + 58 59 setStore( +@@ -71,6 +72,7 @@ function cleanupSessionCaches( + 71 72 delete draft.permission[sessionID] + 72 73 delete draft.question[sessionID] + 73 74 delete draft.session_status[sessionID] + 75 + delete draft.steer_queue[sessionID] + 74 76 }), + 75 77 ) + 76 78 } +@@ -164,6 +166,11 @@ export function applyDirectoryEvent(input: { +164 166 input.setStore("session_status", props.sessionID, reconcile(props.status)) +165 167 break +166 168 } + 169 + case "session.queue.changed": { + 170 + const props = event.properties as { sessionID: string; queue: { id: string; text: string; time: number; mode: "queue" | "steer" }[] } + 171 + input.setStore("steer_queue", 
props.sessionID, reconcile(props.queue, { key: "id" })) + 172 + break + 173 + } +167 174 case "message.updated": { +168 175 const info = (event.properties as { info: Message }).info +169 176 const messages = input.store.message[info.sessionID] +170 177 + +``` + + +### 📄 `packages/app/src/context/global-sync/types.ts` + +**Type:** TypeScript Source File 📘 + +```diff +@@ -46,6 +46,9 @@ export type State = { + 46 46 session_status: { + 47 47 [sessionID: string]: SessionStatus + 48 48 } + 49 + steer_queue: { + 50 + [sessionID: string]: { id: string; text: string; time: number; mode: "queue" | "steer" }[] + 51 + } + 49 52 session_diff: { + 50 53 [sessionID: string]: FileDiff[] + 51 54 } + 52 55 + +``` + + +### 📄 `packages/opencode/src/server/routes/session.ts` + +**Type:** TypeScript Source File 📘 + +```diff +@@ -14,6 +14,7 @@ import { Agent } from "../../agent/agent" + 14 14 import { Snapshot } from "@/snapshot" + 15 15 import { Log } from "../../util/log" + 16 16 import { PermissionNext } from "@/permission/next" + 17 +import { SessionSteer } from "@/session/steer" + 17 18 import { errors } from "../error" + 18 19 import { lazy } from "../../util/lazy" + 19 20 +@@ -933,6 +934,122 @@ export const SessionRoutes = lazy(() => +933 934 return c.json(session) +934 935 }, +935 936 ) + 937 + .post( + 938 + "/:sessionID/steer", + 939 + describeRoute({ + 940 + summary: "Steer session", + 941 + description: + 942 + "Push a message into the session's pending input buffer. If the session is busy, the message will be injected at the next agentic loop boundary. 
If idle, it is queued for the next turn.", + 943 + operationId: "session.steer", + 944 + responses: { + 945 + 200: { + 946 + description: "Queued message", + 947 + content: { + 948 + "application/json": { + 949 + schema: resolver( + 950 + z.object({ + 951 + id: z.string(), + 952 + text: z.string(), + 953 + time: z.number(), + 954 + mode: z.enum(["queue", "steer"]), + 955 + }), + 956 + ), + 957 + }, + 958 + }, + 959 + }, + 960 + ...errors(400, 404), + 961 + }, + 962 + }), + 963 + validator( + 964 + "param", + 965 + z.object({ + 966 + sessionID: z.string().meta({ description: "Session ID" }), + 967 + }), + 968 + ), + 969 + validator( + 970 + "json", + 971 + z.object({ + 972 + text: z.string().min(1).meta({ description: "The message text to inject" }), + 973 + mode: z.enum(["queue", "steer"]).optional().default("queue").meta({ description: "queue waits for turn end, steer injects mid-turn" }), + 974 + }), + 975 + ), + 976 + async (c) => { + 977 + const sessionID = c.req.valid("param").sessionID + 978 + const body = c.req.valid("json") + 979 + const entry = SessionSteer.push(sessionID, body.text, body.mode) + 980 + return c.json(entry) + 981 + }, + 982 + ) + 983 + .get( + 984 + "/:sessionID/steer", + 985 + describeRoute({ + 986 + summary: "Get steer queue", + 987 + description: "List all pending steered messages for a session without draining the queue.", + 988 + operationId: "session.steer.list", + 989 + responses: { + 990 + 200: { + 991 + description: "Pending steered messages", + 992 + content: { + 993 + "application/json": { + 994 + schema: resolver( + 995 + z.array( + 996 + z.object({ + 997 + id: z.string(), + 998 + text: z.string(), + 999 + time: z.number(), + 1000 + mode: z.enum(["queue", "steer"]), + 1001 + }), + 1002 + ), + 1003 + ), + 1004 + }, + 1005 + }, + 1006 + }, + 1007 + ...errors(400, 404), + 1008 + }, + 1009 + }), + 1010 + validator( + 1011 + "param", + 1012 + z.object({ + 1013 + sessionID: z.string().meta({ description: "Session ID" }), + 1014 + }), 
+ 1015 + ), + 1016 + async (c) => { + 1017 + const sessionID = c.req.valid("param").sessionID + 1018 + const queue = SessionSteer.list(sessionID) + 1019 + return c.json(queue) + 1020 + }, + 1021 + ) + 1022 + .delete( + 1023 + "/:sessionID/steer/:steerID", + 1024 + describeRoute({ + 1025 + summary: "Remove steered message", + 1026 + description: "Remove a specific queued steered message by its ID before it gets injected.", + 1027 + operationId: "session.steer.remove", + 1028 + responses: { + 1029 + 200: { + 1030 + description: "Whether the message was found and removed", + 1031 + content: { + 1032 + "application/json": { + 1033 + schema: resolver(z.boolean()), + 1034 + }, + 1035 + }, + 1036 + }, + 1037 + ...errors(400, 404), + 1038 + }, + 1039 + }), + 1040 + validator( + 1041 + "param", + 1042 + z.object({ + 1043 + sessionID: z.string().meta({ description: "Session ID" }), + 1044 + steerID: z.string().meta({ description: "Steer message ID" }), + 1045 + }), + 1046 + ), + 1047 + async (c) => { + 1048 + const params = c.req.valid("param") + 1049 + const removed = SessionSteer.remove(params.sessionID, params.steerID) + 1050 + return c.json(removed) + 1051 + }, + 1052 + ) +936 1053 .post( +937 1054 "/:sessionID/permissions/:permissionID", +938 1055 describeRoute({ +939 1056 + +``` + + +### 📄 `packages/opencode/src/session/prompt.ts` + +**Type:** TypeScript Source File 📘 + +```diff +@@ -45,6 +45,7 @@ import { LLM } from "./llm" + 45 45 import { iife } from "@/util/iife" + 46 46 import { Shell } from "@/shell/shell" + 47 47 import { Truncate } from "@/tool/truncation" + 48 +import { SessionSteer } from "./steer" + 48 49 + 49 50 // @ts-ignore + 50 51 globalThis.AI_SDK_LOG_WARNINGS = false +@@ -320,6 +321,56 @@ export namespace SessionPrompt { +320 321 !["tool-calls", "unknown"].includes(lastAssistant.finish) && +321 322 lastUser.id < lastAssistant.id +322 323 ) { + 324 + // Check for "steer" mode messages — these inject mid-turn at loop + 325 + // boundaries. 
"queue" mode messages wait until the turn fully ends. + 326 + const steered = SessionSteer.takeByMode(sessionID, "steer") + 327 + if (steered.length > 0) { + 328 + log.info("steer: injecting pending input", { sessionID, count: steered.length }) + 329 + const text = steered.map((m) => m.text).join("\n\n") + 330 + const steerMsg: MessageV2.User = { + 331 + id: Identifier.ascending("message"), + 332 + sessionID, + 333 + role: "user", + 334 + time: { created: Date.now() }, + 335 + agent: lastUser.agent, + 336 + model: lastUser.model, + 337 + } + 338 + await Session.updateMessage(steerMsg) + 339 + await Session.updatePart({ + 340 + id: Identifier.ascending("part"), + 341 + messageID: steerMsg.id, + 342 + sessionID, + 343 + type: "text", + 344 + text, + 345 + } satisfies MessageV2.TextPart) + 346 + continue + 347 + } + 348 + + 349 + // Turn is finished. Drain "queue" mode messages and auto-submit + 350 + // them as new user messages so the model starts a fresh turn. + 351 + const queued = SessionSteer.takeByMode(sessionID, "queue") + 352 + if (queued.length > 0) { + 353 + log.info("steer: auto-submitting queued input", { sessionID, count: queued.length }) + 354 + const text = queued.map((m) => m.text).join("\n\n") + 355 + const queueMsg: MessageV2.User = { + 356 + id: Identifier.ascending("message"), + 357 + sessionID, + 358 + role: "user", + 359 + time: { created: Date.now() }, + 360 + agent: lastUser.agent, + 361 + model: lastUser.model, + 362 + } + 363 + await Session.updateMessage(queueMsg) + 364 + await Session.updatePart({ + 365 + id: Identifier.ascending("part"), + 366 + messageID: queueMsg.id, + 367 + sessionID, + 368 + type: "text", + 369 + text, + 370 + } satisfies MessageV2.TextPart) + 371 + continue + 372 + } + 373 + +323 374 log.info("exiting loop", { sessionID }) +324 375 break +325 376 } +326 377 + +``` + + +### ✨ `packages/opencode/src/session/steer.ts` **[ADDED]** + +**Status:** ✅ **NEW FILE** - This file has been newly created + +**Type:** TypeScript 
Source File 📘 + +```diff +@@ -0,0 +1,127 @@ + 1 +import { Bus } from "../bus" + 2 +import { BusEvent } from "../bus/bus-event" + 3 +import { Instance } from "../project/instance" + 4 +import { Log } from "../util/log" + 5 +import z from "zod" + 6 + + 7 +export namespace SessionSteer { + 8 + const log = Log.create({ service: "session.steer" }) + 9 + + 10 + export type Mode = "queue" | "steer" + 11 + + 12 + const QueuedMessageSchema = z.object({ + 13 + id: z.string(), + 14 + text: z.string(), + 15 + time: z.number(), + 16 + mode: z.enum(["queue", "steer"]), + 17 + }) + 18 + + 19 + export const Event = { + 20 + QueueChanged: BusEvent.define( + 21 + "session.queue.changed", + 22 + z.object({ + 23 + sessionID: z.string(), + 24 + queue: z.array(QueuedMessageSchema), + 25 + }), + 26 + ), + 27 + } + 28 + + 29 + export interface QueuedMessage { + 30 + id: string + 31 + text: string + 32 + time: number + 33 + mode: Mode + 34 + } + 35 + + 36 + interface SteerState { + 37 + pending: QueuedMessage[] + 38 + } + 39 + + 40 + const state = Instance.state( + 41 + () => { + 42 + const data: Record = {} + 43 + return data + 44 + }, + 45 + async () => {}, + 46 + ) + 47 + + 48 + function ensure(sessionID: string): SteerState { + 49 + const s = state() + 50 + if (!s[sessionID]) s[sessionID] = { pending: [] } + 51 + return s[sessionID] + 52 + } + 53 + + 54 + /** Push a message into the pending buffer for an active session. */ + 55 + export function push(sessionID: string, text: string, mode: Mode = "queue"): QueuedMessage { + 56 + const entry: QueuedMessage = { + 57 + id: crypto.randomUUID(), + 58 + text, + 59 + time: Date.now(), + 60 + mode, + 61 + } + 62 + const s = ensure(sessionID) + 63 + s.pending.push(entry) + 64 + log.info("steer.push", { sessionID, id: entry.id, queueLength: s.pending.length }) + 65 + Bus.publish(Event.QueueChanged, { sessionID, queue: s.pending }) + 66 + return entry + 67 + } + 68 + + 69 + /** Drain all pending messages and return them. Clears the buffer. 
*/ + 70 + export function take(sessionID: string): QueuedMessage[] { + 71 + const s = state()[sessionID] + 72 + if (!s || s.pending.length === 0) return [] + 73 + const result = s.pending.splice(0) + 74 + log.info("steer.take", { sessionID, count: result.length }) + 75 + Bus.publish(Event.QueueChanged, { sessionID, queue: s.pending }) + 76 + return result + 77 + } + 78 + + 79 + /** Drain only messages matching the given mode. Leaves other messages in the buffer. */ + 80 + export function takeByMode(sessionID: string, mode: Mode): QueuedMessage[] { + 81 + const s = state()[sessionID] + 82 + if (!s || s.pending.length === 0) return [] + 83 + const matched: QueuedMessage[] = [] + 84 + const remaining: QueuedMessage[] = [] + 85 + for (const m of s.pending) { + 86 + if (m.mode === mode) matched.push(m) + 87 + else remaining.push(m) + 88 + } + 89 + if (matched.length === 0) return [] + 90 + s.pending = remaining + 91 + log.info("steer.takeByMode", { sessionID, mode, count: matched.length }) + 92 + Bus.publish(Event.QueueChanged, { sessionID, queue: s.pending }) + 93 + return matched + 94 + } + 95 + + 96 + /** Check if there's pending steered input for a session. */ + 97 + export function has(sessionID: string): boolean { + 98 + const s = state()[sessionID] + 99 + return !!s && s.pending.length > 0 + 100 + } + 101 + + 102 + /** Get the current queue without draining. */ + 103 + export function list(sessionID: string): QueuedMessage[] { + 104 + return state()[sessionID]?.pending ?? [] + 105 + } + 106 + + 107 + /** Remove a specific queued message by id. 
*/ + 108 + export function remove(sessionID: string, id: string): boolean { + 109 + const s = state()[sessionID] + 110 + if (!s) return false + 111 + const idx = s.pending.findIndex((m) => m.id === id) + 112 + if (idx === -1) return false + 113 + s.pending.splice(idx, 1) + 114 + log.info("steer.remove", { sessionID, id }) + 115 + Bus.publish(Event.QueueChanged, { sessionID, queue: s.pending }) + 116 + return true + 117 + } + 118 + + 119 + /** Clear all pending messages for a session. */ + 120 + export function clear(sessionID: string) { + 121 + const s = state()[sessionID] + 122 + if (!s || s.pending.length === 0) return + 123 + s.pending.length = 0 + 124 + log.info("steer.clear", { sessionID }) + 125 + Bus.publish(Event.QueueChanged, { sessionID, queue: s.pending }) + 126 + } + 127 +} + 128 + +``` + + +### ✨ `packages/opencode/test/session/steer.test.ts` **[ADDED]** + +**Status:** ✅ **NEW FILE** - This file has been newly created + +**Type:** TypeScript Source File 📘 + +```diff +@@ -0,0 +1,235 @@ + 1 +import { describe, expect, test, beforeEach } from "bun:test" + 2 +import path from "path" + 3 +import { SessionSteer } from "../../src/session/steer" + 4 +import { Instance } from "../../src/project/instance" + 5 +import { Log } from "../../src/util/log" + 6 + + 7 +const projectRoot = path.join(__dirname, "../..") + 8 +const SESSION = "session_test_steer_001" + 9 +Log.init({ print: false }) + 10 + + 11 +/** Helper to run a test function inside Instance.provide context */ + 12 +function withInstance(fn: () => void | Promise) { + 13 + return Instance.provide({ + 14 + directory: projectRoot, + 15 + fn: async () => { + 16 + await fn() + 17 + }, + 18 + }) + 19 +} + 20 + + 21 +describe("SessionSteer", () => { + 22 + describe("push", () => { + 23 + test("creates a queued message with default mode 'queue'", async () => { + 24 + await withInstance(() => { + 25 + SessionSteer.clear(SESSION) + 26 + const msg = SessionSteer.push(SESSION, "hello") + 27 + 
expect(msg.text).toBe("hello") + 28 + expect(msg.mode).toBe("queue") + 29 + expect(msg.id).toBeTruthy() + 30 + expect(msg.time).toBeGreaterThan(0) + 31 + }) + 32 + }) + 33 + + 34 + test("accepts explicit mode 'steer'", async () => { + 35 + await withInstance(() => { + 36 + SessionSteer.clear(SESSION) + 37 + const msg = SessionSteer.push(SESSION, "redirect", "steer") + 38 + expect(msg.text).toBe("redirect") + 39 + expect(msg.mode).toBe("steer") + 40 + }) + 41 + }) + 42 + + 43 + test("accepts explicit mode 'queue'", async () => { + 44 + await withInstance(() => { + 45 + SessionSteer.clear(SESSION) + 46 + const msg = SessionSteer.push(SESSION, "later", "queue") + 47 + expect(msg.mode).toBe("queue") + 48 + }) + 49 + }) + 50 + }) + 51 + + 52 + describe("take", () => { + 53 + test("drains all messages regardless of mode", async () => { + 54 + await withInstance(() => { + 55 + SessionSteer.clear(SESSION) + 56 + SessionSteer.push(SESSION, "a", "queue") + 57 + SessionSteer.push(SESSION, "b", "steer") + 58 + SessionSteer.push(SESSION, "c", "queue") + 59 + + 60 + const taken = SessionSteer.take(SESSION) + 61 + expect(taken).toHaveLength(3) + 62 + expect(taken.map((m) => m.text)).toEqual(["a", "b", "c"]) + 63 + expect(SessionSteer.list(SESSION)).toHaveLength(0) + 64 + }) + 65 + }) + 66 + + 67 + test("returns empty array when no messages", async () => { + 68 + await withInstance(() => { + 69 + SessionSteer.clear(SESSION) + 70 + expect(SessionSteer.take(SESSION)).toEqual([]) + 71 + }) + 72 + }) + 73 + }) + 74 + + 75 + describe("takeByMode", () => { + 76 + test("drains only 'steer' messages, leaving 'queue' messages", async () => { + 77 + await withInstance(() => { + 78 + SessionSteer.clear(SESSION) + 79 + SessionSteer.push(SESSION, "queued-1", "queue") + 80 + SessionSteer.push(SESSION, "steer-1", "steer") + 81 + SessionSteer.push(SESSION, "queued-2", "queue") + 82 + SessionSteer.push(SESSION, "steer-2", "steer") + 83 + + 84 + const steered = SessionSteer.takeByMode(SESSION, 
"steer") + 85 + expect(steered).toHaveLength(2) + 86 + expect(steered.map((m) => m.text)).toEqual(["steer-1", "steer-2"]) + 87 + + 88 + const remaining = SessionSteer.list(SESSION) + 89 + expect(remaining).toHaveLength(2) + 90 + expect(remaining.map((m) => m.text)).toEqual(["queued-1", "queued-2"]) + 91 + }) + 92 + }) + 93 + + 94 + test("drains only 'queue' messages, leaving 'steer' messages", async () => { + 95 + await withInstance(() => { + 96 + SessionSteer.clear(SESSION) + 97 + SessionSteer.push(SESSION, "queued-1", "queue") + 98 + SessionSteer.push(SESSION, "steer-1", "steer") + 99 + SessionSteer.push(SESSION, "queued-2", "queue") + 100 + + 101 + const queued = SessionSteer.takeByMode(SESSION, "queue") + 102 + expect(queued).toHaveLength(2) + 103 + expect(queued.map((m) => m.text)).toEqual(["queued-1", "queued-2"]) + 104 + + 105 + const remaining = SessionSteer.list(SESSION) + 106 + expect(remaining).toHaveLength(1) + 107 + expect(remaining[0].text).toBe("steer-1") + 108 + }) + 109 + }) + 110 + + 111 + test("returns empty when no messages match mode", async () => { + 112 + await withInstance(() => { + 113 + SessionSteer.clear(SESSION) + 114 + SessionSteer.push(SESSION, "queued", "queue") + 115 + const steered = SessionSteer.takeByMode(SESSION, "steer") + 116 + expect(steered).toEqual([]) + 117 + expect(SessionSteer.list(SESSION)).toHaveLength(1) + 118 + }) + 119 + }) + 120 + + 121 + test("returns empty when buffer is empty", async () => { + 122 + await withInstance(() => { + 123 + SessionSteer.clear(SESSION) + 124 + expect(SessionSteer.takeByMode(SESSION, "steer")).toEqual([]) + 125 + expect(SessionSteer.takeByMode(SESSION, "queue")).toEqual([]) + 126 + }) + 127 + }) + 128 + + 129 + test("sequential takeByMode drains both modes completely", async () => { + 130 + await withInstance(() => { + 131 + SessionSteer.clear(SESSION) + 132 + SessionSteer.push(SESSION, "s1", "steer") + 133 + SessionSteer.push(SESSION, "q1", "queue") + 134 + SessionSteer.push(SESSION, 
"s2", "steer") + 135 + SessionSteer.push(SESSION, "q2", "queue") + 136 + + 137 + const steered = SessionSteer.takeByMode(SESSION, "steer") + 138 + expect(steered).toHaveLength(2) + 139 + + 140 + const queued = SessionSteer.takeByMode(SESSION, "queue") + 141 + expect(queued).toHaveLength(2) + 142 + + 143 + expect(SessionSteer.has(SESSION)).toBe(false) + 144 + expect(SessionSteer.list(SESSION)).toHaveLength(0) + 145 + }) + 146 + }) + 147 + }) + 148 + + 149 + describe("has", () => { + 150 + test("returns false for empty session", async () => { + 151 + await withInstance(() => { + 152 + SessionSteer.clear(SESSION) + 153 + expect(SessionSteer.has(SESSION)).toBe(false) + 154 + }) + 155 + }) + 156 + + 157 + test("returns true after push", async () => { + 158 + await withInstance(() => { + 159 + SessionSteer.clear(SESSION) + 160 + SessionSteer.push(SESSION, "test") + 161 + expect(SessionSteer.has(SESSION)).toBe(true) + 162 + }) + 163 + }) + 164 + + 165 + test("returns false after take drains all", async () => { + 166 + await withInstance(() => { + 167 + SessionSteer.clear(SESSION) + 168 + SessionSteer.push(SESSION, "test") + 169 + SessionSteer.take(SESSION) + 170 + expect(SessionSteer.has(SESSION)).toBe(false) + 171 + }) + 172 + }) + 173 + + 174 + test("returns true when takeByMode leaves remaining", async () => { + 175 + await withInstance(() => { + 176 + SessionSteer.clear(SESSION) + 177 + SessionSteer.push(SESSION, "q", "queue") + 178 + SessionSteer.takeByMode(SESSION, "steer") + 179 + expect(SessionSteer.has(SESSION)).toBe(true) + 180 + }) + 181 + }) + 182 + }) + 183 + + 184 + describe("list", () => { + 185 + test("returns current queue without draining", async () => { + 186 + await withInstance(() => { + 187 + SessionSteer.clear(SESSION) + 188 + SessionSteer.push(SESSION, "a", "queue") + 189 + SessionSteer.push(SESSION, "b", "steer") + 190 + + 191 + const first = SessionSteer.list(SESSION) + 192 + expect(first).toHaveLength(2) + 193 + + 194 + const second = 
SessionSteer.list(SESSION) + 195 + expect(second).toHaveLength(2) + 196 + }) + 197 + }) + 198 + }) + 199 + + 200 + describe("remove", () => { + 201 + test("removes specific message by id", async () => { + 202 + await withInstance(() => { + 203 + SessionSteer.clear(SESSION) + 204 + const msg = SessionSteer.push(SESSION, "target", "steer") + 205 + SessionSteer.push(SESSION, "keep", "queue") + 206 + + 207 + const removed = SessionSteer.remove(SESSION, msg.id) + 208 + expect(removed).toBe(true) + 209 + expect(SessionSteer.list(SESSION)).toHaveLength(1) + 210 + expect(SessionSteer.list(SESSION)[0].text).toBe("keep") + 211 + }) + 212 + }) + 213 + + 214 + test("returns false for non-existent id", async () => { + 215 + await withInstance(() => { + 216 + SessionSteer.clear(SESSION) + 217 + SessionSteer.push(SESSION, "test") + 218 + expect(SessionSteer.remove(SESSION, "nonexistent")).toBe(false) + 219 + }) + 220 + }) + 221 + }) + 222 + + 223 + describe("clear", () => { + 224 + test("removes all pending messages", async () => { + 225 + await withInstance(() => { + 226 + SessionSteer.clear(SESSION) + 227 + SessionSteer.push(SESSION, "a", "queue") + 228 + SessionSteer.push(SESSION, "b", "steer") + 229 + SessionSteer.clear(SESSION) + 230 + expect(SessionSteer.has(SESSION)).toBe(false) + 231 + expect(SessionSteer.list(SESSION)).toHaveLength(0) + 232 + }) + 233 + }) + 234 + }) + 235 +}) + 236 + +``` + +## 🤖 Comprehensive Review Checklist + +### ✅ Code Quality & Standards +- [ ] **Syntax & Formatting**: Consistent indentation, proper spacing +- [ ] **Naming Conventions**: Clear, descriptive variable/function names +- [ ] **Code Structure**: Logical organization, appropriate function size +- [ ] **Documentation**: Clear comments explaining complex logic +- [ ] **Type Safety**: Proper typing (if applicable) + +### 🔍 Logic & Functionality +- [ ] **Algorithm Correctness**: Logic implements requirements correctly +- [ ] **Edge Case Handling**: Boundary conditions properly addressed +- [ 
] **Error Handling**: Appropriate try-catch blocks and error messages +- [ ] **Performance**: Efficient algorithms, no unnecessary loops +- [ ] **Memory Management**: Proper cleanup, no memory leaks + +### 🐛 Potential Issues & Bugs +- [ ] **Runtime Errors**: No null/undefined dereferencing +- [ ] **Type Mismatches**: Consistent data types throughout +- [ ] **Race Conditions**: Proper async/await handling +- [ ] **Resource Leaks**: Event listeners, timers properly cleaned up +- [ ] **Off-by-one Errors**: Array/loop bounds correctly handled + +### 🔒 Security Considerations +- [ ] **Input Validation**: User inputs properly sanitized +- [ ] **XSS Prevention**: No unsafe HTML injection +- [ ] **Authentication**: Proper access controls if applicable +- [ ] **Data Exposure**: No sensitive information in logs/client +- [ ] **Dependency Security**: No known vulnerable packages + +### 📱 User Experience & Accessibility +- [ ] **Responsive Design**: Works on different screen sizes +- [ ] **Loading States**: Proper feedback during operations +- [ ] **Error Messages**: User-friendly error communication +- [ ] **Accessibility**: ARIA labels, keyboard navigation +- [ ] **Performance**: Fast loading, smooth interactions + +### 💡 Improvement Suggestions + +#### Code Organization +- [ ] Consider extracting complex logic into separate functions +- [ ] Evaluate if constants should be moved to configuration +- [ ] Check for code duplication opportunities + +#### Performance Optimizations +- [ ] Identify opportunities for memoization +- [ ] Consider lazy loading for heavy operations +- [ ] Evaluate database query efficiency (if applicable) + +#### Testing Recommendations +- [ ] Unit tests for core functionality +- [ ] Integration tests for API endpoints +- [ ] Edge case testing scenarios + +#### Documentation Needs +- [ ] API documentation updates +- [ ] Code comments for complex algorithms +- [ ] README updates if public interfaces changed + +### 📝 Review Notes +*Add your specific 
feedback, suggestions, and observations here:* + +--- +*Individual file review generated by AI Visual Code Review v2.0* +*Generated: 2026-02-26T10:33:24.938Z* diff --git a/docs/09-temp/cline-subagent-research.md b/docs/09-temp/cline-subagent-research.md new file mode 100644 index 000000000000..76d7684e9929 --- /dev/null +++ b/docs/09-temp/cline-subagent-research.md @@ -0,0 +1,66 @@ +# Research: Cline Subagent Architecture + +**Date:** 2026-02-24 +**Status:** TODO — pick up in next session + +## Research Questions + +1. How does Cline form subagents? How does the AI decide how many to create? +2. What task distribution strategy is used? How are tasks assigned to each subagent? +3. How is context shared between parent agent and subagents? +4. How are subagent outputs aggregated back into the main conversation? +5. What happens when a subagent task errors? Error handling and recovery. +6. How could this inspire improvements to opencode's existing subagent system? + +## Key References + +- **CLI Subagent Command Transformation**: `src/integrations/cli-subagents/subagent_command.ts` + - `isSubagentCommand()` — identifies simplified cline commands + - `transformClineCommand()` — injects `--json -y` flags for autonomous execution + +- **Agent Client Protocol (ACP)**: `cli/src/acp/AcpAgent.ts` + - Bridges ClineAgent with AgentSideConnection for stdio-based communication + - Handles permission requests, forwards session events + +- **ClineAgent**: `cli/src/agent/ClineAgent.ts` + - Implements ACP agent interface + - Translates ACP requests into core Controller operations + - Manages authentication, session modes, processes user prompts + +- **Message Translator**: `cli/src/agent/messageTranslator.ts` + - Converts ClineMessage objects to ACP SessionUpdate messages + - Computes deltas for streaming (avoids duplicate content) + +## CodeWiki References + +- https://codewiki.google/github.com/cline/cline#cli-subagent-command-transformation +- 
https://codewiki.google/github.com/cline/cline#command-line-interface-cli-functionality +- https://codewiki.google/github.com/cline/cline#agent-client-protocol-acp-integration-for-external-control + +## Comparison with OpenCode's Subagent System + +OpenCode already has subagents (`TaskTool` in `packages/opencode/src/tool/task.ts`): +- Subagents are spawned via the `task` tool +- Each subagent gets its own child session +- Subagent types: explore, plan, general (configurable per agent) +- Results returned as tool output to parent session + +**Gaps to investigate:** +- Does Cline support parallel subagents? (OpenCode does via plan mode Phase 1) +- How does Cline's ACP protocol compare to opencode's Bus event system? +- Can we adopt Cline's streaming delta pattern for subagent updates? + +## Tonight's Session Summary (2026-02-24, 2:37 AM - 4:57 AM) + +### 6 PRs Submitted to opencode (sst/opencode): +1. **#14820** — Streaming content duplication fix (global-sdk.tsx voided Set) +2. **#14821** — Font size settings (CSS vars + terminal + UI stepper) +3. **#14826** — ContextOverflowError auto-recovery (processor.ts) +4. **#14827** — Prune before compaction (prompt.ts) +5. **#14831** — Context usage card with compact button (session-context-tab.tsx) +6. **#14835** — Wide mode setting (full-width chat toggle) + +### Issues Created: +- #14822, #14823, #14824, #14825, #14830, #14834 + +### All branches merged into `origin/dev` on fork (PrakharMNNIT/opencode) diff --git a/docs/09-temp/codex-queue-steer-architecture.md b/docs/09-temp/codex-queue-steer-architecture.md new file mode 100644 index 000000000000..f2012489aa4f --- /dev/null +++ b/docs/09-temp/codex-queue-steer-architecture.md @@ -0,0 +1,319 @@ +# Codex Queue/Steer Architecture Analysis + +> Deep-dive into OpenAI Codex CLI's queue/steer mechanism for mid-turn user interaction. 
+> Source: `references/codex/` submodule + +--- + +## Overview + +Codex implements a **dual-input model** that lets users interact with the agent **during** an active turn, not just between turns: + +| Action | Keybinding | Behavior | When Turn Active | +|--------|-----------|----------|-----------------| +| **Queue** | `Enter` | Enqueue message for next turn boundary | Message waits in queue, displayed in UI | +| **Steer** | `⌘Enter` / `Enter` (steer-mode) | Inject input into active turn immediately | Message sent to model in current context | + +--- + +## Architecture Layers + +``` +┌─────────────────────────────────────────────────┐ +│ TUI Layer (tui/src/) │ +│ ┌─────────────────────────────────────────┐ │ +│ │ ChatComposer │ │ +│ │ Enter → InputResult::Submitted (steer) │ │ +│ │ Tab → InputResult::Queued │ │ +│ └─────────────────┬───────────────────────┘ │ +│ │ │ +│ ┌─────────────────▼───────────────────────┐ │ +│ │ QueuedUserMessages widget │ │ +│ │ Shows queued messages with "↳" prefix │ │ +│ │ Alt+Up to pop back into composer │ │ +│ └─────────────────────────────────────────┘ │ +└────────────────────┬────────────────────────────┘ + │ +┌────────────────────▼────────────────────────────┐ +│ App Server Protocol (app-server-protocol/) │ +│ │ +│ turn/start → TurnStartParams (new turn) │ +│ turn/steer → TurnSteerParams (mid-turn) │ +│ │ +│ TurnSteerParams { │ +│ thread_id: String, │ +│ input: Vec, │ +│ expected_turn_id: String, // guard │ +│ } │ +│ │ +│ TurnSteerResponse { │ +│ turn_id: String, // confirms active turn │ +│ } │ +└────────────────────┬────────────────────────────┘ + │ +┌────────────────────▼────────────────────────────┐ +│ App Server (app-server/src/) │ +│ codex_message_processor.rs │ +│ │ +│ async fn turn_steer(&self, req_id, params) { │ +│ let thread = load_thread(params.thread_id); │ +│ thread.steer_input( │ +│ mapped_items, │ +│ Some(¶ms.expected_turn_id) │ +│ ); │ +│ // Returns turn_id or error: │ +│ // "no active turn to steer" │ +│ } │ 
+└────────────────────┬────────────────────────────┘ + │ +┌────────────────────▼────────────────────────────┐ +│ Core Engine (core/src/codex.rs) │ +│ │ +│ Session::steer_input(input, expected_turn_id) │ +│ 1. Validate input not empty │ +│ 2. Lock active_turn mutex │ +│ 3. Verify active turn exists │ +│ 4. Check expected_turn_id matches │ +│ 5. Lock turn_state │ +│ 6. push_pending_input(input) ← KEY STEP │ +│ 7. Return active turn_id │ +└────────────────────┬────────────────────────────┘ + │ +┌────────────────────▼────────────────────────────┐ +│ Turn State (core/src/state/turn.rs) │ +│ │ +│ struct TurnState { │ +│ pending_input: Vec, │ +│ } │ +│ │ +│ push_pending_input(item) → appends to vec │ +│ take_pending_input() → drains vec │ +│ has_pending_input() → checks non-empty │ +└────────────────────┬────────────────────────────┘ + │ +┌────────────────────▼────────────────────────────┐ +│ Task Loop (core/src/codex.rs ~L4970) │ +│ │ +│ loop { │ +│ // At each iteration, drain pending input │ +│ let pending = sess.get_pending_input().await; │ +│ if !pending.is_empty() { │ +│ // Record as conversation items │ +│ // → injected into model context │ +│ for item in pending { │ +│ record_user_prompt_and_emit_turn_item(); │ +│ } │ +│ } │ +│ // ... send to model, process response ... 
│ +│ // On ResponseEvent::Completed: │ +│ needs_follow_up |= has_pending_input(); │ +│ // If follow_up needed → loop continues │ +│ } │ +└─────────────────────────────────────────────────┘ +``` + +--- + +## Core Mechanism: `steer_input` + +The heart of steer is `Session::steer_input()` in `core/src/codex.rs`: + +```rust +pub async fn steer_input( + &self, + input: Vec, + expected_turn_id: Option<&str>, +) -> Result { + if input.is_empty() { + return Err(SteerInputError::EmptyInput); + } + let mut active = self.active_turn.lock().await; + let Some(active_turn) = active.as_mut() else { + return Err(SteerInputError::NoActiveTurn(input)); + }; + let Some((active_turn_id, _)) = active_turn.tasks.first() else { + return Err(SteerInputError::NoActiveTurn(input)); + }; + if let Some(expected_turn_id) = expected_turn_id + && expected_turn_id != active_turn_id + { + return Err(SteerInputError::ExpectedTurnMismatch { + expected: expected_turn_id.to_string(), + actual: active_turn_id.clone(), + }); + } + let mut turn_state = active_turn.turn_state.lock().await; + turn_state.push_pending_input(input.into()); + Ok(active_turn_id.clone()) +} +``` + +### Key Design Decisions + +1. **Non-blocking injection**: `steer_input` just pushes to a `Vec` — it doesn't interrupt or cancel the model. The model's active response completes naturally. + +2. **Consumption at loop boundary**: The task loop checks `pending_input` at the **top of each iteration**. After the model finishes a response, if pending input exists, it gets recorded as conversation items and the model is called again with the updated context. + +3. **`needs_follow_up` flag**: When a model response completes (`ResponseEvent::Completed`), if there's pending input, the loop sets `needs_follow_up = true` and continues instead of ending the turn. + +4. 
**Turn ID validation**: The `expected_turn_id` field prevents race conditions — the steer request fails if the turn has changed between the user pressing Enter and the server processing the request. + +--- + +## Error Types + +```rust +pub enum SteerInputError { + NoActiveTurn(Vec), // No model turn running + ExpectedTurnMismatch { // Turn changed since request + expected: String, + actual: String, + }, + EmptyInput, // Nothing to inject +} +``` + +When `NoActiveTurn` occurs, the app-server falls back — the input that failed to steer gets queued for the next `turn/start`. + +--- + +## Queue vs Steer: Detailed Comparison + +### Queue (Tab / Enter in legacy mode) + +1. User types message, presses Tab (or Enter in non-steer mode) +2. TUI returns `InputResult::Queued { text, text_elements }` +3. Message stored in `QueuedUserMessages.messages: Vec` +4. Rendered in UI with `↳` prefix, dimmed/italic +5. User can pop with Alt+Up to edit +6. When current turn completes → queued messages become the next `turn/start` + +### Steer (Enter in steer mode / ⌘Enter) + +1. User types message, presses Enter +2. TUI returns `InputResult::Submitted { text, text_elements }` +3. App sends `turn/steer` RPC to server +4. Server calls `thread.steer_input()` → pushes to `pending_input` +5. Model's current response continues to completion +6. At next task loop iteration, pending input is drained and recorded +7. Model sees the user's steer message in context → generates follow-up +8. 
**All within the same turn** — no new turn boundary + +### Critical Difference + +| Aspect | Queue | Steer | +|--------|-------|-------| +| **Timing** | After turn ends | During active turn | +| **Turn boundary** | Creates new turn | Same turn continues | +| **Model sees it** | On next turn start | At next loop iteration | +| **Cancels response** | No (waits) | No (appends to context) | +| **UI display** | Queued messages widget | Injected into chat transcript | +| **Fallback** | N/A | Falls back to queue if no active turn | + +--- + +## Turn Lifecycle with Steer + +``` +Turn Start (user submits prompt) + │ + ├─→ Model generates response... + │ │ + │ │ ← User presses Enter (steer) + │ │ → steer_input() pushes to pending_input + │ │ + │ ▼ + │ Response completes + │ │ + │ ├─→ has_pending_input()? YES + │ │ → needs_follow_up = true + │ │ + │ ▼ + │ Loop continues → drain pending_input + │ → Record steered message as conversation item + │ → Model sees: [original prompt, response, steered message] + │ → Model generates new response with full context + │ │ + │ ├─→ has_pending_input()? NO + │ │ → needs_follow_up = false + │ ▼ + │ Turn Complete + │ + └─→ Queued messages (if any) → next turn/start +``` + +--- + +## Turn Completion & Leftover Input + +When a task finishes (`task_finished()` in `core/src/tasks/mod.rs`): + +```rust +// 1. Lock active turn +let mut active = self.active_turn.lock().await; +// 2. Take any remaining pending input +let pending_input = ts.take_pending_input(); +// 3. Clear active turn +*active = None; +// 4. Record leftover input as conversation items +if !pending_input.is_empty() { + record_conversation_items(&turn_context, &pending_response_items); +} +// 5. Emit TurnComplete event +``` + +This ensures steered input is **never lost** — even if the turn ends before the pending input could be consumed by the model loop. 
+ +--- + +## Feature Flag: `steer_enabled` + +Steer is gated behind `Feature::Steer` in the TUI: + +```rust +// When steer_enabled == true: +// Enter → Submitted (steer immediately) +// Tab → Queued (wait for turn end) +// +// When steer_enabled == false (legacy): +// Enter → Queued +// Tab → Queued +``` + +--- + +## Implications for OpenCode + +### What OpenCode Currently Has +- Session/turn model with `processor.ts` handling model interaction +- Parallel agents via `task.ts` tool +- No mid-turn input injection + +### What Queue/Steer Would Add +1. **Pending input buffer** on the session/turn state +2. **Steer RPC** that pushes to the buffer while model is running +3. **Loop-boundary drain** that checks for pending input after each model response +4. **Follow-up continuation** instead of ending the turn when input is pending +5. **UI queue widget** showing messages waiting for the current turn to finish +6. **Fallback path**: steer → queue if no active turn + +### Key Implementation Points +- `steer_input()` is a **lock-based, non-cancelling** approach — it doesn't abort the model stream +- Pending input is consumed at the **top of the agentic loop**, not mid-stream +- The model sees steered input as additional conversation items on its next iteration +- `expected_turn_id` prevents stale steer requests from affecting wrong turns +- Queued messages are a purely UI-side concept until they become a `turn/start` + +--- + +## References + +- Protocol types: `codex-rs/app-server-protocol/src/protocol/v2.rs` +- Core steer: `codex-rs/core/src/codex.rs` (L3377-3406) +- Turn state: `codex-rs/core/src/state/turn.rs` (L77-163) +- Task loop drain: `codex-rs/core/src/codex.rs` (L4970-5000) +- Follow-up flag: `codex-rs/core/src/codex.rs` (L6364) +- Task completion: `codex-rs/core/src/tasks/mod.rs` (L190-230) +- App server handler: `codex-rs/app-server/src/codex_message_processor.rs` +- TUI queue widget: `codex-rs/tui/src/bottom_pane/queued_user_messages.rs` +- TUI composer: 
`codex-rs/tui/src/public_widgets/composer_input.rs` diff --git a/docs/09-temp/escape-key-ux-research.md b/docs/09-temp/escape-key-ux-research.md new file mode 100644 index 000000000000..37575a6243bb --- /dev/null +++ b/docs/09-temp/escape-key-ux-research.md @@ -0,0 +1,32 @@ +# Research: Escape Key Cancel UX + +**Date:** 2026-02-24 +**Status:** TODO — brainstorm in next session + +## Problem +Pressing Escape accidentally during AI response immediately stops the response with no confirmation. No visual feedback in chat that response was interrupted. + +## Current Behavior +- Escape → immediately cancels the LLM response +- Shows a notification/warning toast +- No visual indicator in the chat thread that the message was interrupted +- No confirmation dialog before cancelling + +## User's Proposed Improvements +1. **Confirmation before cancel** — Alert/dialog: "Are you sure you want to interrupt?" +2. **Visual interruption indicator** — Show in chat that the message was interrupted (red line, badge, etc.) +3. **Better UX** — Maybe double-tap Escape to cancel, or Escape once to show warning + +## Files to Investigate +- `packages/app/src/pages/session.tsx` — handleKeyDown, Escape handling +- `packages/app/src/components/prompt-input.tsx` — Escape key handling in input +- `packages/opencode/src/session/prompt.ts` — cancel() function +- `packages/ui/src/components/message-part.tsx` — interrupted state rendering +- `packages/app/src/pages/session/use-session-commands.tsx` — session.cancel command + +## Design Questions +1. Should Escape require double-tap? (like VS Code terminal) +2. Should there be a small "Esc to cancel" indicator during streaming? +3. Should interrupted messages have a visual indicator (red border/badge)? +4. Should there be an "undo cancel" option (resume if possible)? +5. How does Cline/Cursor handle this? 
diff --git a/docs/09-temp/issues.md b/docs/09-temp/issues.md
new file mode 100644
index 000000000000..263f75fe81c8
--- /dev/null
+++ b/docs/09-temp/issues.md
@@ -0,0 +1,599 @@
+# OpenCode — Parallel Agent & Retry Storm Issues
+
+> **Created**: 2026-02-25
+> **Source**: Combined RCA by Cline + Antigravity
+> **Status**: Approved for implementation
+
+---
+
+## Issue #1: `processor-max-retries` — Infinite Retry Loop in processor.ts
+
+### Priority: P0 — Stop The Bleeding
+
+### What is the issue?
+The session processor retries failed API calls in an infinite `while(true)` loop with **no maximum retry count**. When an error is classified as "retryable" by `retry.ts`, the processor will retry it forever — user observed **2,244 identical retries over 3.5 hours** before manual abort.
+
+### What is the bug?
+`packages/opencode/src/session/processor.ts` line ~53 has a `while(true)` loop. When the catch block determines an error is retryable via `SessionRetry.retryable(error)`, it increments `attempt` and `continue`s the loop. There is **no guard** like `if (attempt >= MAX_RETRIES) break`.
+
+### Where it can happen?
+- Any API call that returns a retryable error (transient network issues, rate limits, Bedrock context overflow misclassified as retryable)
+- Most critically: Bedrock "prompt is too long" errors that get misclassified as retryable by the catch-all in `retry.ts` (see Issue #2)
+- Affects both parent sessions and subagent sessions independently
+
+### What any agent needs to look for?
+```
+File: packages/opencode/src/session/processor.ts
+Location: The while(true) loop (~line 53)
+Pattern: Look for the catch block that calls SessionRetry.retryable() and does `continue`
+```
+
+### How to make the fix?
+Add a `MAX_RETRIES` constant and guard before the `continue`: + +```typescript +// At top of file or inside the function +const MAX_RETRIES = 10 + +// Inside the catch block, before `continue`: +if (attempt >= MAX_RETRIES) { + input.assistantMessage.error = { + name: "RetryLimitExceeded", + message: `Maximum retries (${MAX_RETRIES}) exceeded. Last error: ${retry}`, + } + break +} +``` + +The error should be stored on `input.assistantMessage.error` so the session stops and the UI shows the error. Make sure the status is set to idle after breaking. + +### Testing +- Trigger a retryable error (e.g., rate limit) and verify it stops after 10 attempts +- Verify the error message appears in the session UI +- Verify the session status returns to "idle" (not stuck in "retry") + +--- + +## Issue #2: `bedrock-undefined-message` — error.ts Fails to Parse Bedrock Error Messages + +### Priority: P0 — Stop The Bleeding + +### What is the issue? +When Amazon Bedrock returns an API error (e.g., "prompt is too long"), the `message()` function in `error.ts` receives `e.message = "undefined"` (the literal string, not the JS undefined value). The function only checks for empty string `""`, so it passes `"undefined"` through to `isOverflow()`, which fails to match any overflow pattern. This means **Bedrock context overflow errors are never detected as overflow**, preventing compaction from triggering. + +### What is the bug? +`packages/opencode/src/provider/error.ts` function `message()` (~line 50-80): +```typescript +const msg = e.message +if (msg === "") { + if (e.responseBody) return e.responseBody + // ... +} +``` +When Bedrock SDK sets `e.message` to the literal string `"undefined"`, this check passes through. The actual error details are in `e.responseBody` but never extracted. + +### Where it can happen? 
+- Any Bedrock API call that returns an error (context overflow, validation errors, throttling) +- The Bedrock SDK wraps errors differently than the Anthropic direct SDK +- Specifically observed with "prompt is too long: 208845 tokens > 200000 maximum" errors + +### What any agent needs to look for? +``` +File: packages/opencode/src/provider/error.ts +Location: The message() function, specifically the `if (msg === "")` check +Also check: isOverflow() function and the OVERFLOW_PATTERNS regex +``` + +### How to make the fix? +Extend the empty-message check to also handle `"undefined"`: + +```typescript +function message(providerID: string, e: APICallError) { + return iife(() => { + const msg = e.message + if (msg === "" || msg === "undefined") { + if (e.responseBody) return e.responseBody + // ... rest of existing fallback logic + } + return msg + }) +} +``` + +This ensures the actual error body (which contains "prompt is too long") is used for overflow detection instead of the meaningless `"undefined"` string. + +### Testing +- Mock a Bedrock APICallError with `message: "undefined"` and `responseBody: "prompt is too long: 208845 tokens > 200000 maximum"` +- Verify `message()` returns the responseBody, not `"undefined"` +- Verify `isOverflow()` correctly detects the overflow pattern from the responseBody + +--- + +## Issue #3: `task-swallows-errors` — task.ts Silently Swallows Subagent Failures + +### Priority: P0 — Stop The Bleeding + +### What is the issue? +When a subagent (child session spawned by the `task` tool) fails with an error, the parent session shows it as **successfully completed with empty output**. The user sees a green ✅ checkmark for a task that actually errored. This is THE primary cause of "failures not reflected in main chat." + +### What is the bug? +`packages/opencode/src/tool/task.ts` line ~145: +```typescript +const result = await SessionPrompt.prompt({...}) +const text = result.parts.findLast((x) => x.type === "text")?.text ?? 
"" +``` + +`result.info` contains an `.error` field when the child session errored (set by `processor.ts` at `input.assistantMessage.error = error`). But `task.ts` **never checks `result.info.error`** — it only looks for text parts. When the child errored, there are no text parts, so `text = ""`, and the parent receives `\n\n` as a "successful" empty result. + +### Where it can happen? +- Any subagent failure: context overflow, API error, tool execution error, rate limit +- Parallel subagents: if 1 of 3 subagents fails, parent sees 3 "completed" tasks with one having empty output +- The parent LLM may then hallucinate that the task completed or silently move on + +### What any agent needs to look for? +``` +File: packages/opencode/src/tool/task.ts +Location: After the `SessionPrompt.prompt()` call, before building the output +Pattern: result.info should have an error field — check result.info type definition +Also check: packages/opencode/src/session/prompt.ts for the return type of prompt() +``` + +### How to make the fix? +Add an error check immediately after the `SessionPrompt.prompt()` call: + +```typescript +const result = await SessionPrompt.prompt({...}) + +// Check if child session errored +if (result.info.error) { + const error = result.info.error + const msg = error.message ?? error.name ?? "Subagent task failed" + return { + title: params.description, + metadata: { sessionId: session.id, model }, + output: [ + `task_id: ${session.id}`, + "", + "", + `ERROR: ${msg}`, + `The subtask encountered an error and could not complete.`, + "", + ].join("\n"), + } +} + +const text = result.parts.findLast((x) => x.type === "text")?.text ?? "" +``` + +**Important**: Check the actual type of `result.info` to use proper typing instead of `(result.info as any).error`. Look at how `processor.ts` sets the error on `input.assistantMessage.error` to understand the shape. 
+ +### Testing +- Trigger a subagent error (e.g., invalid tool call, context overflow) +- Verify the parent session shows "ERROR: ..." in the task result, not empty +- Verify the parent LLM receives the error and can report it to the user + +--- + +## Issue #4: `bedrock-context-cap` — Bedrock Provider Missing Context Limit Override + +### Priority: P0 — This Sprint + +### What is the issue? +The `models-snapshot.ts` file (auto-generated from models.dev) lists Claude Opus 4.6 on Bedrock with `context: 1,000,000`. This is the model's capability WITH the `context-1m` beta header. However, the Bedrock provider handler in `provider.ts` **never sends the 1M beta header**, so Bedrock actually enforces a 200K limit. The result: UI shows "20% context usage" when the user is actually at 100% of the real limit, and compaction never triggers. + +### What is the bug? +Two bugs combine: + +1. **`models-snapshot.ts`** lists Opus 4.6 Bedrock models at 1M context (reflects model capability, not runtime limit) +2. **`provider.ts`** `"amazon-bedrock"` handler has NO logic to: + - Send `additionalModelRequestFields: { anthropic_beta: ["context-1m-2025-08-07"] }` to enable 1M + - Override the context limit to 200K when 1M beta is NOT active + +**Affected models in snapshot**: +``` +amazon-bedrock / anthropic.claude-opus-4-6-v1: context=1,000,000 ❌ +amazon-bedrock / us.anthropic.claude-opus-4-6-v1: context=1,000,000 ❌ +amazon-bedrock / eu.anthropic.claude-opus-4-6-v1: context=1,000,000 ❌ +amazon-bedrock / global.anthropic.claude-opus-4-6-v1: context=1,000,000 ❌ +``` + +All other Bedrock Claude models correctly show 200K. + +### Where it can happen? 
+- Any user running Claude Opus 4.6 via Amazon Bedrock +- Compaction threshold is calculated from `model.limit.context` → 1M → threshold ~900K +- Bedrock rejects at 200K → 700K token gap where compaction never fires but API always rejects +- Combined with Issue #1 (infinite retries), this causes the 3.5-hour freeze + +### What any agent needs to look for? +``` +File: packages/opencode/src/provider/provider.ts +Location: The "amazon-bedrock" entry in CUSTOM_LOADERS (~line 211) +Pattern: The returned object has options (providerOptions) and getModel() but NO context limit override +Also: Look at how compaction.ts uses model.limit.context (~line 33) +Also: Look at how Cline handles this — they use additionalModelRequestFields for Bedrock + +DO NOT edit models-snapshot.ts directly — it is auto-generated by build.ts +``` + +### How to make the fix? +**Option A (Recommended)**: Add provider-level context limit override in the model resolution logic. When provider is "amazon-bedrock" and model is Claude, cap context at 200K unless a 1M configuration is explicitly enabled. + +Look at where models are resolved and limits are applied. The fix should go in `provider.ts` where models are loaded/resolved, adding a context limit override: + +```typescript +// Inside amazon-bedrock handler or model resolution +if (providerID === "amazon-bedrock" && modelData.limit?.context > 200000) { + // Cap at 200K unless 1M beta is explicitly configured + modelData.limit.context = 200000 +} +``` + +**Option B (Future)**: Implement Cline's `:1m` suffix pattern — user explicitly opts into 1M context, which triggers adding `anthropic_beta: ["context-1m-2025-08-07"]` via `additionalModelRequestFields`. 
+ +### Testing +- Configure Bedrock with Opus 4.6 +- Verify UI shows context limit as 200K (not 1M) +- Verify compaction triggers before hitting Bedrock's actual 200K limit +- Verify no "prompt is too long" errors during normal usage + +--- + +## Issue #5: `subagent-timeout` — task.ts Has No Execution Timeout + +### Priority: P0 — This Sprint + +### What is the issue? +The `task` tool calls `SessionPrompt.prompt()` with **no timeout or deadline**. If a subagent gets stuck (infinite retry storm, permission hang, or any other blocking issue), the parent tool call never resolves. The parent session appears frozen with a spinning "running" indicator forever. + +### What is the bug? +`packages/opencode/src/tool/task.ts`: +```typescript +const result = await SessionPrompt.prompt({ + messageID, + sessionID: session.id, + model: { modelID: model.modelID, providerID: model.providerID }, + agent: agent.name, + tools: { ... }, + parts: promptParts, +}) +// ← No timeout wrapper, no AbortController deadline +``` + +This Promise can hang indefinitely if the child session encounters: +- Infinite retry loop (Issue #1 before fix) +- Permission hang (Issue #6) +- Slow API responses that never complete + +### Where it can happen? +- Any subagent execution, but especially: + - When subagent hits context overflow with retries + - When subagent needs permission and user is watching parent + - When API provider is slow or unresponsive + +### What any agent needs to look for? +``` +File: packages/opencode/src/tool/task.ts +Location: The SessionPrompt.prompt() call +Pattern: Check if there's an AbortSignal or timeout mechanism available +Also check: How the abort signal flows from processor.ts → tool execution → task.ts +Also check: ctx parameter in execute() — does it carry an abort signal? +``` + +### How to make the fix? 
+Wrap the `SessionPrompt.prompt()` call with an AbortController timeout: + +```typescript +const timeout = 5 * 60 * 1000 // 5 minutes (configurable) +const controller = new AbortController() +const timer = setTimeout(() => controller.abort(), timeout) + +try { + const result = await SessionPrompt.prompt({ + // ... existing params ... + abort: controller.signal, // Pass abort signal if prompt() supports it + }) + clearTimeout(timer) + // ... process result ... +} catch (e) { + clearTimeout(timer) + if (controller.signal.aborted) { + return { + title: params.description, + metadata: { sessionId: session.id, model }, + output: `ERROR: Subtask timed out after ${timeout / 1000}s. The task may still be running in session ${session.id}.`, + } + } + throw e +} +``` + +Check if `SessionPrompt.prompt()` already accepts an `abort` parameter. If not, trace how `processor.ts` passes its abort signal and ensure the plumbing exists. + +### Testing +- Trigger a subagent that would hang (e.g., long-running task) +- Verify it times out after the configured deadline +- Verify the parent receives a timeout error message, not silent hang +- Verify the child session is properly cleaned up + +--- + +## Issue #6: `permission-abort` — next.ts Permission Promises Hang Forever in Subagents + +### Priority: P0 — This Sprint + +### What is the issue? +When a subagent's tool requires permission (e.g., file write, command execution), the permission prompt appears **only in the child session**. If the user is watching the parent session, they never see the prompt. The child session hangs forever waiting for permission, which blocks the parent's tool call. + +### What is the bug? +`packages/opencode/src/permission/next.ts` lines ~143-156: +```typescript +export function ask(input: AskInput) { + return new Promise((resolve, reject) => { + // ... sets up permission request ... 
+ // NO abort signal listener + // NO timeout + // Promise resolves only when user explicitly grants/denies + }) +} +``` + +`grep -c "abort" next.ts` returns **0** — there is zero abort signal awareness in the entire file. + +### Where it can happen? +- Any subagent tool call that requires permission +- Parallel subagents: one hangs on permission → parent hangs → all other parallel results blocked +- Even with auto-approve policies, edge cases (new tools, destructive operations) may still prompt + +### What any agent needs to look for? +``` +File: packages/opencode/src/permission/next.ts +Location: The ask() function (exported, ~line 143) +Pattern: The Promise constructor — no abort/timeout handling +Also check: How ask() is called from tool execution context +Also check: Whether an AbortSignal is available in the call chain +Also check: packages/opencode/src/session/prompt.ts for where permissions are requested +``` + +### How to make the fix? +Add AbortSignal support to the `ask()` function: + +```typescript +export function ask(input: AskInput & { abort?: AbortSignal }) { + return new Promise((resolve, reject) => { + // Check if already aborted + if (input.abort?.aborted) { + return reject(new Error("Permission request aborted")) + } + + // Listen for abort + const onAbort = () => { + reject(new Error("Permission request aborted")) + } + input.abort?.addEventListener("abort", onAbort, { once: true }) + + // ... existing permission logic ... + // Clean up abort listener in resolve/reject paths + }) +} +``` + +**Important**: The abort signal must be plumbed from `processor.ts` through the tool execution chain to `next.ts`. 
Trace the call path: +``` +processor.ts (has abort) → tool execution → specific tool → permission check → next.ts ask() +``` + +### Testing +- Trigger a subagent that needs permission +- Abort the parent session while permission is pending +- Verify the child permission promise rejects +- Verify the parent tool call resolves with an error (not hangs forever) + +--- + +## Issue #7: `retry-catch-all` — retry.ts Catch-All Makes All JSON Errors Retryable + +### Priority: P1 — Robustness + +### What is the issue? +The `retryable()` function in `retry.ts` has a catch-all at line ~96 that makes **any error with a parseable JSON response body** retryable. This means Bedrock 400 errors ("prompt is too long"), which should NOT be retried, get classified as retryable — fueling the infinite retry storm. + +### What is the bug? +`packages/opencode/src/session/retry.ts` line ~96: +```typescript +// After checking specific patterns (rate limit, overloaded, etc.)... +return JSON.stringify(json) // ← ANY remaining JSON error = retryable +``` + +The Bedrock "prompt is too long" error response is valid JSON with `"isRetryable": false` in the body, but the catch-all ignores this field and returns the body as a retryable error message. + +### Where it can happen? +- Any API error that returns a JSON response body +- Specifically: Bedrock validation errors (400), authentication errors, quota errors +- Combined with Issue #1 (no max retries), this creates infinite retry storms + +### What any agent needs to look for? +``` +File: packages/opencode/src/session/retry.ts +Location: The retryable() function, specifically the catch-all after all pattern checks +Pattern: The final `return JSON.stringify(json)` that runs for any unmatched JSON error +Also check: What specific patterns ARE checked before the catch-all +Also check: Whether the JSON body contains "isRetryable" or HTTP status fields +``` + +### How to make the fix? 
+Replace the blanket catch-all with HTTP status-aware classification: + +```typescript +// Instead of: return JSON.stringify(json) +// Use: +const status = (json as any).status ?? (json as any).statusCode +if (typeof status === "number" && status >= 400 && status < 500) { + // 4xx errors are client errors — NOT retryable (bad request, auth, not found, etc.) + return undefined +} +// 5xx and truly unknown → retryable (but capped by MAX_RETRIES from Issue #1) +return JSON.stringify(json) +``` + +Also check for the `isRetryable` field that Bedrock includes: +```typescript +if ((json as any).isRetryable === false) return undefined +``` + +**Note**: This fix is SAFER when combined with Issue #1 (MAX_RETRIES), since any misclassification is bounded by the retry cap. + +### Testing +- Send a Bedrock 400 "prompt is too long" error → verify NOT retried +- Send a 429 rate limit error → verify IS retried +- Send a 500 server error → verify IS retried (up to MAX_RETRIES) +- Send a JSON error with `isRetryable: false` → verify NOT retried + +--- + +## Issue #8: `tool-error-metadata` — processor.ts Drops Metadata on Tool Errors + +### Priority: P1 — Robustness + +### What is the issue? +When a tool execution errors, the tool-error handler in `processor.ts` rebuilds the tool state but **drops the `title` and `metadata` fields**. This means the UI loses the tool's display name and any navigation metadata (like `sessionId` for subagent links). + +### What is the bug? +`packages/opencode/src/session/processor.ts` lines ~207-218, the `"tool-error"` case: +```typescript +case "tool-error": { + const match = toolcalls[value.toolCallId] + if (match && match.state.status === "running") { + await Session.updatePart({ + ...match, + state: { + status: "error", + input: value.input ?? 
match.state.input, + error: (value.error as any).toString(), + // ❌ Missing: title: match.state.title, + // ❌ Missing: metadata: match.state.metadata, + time: { + start: match.state.time.start, + end: Date.now(), + }, + }, + }) + } +} +``` + +### Where it can happen? +- Any tool that errors during execution +- Most visible for task tool errors — the `sessionId` metadata (used for navigating to child sessions) is lost +- Also affects batch tool parts and any tool with custom title/metadata + +### What any agent needs to look for? +``` +File: packages/opencode/src/session/processor.ts +Location: The "tool-error" case in the stream event handler +Pattern: Compare the "tool-error" state update with the "tool-result" state update +The "tool-result" case preserves title and metadata, but "tool-error" does not +``` + +### How to make the fix? +Add `title` and `metadata` preservation to the error state: + +```typescript +case "tool-error": { + const match = toolcalls[value.toolCallId] + if (match && match.state.status === "running") { + await Session.updatePart({ + ...match, + state: { + status: "error", + input: value.input ?? match.state.input, + error: (value.error as any).toString(), + title: match.state.title, // ← ADD + metadata: match.state.metadata, // ← ADD + time: { + start: match.state.time.start, + end: Date.now(), + }, + }, + }) + } +} +``` + +### Testing +- Trigger a tool error (e.g., file read on non-existent path) +- Verify the error part in the UI shows the tool title +- Trigger a subagent error → verify the sessionId metadata is preserved in the error part + +--- + +## Issue #9: `batch-error-details` — batch.ts Output Lacks Per-Tool Error Details + +### Priority: P2 — Nice to Have + +### What is the issue? +When batch tool calls fail, the output summary only says `"Executed X/Y tools successfully. Z failed."` without including **which tools failed or why**. The LLM receiving this output cannot diagnose or intelligently retry the failures. 
+ +### What is the bug? +`packages/opencode/src/tool/batch.ts` output message: +```typescript +const outputMessage = failedCalls > 0 + ? `Executed ${successfulCalls}/${results.length} tools successfully. ${failedCalls} failed.` + : `All ${successfulCalls} tools executed successfully.` +``` + +Note: Individual tool-call parts ARE written to the database with their errors (via `Session.updatePart` in the catch block), so the UI shows them. But the **summary message returned to the LLM** lacks details. + +### Where it can happen? +- Any batch execution where one or more tools fail +- The LLM sees the summary but not the individual error details +- Can cause the LLM to blindly retry the same failing operations + +### What any agent needs to look for? +``` +File: packages/opencode/src/tool/batch.ts +Location: The outputMessage construction after Promise.all results +Pattern: The results array has { success, tool, error? } for each call +``` + +### How to make the fix? +Include per-tool error details in the output: + +```typescript +const outputMessage = failedCalls > 0 + ? [ + `Executed ${successfulCalls}/${results.length} tools successfully. ${failedCalls} failed.`, + "", + "Failed tools:", + ...results + .filter((r) => !r.success) + .map((r) => `- ${r.tool}: ${r.error instanceof Error ? 
r.error.message : String(r.error)}`), + ].join("\n") + : `All ${successfulCalls} tools executed successfully.\n\nKeep using the batch tool for optimal performance in your next response!` +``` + +### Testing +- Execute a batch with one intentionally failing tool (e.g., read non-existent file) +- Verify the output includes the tool name and error message +- Verify the LLM can see which tool failed and why + +--- + +## Implementation Order + +``` +✅ DONE — Commit 3670d5f2f: + #1 processor-max-retries → MAX_RETRIES=10 cap + #2 bedrock-undefined-message → "undefined" → responseBody fallback + #3 task-swallows-errors → result.info.error check in task.ts + #8 tool-error-metadata → metadata preserved on tool-error + +✅ DONE — Commit a8758b20f: + #4 bedrock-context-cap → 200K cap in both fromModelsDevModel + config path + #7 retry-catch-all → isRetryable:false + 4xx status guards + #9 batch-error-details → per-tool error details in output + +REMAINING (P0 — Needs deep plumbing): + #5 subagent-timeout → Hung subagent prevention + #6 permission-abort → Permission hang prevention +``` diff --git a/docs/09-temp/ui-overhaul-plan.md b/docs/09-temp/ui-overhaul-plan.md new file mode 100644 index 000000000000..70d205acaf1a --- /dev/null +++ b/docs/09-temp/ui-overhaul-plan.md @@ -0,0 +1,94 @@ +# UI/UX Overhaul Plan — OpenCode Desktop + +**Date:** 2026-02-24 +**Status:** ✅ PHASE 1 COMPLETE + +## User Requirements +- UI looks "very bad" — needs visual polish and tactile feel ✅ +- More themes and theme customization ✅ +- Better UI rendering quality ✅ +- Font size ✅ (fixed in PR #14821) +- Zoom in/out ✅ (already works via Cmd+/-/0) +- Wide mode ✅ (added in PR #14835) +- More UI settings options needed (future) + +## Phase 1 Changes (Completed) + +### 1. 
Font Rendering (`base.css`) +- Added `-webkit-font-smoothing: antialiased` for crisp text on macOS +- Added `-moz-osx-font-smoothing: grayscale` for Firefox +- Added `text-rendering: optimizeLegibility` for better kerning +- Added `scroll-behavior: smooth` for smooth scrolling + +### 2. Animation System (`animations.css`) +- Added CSS custom property easing tokens (`--ease-out-expo`, `--ease-spring`, etc.) +- Added duration tokens (`--duration-instant` through `--duration-slower`) +- Added new keyframes: `fadeIn`, `fadeInScale`, `slideInFromRight/Left/Bottom` +- Added `subtleGlow` for focus states, `shimmer` for loading, `spin` +- Halved stagger delay (50ms instead of 100ms) for snappier text reveals +- Added `prefers-reduced-motion: reduce` media query for accessibility + +### 3. Utilities (`utilities.css`) +- Added `::selection` styling with theme-aware color +- Added global transition defaults for all interactive elements +- Added `:focus-visible` ring with theme color +- Added thin scrollbar styling for scroll views +- Suppressed focus ring for components that handle their own + +### 4. Shadow/Depth System (`theme.css`) +- Refined `--shadow-xs` with slightly stronger presence +- Added new `--shadow-sm` level for subtle elevation +- Enhanced `--shadow-md` with deeper, more dramatic depth +- Enhanced `--shadow-lg` with softer, more premium feel +- Added new `--shadow-xl` for maximum elevation (modals, floating panels) + +### 5. Button Micro-Interactions (`button.css`) +- Added explicit transition for bg-color, border, box-shadow, transform, opacity +- Primary: hover now lifts with `--shadow-sm`, active presses with `scale(0.98)` +- Ghost: icon color transitions on hover, active presses with `scale(0.97)` +- Secondary: hover adds border shadow hint, active presses +- Disabled states now use `opacity: 0.6` for clearer visual feedback + +### 6. 
Card Polish (`card.css`) +- Upgraded border-radius from `--radius-md` to `--radius-lg` +- Added full transition for bg-color, border-color, box-shadow, transform +- Hover state now shows subtle border highlight and `--shadow-xs` elevation + +### 7. Dialog Animations (`dialog.css`) +- Overlay now uses `backdrop-filter: blur(4px)` for frosted glass effect +- Overlay opacity increased from 0.2 to 0.35 for better focus +- Content now uses combined `scale(0.96) + translateY(4px)` entrance +- Animation uses `cubic-bezier(0.16, 1, 0.3, 1)` expo-out for premium feel +- Added subtle 1px border ring on dialog content for depth definition +- Overlay entrance/exit now animated separately + +### 8. Icon Button Interactions (`icon-button.css`) +- Added explicit transitions for bg-color, box-shadow, transform +- Ghost variant: icon color now transitions on hover (to `--icon-hover`) +- Active state now scales to `0.92` for satisfying tactile press +- Icon SVG color now properly transitions through states +- Disabled state uses `opacity: 0.5` + +### 9. New Themes (3 premium additions) +- **Rosé Pine** — Dreamy, soft palette with purple/rose accents. Very popular community theme. +- **Kanagawa** — Japanese-inspired warm palette. Distinctive golden/purple tones based on "The Great Wave." +- **Everforest** — Calming green/earth tones nature-inspired palette. Easy on the eyes for long sessions. + +All themes include full light + dark variants with seeds, borders, surfaces, text, syntax highlighting, and markdown colors. 
+ +## Files Modified +- `packages/ui/src/styles/base.css` — Font rendering +- `packages/ui/src/styles/animations.css` — Animation system +- `packages/ui/src/styles/utilities.css` — Selection, focus, transitions, scrollbars +- `packages/ui/src/styles/theme.css` — Shadow system +- `packages/ui/src/components/button.css` — Button interactions +- `packages/ui/src/components/card.css` — Card polish +- `packages/ui/src/components/dialog.css` — Dialog animations +- `packages/ui/src/components/icon-button.css` — Icon button interactions +- `packages/ui/src/theme/themes/rosepine.json` — NEW +- `packages/ui/src/theme/themes/kanagawa.json` — NEW +- `packages/ui/src/theme/themes/everforest.json` — NEW +- `packages/ui/src/theme/default-themes.ts` — Theme registration + +## Build Status +✅ `vite build` passes with zero errors (7.98s) diff --git a/docs/09-temp/ui-redesign-spec.md b/docs/09-temp/ui-redesign-spec.md new file mode 100644 index 000000000000..718b5ee2ecfb --- /dev/null +++ b/docs/09-temp/ui-redesign-spec.md @@ -0,0 +1,37 @@ +# UI Redesign Spec + +## Reference Design +See HTML mockup provided by user. Key elements: + +### Sidebar +- "New Session" button with icon, primary color border +- "RECENT CHATS" section header (uppercase, tracking-wider) +- Chat items with icon + title + timestamp +- "CONTEXT" section with file list +- Bottom: plan usage bar + +### Message Timeline +- Assistant: Robot icon (32x32 rounded square) + "OPENCODE AI" label (uppercase, primary color, bold) +- User: Timestamp + "You" label (accent-cyan color, bold) +- User message: Glass panel, rounded-2xl with rounded-tr-none + +### Thinking Block +- Collapsible `
<details>` with: + - Cyan pulsing dot + "Thinking process..." text + - Expand/collapse arrow + - Mono font content with `>` prefix + - Border-top separator + +### Prompt Input +- Glass panel with backdrop-blur +- Model selector pills ("GPT-4o", "Web Search") +- Textarea +- Send button with primary color + glow shadow +- Bottom bar: keyboard shortcuts + sync status + +### Right Activity Bar +- Vertical icon strip: Extensions, Source Control, History +- Bottom: Settings + user avatar + +### Settings (from screenshot) +- Already looks reasonable, minor polish needed diff --git a/docs/plans/2025-02-26-aurora-design-system.md b/docs/plans/2025-02-26-aurora-design-system.md new file mode 100644 index 000000000000..9dd67f391618 --- /dev/null +++ b/docs/plans/2025-02-26-aurora-design-system.md @@ -0,0 +1,1977 @@ +# 🌌 Aurora Design System for opencode + +> **Vision**: "Code illuminated from within" +> +> A unified design language for opencode that creates an ethereal, digital-native interface where UI elements emit light rather than receive it. + +--- + +## Table of Contents + +1. [Design Vision Summary](#part-1-design-vision-summary) +2. [Design Approaches Explored](#part-2-design-approaches-explored) +3. [Color System](#part-3-color-system) +4. [Typography System](#part-4-typography-system) +5. [Spacing System](#part-5-spacing-system) +6. [Motion & Animation System](#part-6-motion--animation-system) +7. [Component Specifications](#part-7-component-specifications) +8. [TUI (Terminal) Component Translations](#part-8-tui-terminal-component-translations) +9. [Stitch Prompts for Visual Prototyping](#part-9-stitch-prompts-for-visual-prototyping) +10. [Final Summary & Implementation Guide](#part-10-final-summary--implementation-guide) +11.
[Accessibility & Review Amendments](#part-11-accessibility--review-amendments) + +--- + +## Part 1: Design Vision Summary + +### Design Requirements Gathered + +| Aspect | Choice | +|--------|--------| +| **Scope** | Unified design language (Web Console + Terminal UI) | +| **Tone** | Luxury Minimal | +| **Color** | Dark-first luxury with luminous accents | +| **Motion** | Confident, tactile, functional | +| **Reference** | Future-forward (Tesla/Rivian interiors) | + +### Core Identity + +``` +┌─────────────────────────────────────────────────────────────┐ +│ │ +│ opencode AURORA │ +│ │ +│ "Code illuminated from within" │ +│ │ +│ Not a tool that shows you code— │ +│ A window into a dimension where code IS light. │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Design Principles + +| Principle | Description | Implementation | +|-----------|-------------|----------------| +| **Light as Material** | UI elements emit light rather than receive it | Glows, gradients, luminous borders | +| **Depth through Transparency** | Layers visible through glassmorphism | backdrop-blur, low-opacity backgrounds | +| **Confident Motion** | Every animation serves purpose and feels physical | Spring physics, 200-300ms durations | +| **Chromatic Restraint** | Rich palette but used sparingly | Monochrome base, color for meaning | +| **Unified Language** | Same DNA across Web and TUI | Shared color tokens, adapted to medium | + +--- + +## Part 2: Design Approaches Explored + +Three design approaches were explored before settling on Aurora: + +### Approach A: "Carbon Fiber" — Industrial Luxury (Rejected) + +**Concept:** Premium materials meet precision engineering. Think machined aluminum bezels, carbon fiber textures, and surgical-grade steel accents. 
+ +**Web Console:** +``` +┌─────────────────────────────────────────────────────────────┐ +│ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ │ +│ Background: Subtle carbon weave pattern with depth │ +│ Cards: Brushed metal finish with soft inner glow │ +│ Accent: Copper/rose gold highlights (warm against cold) │ +│ │ +│ 3D Elements: │ +│ • Cards tilt on hover (perspective transform) │ +│ • Depth shadows that respond to mouse position │ +│ • Metallic sheen that catches virtual "light" │ +│ │ +│ Motion: │ +│ • Spring-based button depressions (like mechanical keys) │ +│ • Smooth state transitions with mass/velocity physics │ +│ • Loading: Rotating machined bezel indicator │ +└─────────────────────────────────────────────────────────────┘ +``` + +**TUI Translation:** +``` +┌─ SESSION: Project Analysis ─────────────── ◈ ────┐ +│ │ +│ ▓▓▓▓▓▓▓▓▓▓▓▓▓░░░░░░░░░░░░░░ Processing... │ +│ │ +│ ╭──────────────────────────────────────────╮ │ +│ │ ◆ Analyzing codebase │ │ +│ │ └─ Found 127 TypeScript files │ │ +│ │ └─ Detected SolidJS framework │ │ +│ ╰──────────────────────────────────────────╯ │ +│ │ +│ ┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄ │ +│ Unicode: Heavy borders, diamond bullets │ +│ Colors: Warm copper (#B87333) on charcoal │ +└───────────────────────────────────────────────────┘ +``` + +**Pros:** +- Distinctive, memorable aesthetic +- Strong brand identity ("the tool that feels engineered") +- Warm accent prevents cold/sterile feeling + +**Cons:** +- Carbon texture could feel dated if not executed perfectly +- Copper might clash with some terminal color schemes +- More complex to implement subtle material effects + +**Verdict:** Too industrial - user wanted something more ethereal/digital, less physical materials + +--- + +### Approach B: "Aurora" — Digital Luminescence (SELECTED ✓) + +**Concept:** Pure light and energy. No physical materials—just gradients, glows, and luminous color that feels alive. 
Like looking at code through a prism of pure digital light. + +**Web Console:** +``` +┌─────────────────────────────────────────────────────────────┐ +│ │ +│ Background: Deep void black (#0A0A0F) with subtle │ +│ animated gradient aurora (very slow drift) │ +│ │ +│ Cards: Glassmorphism with luminous edge glow │ +│ bg: rgba(255,255,255,0.03) │ +│ border: gradient (cyan → purple → magenta) │ +│ backdrop-filter: blur(20px) │ +│ │ +│ Accent Colors (shifting spectrum): │ +│ • Primary: Electric Cyan (#00D4FF) │ +│ • Secondary: Soft Violet (#A78BFA) │ +│ • Tertiary: Rose (#FF6B9D) │ +│ │ +│ 3D Elements: │ +│ • Subtle glow pulses (like breathing light) │ +│ • Hover reveals inner luminescence │ +│ • Focus states bloom with soft radiance │ +│ • Depth through layered transparency, not shadows │ +│ │ +│ Motion: │ +│ • Smooth spring physics on all interactions │ +│ • Elements "float" with subtle parallax │ +│ • Loading: Gradient shimmer / aurora wave │ +│ • Transitions: Fade + scale with glow trail │ +└─────────────────────────────────────────────────────────────┘ +``` + +**Light Theme Variant:** +``` +┌─────────────────────────────────────────────────────────────┐ +│ Background: Soft pearl (#FAFAFA) with subtle iridescence │ +│ Cards: Frosted glass with prismatic edge highlights │ +│ Accents: Deeper cyan, rich violet (contrast preserved) │ +│ Effect: "Daylight aurora" - colors visible but softer │ +└─────────────────────────────────────────────────────────────┘ +``` + +**TUI Translation:** +``` +╭───────────────────────────────────────────────────────╮ +│ ● opencode ◐ processing │ +├───────────────────────────────────────────────────────┤ +│ │ +│ ┃ Analyzing your codebase... 
│ +│ ┃ │ +│ ├─● packages/opencode/src/cli/ │ +│ │ ├─○ cmd/tui/app.tsx │ +│ │ ├─○ cmd/tui/context/theme.tsx │ +│ │ └─● cmd/tui/routes/session/ │ +│ │ └─○ index.tsx ← focus │ +│ │ │ +│ ╰─ Found 247 files in 3.2s │ +│ │ +│ ░░░░░░░░░░░░░░░░░░░░░░░▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ │ +│ │ +╰───────────────────────────────────────────────────────╯ +│ Unicode: Rounded corners, thin lines, ● ○ bullets │ +│ Colors: Cyan/violet/magenta gradient hierarchy │ +│ Effect: "Glowing" text via bright-on-dark contrast │ +└───────────────────────────────────────────────────────┘ +``` + +**Key Differentiators:** +- **Depth through light, not shadow** — Elements glow from within rather than casting shadows +- **Living gradients** — Subtle color shifts that feel organic, not static +- **Ethereal presence** — UI feels like it exists in digital space, weightless + +**Pros:** +- Truly unique aesthetic (few tools look like this) +- Perfectly digital - no physical material metaphors +- Light/dark themes can share the same luminous DNA +- Scalable: subtle for everyday use, dramatic for hero moments + +**Cons:** +- Risk of "gaming aesthetic" if not carefully restrained +- Gradient animations need to be VERY subtle or becomes distracting +- Performance consideration for animated gradients + +**Verdict:** Selected as final direction - ethereal, digital, distinctive + +--- + +### Approach C: Not Developed + +Since Approach B (Aurora) was selected immediately, a third approach was not fully developed. 
+ +--- + +## Part 3: Color System + +### Dark Theme (Primary) + +```css +/* ═══════════════════════════════════════════════════════════ + AURORA DARK — PRIMARY THEME + ═══════════════════════════════════════════════════════════ */ + +:root[data-theme="aurora-dark"] { + /* ─── VOID BACKGROUNDS ─── */ + --void-deepest: #050508; /* True dark, almost black */ + --void-deep: #0A0A0F; /* Primary background */ + --void-base: #0F0F14; /* Card backgrounds */ + --void-elevated: #14141A; /* Elevated surfaces */ + --void-hover: #1A1A22; /* Hover states */ + + /* ─── SURFACE GLASS ─── */ + --glass-subtle: rgba(255, 255, 255, 0.02); + --glass-light: rgba(255, 255, 255, 0.04); + --glass-medium: rgba(255, 255, 255, 0.06); + --glass-strong: rgba(255, 255, 255, 0.08); + + /* ─── LUMINOUS SPECTRUM ─── */ + --aurora-cyan: #00D4FF; /* Primary accent */ + --aurora-cyan-soft: #00A3CC; /* Cyan muted */ + --aurora-cyan-glow: rgba(0, 212, 255, 0.15); + + --aurora-violet: #A78BFA; /* Secondary accent */ + --aurora-violet-soft:#8B6ED9; + --aurora-violet-glow:rgba(167, 139, 250, 0.15); + + --aurora-rose: #FF6B9D; /* Tertiary / attention */ + --aurora-rose-soft: #D94A7B; + --aurora-rose-glow: rgba(255, 107, 157, 0.15); + + --aurora-amber: #FFBB33; /* Warning / warm accent */ + --aurora-green: #4ADE80; /* Success */ + --aurora-red: #F87171; /* Error / danger */ + + /* ─── TEXT HIERARCHY ─── */ + --text-primary: #F5F5F7; /* Bright white */ + --text-secondary: #A1A1AA; /* Muted gray */ + --text-tertiary: #71717A; /* Subtle gray */ + --text-disabled: #3F3F46; /* Very dim */ + + /* ─── BORDER LUMINANCE ─── */ + --border-subtle: rgba(255, 255, 255, 0.06); + --border-default: rgba(255, 255, 255, 0.10); + --border-strong: rgba(255, 255, 255, 0.15); + --border-glow: var(--aurora-cyan); +} +``` + +### Light Theme (Secondary) + +```css +/* ═══════════════════════════════════════════════════════════ + AURORA LIGHT — DAYLIGHT VARIANT + ═══════════════════════════════════════════════════════════ */ + 
+:root[data-theme="aurora-light"] { + /* ─── PEARL BACKGROUNDS ─── */ + --void-deepest: #FFFFFF; + --void-deep: #FAFAFA; + --void-base: #F4F4F5; + --void-elevated: #FFFFFF; + --void-hover: #E4E4E7; + + /* ─── SURFACE FROST ─── */ + --glass-subtle: rgba(0, 0, 0, 0.02); + --glass-light: rgba(0, 0, 0, 0.04); + --glass-medium: rgba(0, 0, 0, 0.06); + --glass-strong: rgba(0, 0, 0, 0.08); + + /* ─── LUMINOUS SPECTRUM (deeper for contrast) ─── */ + --aurora-cyan: #0891B2; /* Deeper cyan */ + --aurora-cyan-soft: #06B6D4; + --aurora-cyan-glow: rgba(8, 145, 178, 0.10); + + --aurora-violet: #7C3AED; /* Richer violet */ + --aurora-violet-soft:#8B5CF6; + --aurora-violet-glow:rgba(124, 58, 237, 0.10); + + --aurora-rose: #DB2777; /* Deeper rose */ + --aurora-rose-soft: #EC4899; + --aurora-rose-glow: rgba(219, 39, 119, 0.10); + + --aurora-amber: #D97706; + --aurora-green: #16A34A; + --aurora-red: #DC2626; + + /* ─── TEXT HIERARCHY ─── */ + --text-primary: #18181B; + --text-secondary: #52525B; + --text-tertiary: #A1A1AA; + --text-disabled: #D4D4D8; + + /* ─── BORDER LUMINANCE ─── */ + --border-subtle: rgba(0, 0, 0, 0.06); + --border-default: rgba(0, 0, 0, 0.10); + --border-strong: rgba(0, 0, 0, 0.15); + --border-glow: var(--aurora-cyan); +} +``` + +### TUI Color Mapping + +```typescript +// Aurora theme for terminal (TUI) +export const auroraDark = { + // Backgrounds (mapped to closest ANSI/24-bit) + background: RGBA.fromHex("#0A0A0F"), + backgroundPanel: RGBA.fromHex("#0F0F14"), + backgroundElement: RGBA.fromHex("#14141A"), + backgroundMenu: RGBA.fromHex("#1A1A22"), + + // Aurora spectrum + primary: RGBA.fromHex("#00D4FF"), // Cyan + secondary: RGBA.fromHex("#A78BFA"), // Violet + accent: RGBA.fromHex("#FF6B9D"), // Rose + + // Semantic + success: RGBA.fromHex("#4ADE80"), + warning: RGBA.fromHex("#FFBB33"), + error: RGBA.fromHex("#F87171"), + info: RGBA.fromHex("#00D4FF"), + + // Text + text: RGBA.fromHex("#F5F5F7"), + textMuted: RGBA.fromHex("#A1A1AA"), + + // Borders + border: 
RGBA.fromHex("#1E1E26"), + borderActive: RGBA.fromHex("#00D4FF"), + borderSubtle: RGBA.fromHex("#14141A"), + + // Syntax highlighting (aurora-themed) + syntaxKeyword: RGBA.fromHex("#A78BFA"), // Violet + syntaxFunction: RGBA.fromHex("#00D4FF"), // Cyan + syntaxString: RGBA.fromHex("#4ADE80"), // Green + syntaxNumber: RGBA.fromHex("#FF6B9D"), // Rose + syntaxComment: RGBA.fromHex("#71717A"), // Muted + syntaxVariable: RGBA.fromHex("#F5F5F7"), // White + syntaxType: RGBA.fromHex("#FFBB33"), // Amber + syntaxOperator: RGBA.fromHex("#A1A1AA"), + syntaxPunctuation: RGBA.fromHex("#71717A"), + + // Diff colors + diffAdded: RGBA.fromHex("#4ADE80"), + diffRemoved: RGBA.fromHex("#F87171"), + diffAddedBg: RGBA.fromHex("#0D2818"), + diffRemovedBg: RGBA.fromHex("#2D1216"), +} +``` + +--- + +## Part 4: Typography System + +### Font Stack + +```css +/* ═══════════════════════════════════════════════════════════ + AURORA TYPOGRAPHY + ═══════════════════════════════════════════════════════════ */ + +:root { + /* ─── PRIMARY: Code & Interface ─── */ + --font-mono: "JetBrains Mono", "SF Mono", "Fira Code", + "Cascadia Code", monospace; + + /* ─── DISPLAY: Headers & Hero Text ─── */ + /* Option A: Geometric (Future-forward) */ + --font-display: "Geist", "Inter", "SF Pro Display", + system-ui, sans-serif; + + /* Option B: More distinctive (if we want stronger brand) */ + /* --font-display: "Space Grotesk", "Outfit", sans-serif; */ + + /* ─── BODY: Documentation & Long-form ─── */ + --font-body: "Inter", "SF Pro Text", system-ui, sans-serif; +} +``` + +### Type Scale + +```css +/* ─── MODULAR SCALE: 1.250 (Major Third) ─── */ + +:root { + --text-xs: 0.64rem; /* 10.24px - Labels, captions */ + --text-sm: 0.8rem; /* 12.8px - Small UI text */ + --text-base: 1rem; /* 16px - Body text */ + --text-md: 1.25rem; /* 20px - Large body */ + --text-lg: 1.563rem; /* 25px - Section headers */ + --text-xl: 1.953rem; /* 31.25px - Page headers */ + --text-2xl: 2.441rem; /* 39px - Hero subheads */ + 
--text-3xl: 3.052rem; /* 48.8px - Hero headlines */ + --text-4xl: 3.815rem; /* 61px - Display text */ + + /* ─── LINE HEIGHTS ─── */ + --leading-none: 1; + --leading-tight: 1.25; + --leading-snug: 1.375; + --leading-normal: 1.5; + --leading-relaxed: 1.625; + --leading-loose: 1.75; + + /* ─── LETTER SPACING ─── */ + --tracking-tighter: -0.05em; + --tracking-tight: -0.025em; + --tracking-normal: 0; + --tracking-wide: 0.025em; + --tracking-wider: 0.05em; + + /* ─── FONT WEIGHTS ─── */ + --weight-normal: 400; + --weight-medium: 500; + --weight-semibold: 600; + --weight-bold: 700; +} +``` + +### Typography Classes + +```css +/* ─── SEMANTIC TEXT STYLES ─── */ + +.text-display-hero { + font-family: var(--font-display); + font-size: var(--text-4xl); + font-weight: var(--weight-bold); + line-height: var(--leading-none); + letter-spacing: var(--tracking-tighter); +} + +.text-display-title { + font-family: var(--font-display); + font-size: var(--text-2xl); + font-weight: var(--weight-semibold); + line-height: var(--leading-tight); + letter-spacing: var(--tracking-tight); +} + +.text-heading-lg { + font-family: var(--font-display); + font-size: var(--text-xl); + font-weight: var(--weight-semibold); + line-height: var(--leading-snug); +} + +.text-heading-md { + font-family: var(--font-display); + font-size: var(--text-lg); + font-weight: var(--weight-medium); + line-height: var(--leading-snug); +} + +.text-body { + font-family: var(--font-body); + font-size: var(--text-base); + font-weight: var(--weight-normal); + line-height: var(--leading-relaxed); +} + +.text-body-sm { + font-family: var(--font-body); + font-size: var(--text-sm); + line-height: var(--leading-normal); +} + +.text-code { + font-family: var(--font-mono); + font-size: var(--text-sm); + line-height: var(--leading-normal); + font-variant-ligatures: contextual; /* Enable code ligatures */ +} + +.text-label { + font-family: var(--font-mono); + font-size: var(--text-xs); + font-weight: var(--weight-medium); + 
letter-spacing: var(--tracking-wide);
+  text-transform: uppercase;
+}
+```
+
+---
+
+## Part 5: Spacing System
+
+```css
+/* ═══════════════════════════════════════════════════════════
+   AURORA SPACING — 4px Base Grid
+   ═══════════════════════════════════════════════════════════ */
+
+:root {
+  --space-px: 1px;
+  --space-0: 0;
+  --space-0-5: 0.125rem;  /* 2px */
+  --space-1: 0.25rem;     /* 4px */
+  --space-1-5: 0.375rem;  /* 6px */
+  --space-2: 0.5rem;      /* 8px */
+  --space-2-5: 0.625rem;  /* 10px */
+  --space-3: 0.75rem;     /* 12px */
+  --space-3-5: 0.875rem;  /* 14px */
+  --space-4: 1rem;        /* 16px */
+  --space-5: 1.25rem;     /* 20px */
+  --space-6: 1.5rem;      /* 24px */
+  --space-7: 1.75rem;     /* 28px */
+  --space-8: 2rem;        /* 32px */
+  --space-9: 2.25rem;     /* 36px */
+  --space-10: 2.5rem;     /* 40px */
+  --space-11: 2.75rem;    /* 44px */
+  --space-12: 3rem;       /* 48px */
+  --space-14: 3.5rem;     /* 56px */
+  --space-16: 4rem;       /* 64px */
+  --space-20: 5rem;       /* 80px */
+  --space-24: 6rem;       /* 96px */
+  --space-28: 7rem;       /* 112px */
+  --space-32: 8rem;       /* 128px */
+
+  /* ─── SEMANTIC SPACING ─── */
+  --gap-xs: var(--space-1);    /* 4px - Inline elements */
+  --gap-sm: var(--space-2);    /* 8px - Tight groups */
+  --gap-md: var(--space-4);    /* 16px - Default gap */
+  --gap-lg: var(--space-6);    /* 24px - Section spacing */
+  --gap-xl: var(--space-8);    /* 32px - Major sections */
+  --gap-2xl: var(--space-12);  /* 48px - Page sections */
+
+  /* ─── COMPONENT PADDING ─── */
+  --padding-button: var(--space-2) var(--space-4);
+  --padding-button-sm: var(--space-1-5) var(--space-3);
+  --padding-button-lg: var(--space-3) var(--space-6);
+
+  --padding-card: var(--space-5);
+  --padding-card-sm: var(--space-3);
+  --padding-card-lg: var(--space-6);
+
+  --padding-input: var(--space-2-5) var(--space-3);
+
+  /* ─── BORDER RADIUS ─── */
+  --radius-none: 0;
+  --radius-sm: 0.25rem;   /* 4px - Small elements */
+  --radius-md: 0.5rem;    /* 8px - Buttons, inputs */
+  --radius-lg: 0.75rem;   /* 12px - Cards */
+  --radius-xl: 1rem;      /* 16px 
- Large cards */ + --radius-2xl: 1.5rem; /* 24px - Modals */ + --radius-full: 9999px; /* Pills, avatars */ +} +``` + +--- + +## Part 6: Motion & Animation System + +```css +/* ═══════════════════════════════════════════════════════════ + AURORA MOTION — Spring-Based Animation + ═══════════════════════════════════════════════════════════ */ + +:root { + /* ─── DURATION ─── */ + --duration-instant: 50ms; + --duration-fast: 150ms; + --duration-normal: 250ms; + --duration-slow: 350ms; + --duration-slower: 500ms; + --duration-slowest: 700ms; + + /* ─── EASING (CSS) ─── */ + --ease-linear: linear; + --ease-in: cubic-bezier(0.4, 0, 1, 1); + --ease-out: cubic-bezier(0, 0, 0.2, 1); + --ease-in-out: cubic-bezier(0.4, 0, 0.2, 1); + + /* ─── SPRING EASING (For Motion library) ─── */ + --spring-bounce: cubic-bezier(0.34, 1.56, 0.64, 1); + --spring-smooth: cubic-bezier(0.22, 1, 0.36, 1); + --spring-snappy: cubic-bezier(0.16, 1, 0.3, 1); + + /* ─── SEMANTIC TRANSITIONS ─── */ + --transition-colors: color var(--duration-fast) var(--ease-out), + background-color var(--duration-fast) var(--ease-out), + border-color var(--duration-fast) var(--ease-out); + + --transition-opacity: opacity var(--duration-normal) var(--ease-out); + + --transition-transform: transform var(--duration-normal) var(--spring-smooth); + + --transition-all: all var(--duration-normal) var(--spring-smooth); + + --transition-glow: box-shadow var(--duration-slow) var(--ease-out); +} +``` + +### Motion Principles + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA MOTION PRINCIPLES │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ 1. ENTER: Scale up + fade in (0.95 → 1.0, 0 → 1) │ +│ 2. EXIT: Scale down + fade out (1.0 → 0.95, 1 → 0) │ +│ 3. HOVER: Subtle lift (translateY -2px) + glow increase │ +│ 4. PRESS: Slight compression (scale 0.98) │ +│ 5. 
FOCUS: Glow ring expansion │ +│ │ +│ Key insight: Aurora elements GLOW more on interaction, │ +│ they don't cast shadows—they emit light. │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Animation Keyframes + +```css +/* ─── ENTRY ANIMATIONS ─── */ +@keyframes aurora-fade-in { + from { opacity: 0; } + to { opacity: 1; } +} + +@keyframes aurora-scale-in { + from { + opacity: 0; + transform: scale(0.95); + } + to { + opacity: 1; + transform: scale(1); + } +} + +@keyframes aurora-slide-up { + from { + opacity: 0; + transform: translateY(8px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +/* ─── GLOW PULSE (for loading/processing) ─── */ +@keyframes aurora-pulse { + 0%, 100% { + opacity: 1; + box-shadow: 0 0 0 0 var(--aurora-cyan-glow); + } + 50% { + opacity: 0.8; + box-shadow: 0 0 20px 4px var(--aurora-cyan-glow); + } +} + +/* ─── SHIMMER (for skeleton loaders) ─── */ +@keyframes aurora-shimmer { + 0% { + background-position: -200% 0; + } + 100% { + background-position: 200% 0; + } +} + +/* ─── GRADIENT DRIFT (for hero backgrounds) ─── */ +@keyframes aurora-drift { + 0%, 100% { + background-position: 0% 50%; + } + 50% { + background-position: 100% 50%; + } +} +``` + +--- + +## Part 7: Component Specifications + +### 7.1 Buttons + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA BUTTONS │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ PRIMARY (Glowing CTA) │ +│ ┌─────────────────────────────────────┐ │ +│ │ ◉ Start Session │ ← Cyan glow ring │ +│ └─────────────────────────────────────┘ │ +│ bg: var(--aurora-cyan) │ +│ text: var(--void-deepest) │ +│ hover: glow expands, brightness +10% │ +│ active: scale(0.98), glow contracts │ +│ │ +│ SECONDARY (Glass) │ +│ ┌─────────────────────────────────────┐ │ +│ │ View History │ ← Subtle border │ +│ └─────────────────────────────────────┘ │ +│ bg: var(--glass-light) │ +│ border: var(--border-default) │ +│ hover: bg → 
glass-medium, border glows │ +│ │ +│ GHOST (Minimal) │ +│ ┌─────────────────────────────────────┐ │ +│ │ Cancel │ ← No bg │ +│ └─────────────────────────────────────┘ │ +│ bg: transparent │ +│ hover: var(--glass-subtle) │ +│ │ +│ DANGER (Warning glow) │ +│ ┌─────────────────────────────────────┐ │ +│ │ Delete Session │ ← Red glow │ +│ └─────────────────────────────────────┘ │ +│ bg: var(--aurora-red) at 15% opacity │ +│ border: var(--aurora-red) │ +│ hover: red glow expands │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +```css +/* Primary Button */ +.btn-primary { + background: var(--aurora-cyan); + color: var(--void-deepest); + padding: var(--padding-button); + border-radius: var(--radius-md); + font-weight: var(--weight-medium); + transition: var(--transition-all); + box-shadow: + 0 0 0 0 var(--aurora-cyan-glow), + 0 0 20px -5px var(--aurora-cyan); +} + +.btn-primary:hover { + box-shadow: + 0 0 0 4px var(--aurora-cyan-glow), + 0 0 30px -5px var(--aurora-cyan); + filter: brightness(1.1); +} + +.btn-primary:active { + transform: scale(0.98); + box-shadow: + 0 0 0 2px var(--aurora-cyan-glow), + 0 0 15px -5px var(--aurora-cyan); +} + +/* Secondary Button */ +.btn-secondary { + background: var(--glass-light); + border: 1px solid var(--border-default); + color: var(--text-primary); + padding: var(--padding-button); + border-radius: var(--radius-md); + backdrop-filter: blur(8px); + transition: var(--transition-all); +} + +.btn-secondary:hover { + background: var(--glass-medium); + border-color: var(--aurora-cyan); + box-shadow: 0 0 15px -5px var(--aurora-cyan-glow); +} +``` + +--- + +### 7.2 Cards + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA CARDS │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ GLASS CARD (Default) │ +│ ╭───────────────────────────────────────────╮ │ +│ │ ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ │ │ +│ │ │ │ +│ │ Session Title │ │ +│ │ Subtitle or metadata │ │ 
+│ │ │ │ +│ │ Content area with sufficient padding │ │ +│ │ for comfortable reading and scanning. │ │ +│ │ │ │ +│ ╰───────────────────────────────────────────╯ │ +│ │ +│ Properties: │ +│ • bg: var(--glass-light) │ +│ • border: 1px solid var(--border-subtle) │ +│ • border-radius: var(--radius-lg) │ +│ • backdrop-filter: blur(12px) │ +│ • padding: var(--padding-card) │ +│ │ +│ ELEVATED CARD (Interactive) │ +│ ╭───────────────────────────────────────────╮ │ +│ │ │ ← Hover: │ +│ │ Select Provider │ Lift + │ +│ │ Choose your AI model │ Glow │ +│ │ │ │ +│ │ → Claude 4 → GPT-4 │ │ +│ │ │ │ +│ ╰───────────────────────────────────────────╯ │ +│ │ +│ Hover state: │ +│ • transform: translateY(-2px) │ +│ • border-color: var(--aurora-cyan) │ +│ • box-shadow: 0 0 30px -10px var(--aurora-cyan-glow) │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +### 7.3 Input Fields + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA INPUTS │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ DEFAULT STATE │ +│ ┌─────────────────────────────────────────────────┐ │ +│ │ Ask opencode anything... 
│ │ +│ └─────────────────────────────────────────────────┘ │ +│ bg: var(--glass-subtle) │ +│ border: var(--border-subtle) │ +│ text: var(--text-tertiary) ← placeholder │ +│ │ +│ FOCUS STATE │ +│ ╭─────────────────────────────────────────────────╮ │ +│ │ How do I implement_ ░▓▓▓ │ │ +│ ╰═════════════════════════════════════════════════╯ │ +│ ↑ ↑ │ +│ Cyan glow border Cursor pulse │ +│ │ +│ border: 2px solid var(--aurora-cyan) │ +│ box-shadow: 0 0 0 4px var(--aurora-cyan-glow) │ +│ bg: var(--glass-light) │ +│ │ +│ ERROR STATE │ +│ ╭─────────────────────────────────────────────────╮ │ +│ │ Invalid API key │ │ +│ ╰═════════════════════════════════════════════════╯ │ +│ border-color: var(--aurora-red) │ +│ box-shadow: 0 0 0 4px var(--aurora-red-glow) │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +### 7.4 The Prompt Input (Hero Component) + +This is the MOST IMPORTANT component—the main chat input: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA PROMPT INPUT │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ ╭═══════════════════════════════════════════════════════╮ │ +│ ║ ║ │ +│ ║ │ How can I refactor this React component ║ │ +│ ║ │ to use hooks instead of class components?_ ║ │ +│ ║ ║ │ +│ ╟───────────────────────────────────────────────────────╢ │ +│ ║ ◎ @file ◎ @folder ◎ @web [⌘ + Enter] ║ │ +│ ╚═══════════════════════════════════════════════════════╝ │ +│ │ +│ Design Details: │ +│ • Double-line border with subtle gradient │ +│ • Inner glow when focused (aurora-cyan) │ +│ • Attachment chips below with hover states │ +│ • Send button pulses subtly when ready │ +│ • Expands smoothly as content grows │ +│ │ +│ Animation: │ +│ • On focus: border brightens, inner glow appears │ +│ • On type: subtle scale micro-pulse (1.002x) │ +│ • On send: content slides up + fades, input shrinks │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +### 7.5 
Message Bubbles + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA MESSAGE BUBBLES │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ USER MESSAGE │ +│ ╭─────────────────────────────╮ │ +│ │ How do I fix this TypeScript│ │ +│ │ error in my component? │ │ +│ ╰─────────────────────────────╯ │ +│ │ +│ • Aligned right │ +│ • bg: var(--aurora-cyan) at 15% opacity │ +│ • border-left: 2px solid var(--aurora-cyan) │ +│ • Subtle cyan tint │ +│ │ +│ ASSISTANT MESSAGE │ +│ ╭──────────────────────────────────────────────────────╮ │ +│ │ ◈ Let me help you with that TypeScript error. │ │ +│ │ │ │ +│ │ The issue is that your component expects a │ │ +│ │ `string` but receives `string | undefined`. │ │ +│ │ │ │ +│ │ ```typescript │ │ +│ │ // Add type guard │ │ +│ │ if (typeof value === 'string') { │ │ +│ │ processValue(value) │ │ +│ │ } │ │ +│ │ ``` │ │ +│ ╰──────────────────────────────────────────────────────╯ │ +│ │ +│ • Aligned left │ +│ • bg: var(--glass-light) │ +│ • border-left: 2px solid var(--aurora-violet) │ +│ • Code blocks: var(--void-elevated) bg │ +│ │ +│ STREAMING STATE │ +│ ╭──────────────────────────────────────────────────────╮ │ +│ │ ◈ Analyzing your codebase... 
│ │ +│ │ │ │ +│ │ ▓▓▓▓▓▓▓▓░░░░░░░░░░░░ Scanning files │ │ +│ │ ◌ ◌ ◌ ← Pulsing dots │ │ +│ ╰──────────────────────────────────────────────────────╯ │ +│ │ +│ • Shimmer effect on loading areas │ +│ • Typing indicator: 3 dots with staggered pulse │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +### 7.6 Navigation & Header + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA HEADER │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ │ +│ ┃ ◈ opencode Session: Project Analysis ┃ │ +│ ┃ ⚙ ◐ ▤ ┃ │ +│ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ │ +│ │ +│ Properties: │ +│ • bg: var(--void-elevated) with backdrop-blur │ +│ • border-bottom: 1px solid var(--border-subtle) │ +│ • position: sticky │ +│ • Logo: ◈ glyph with subtle cyan glow │ +│ • Session title: truncated with ellipsis │ +│ • Actions: icon buttons with hover glow │ +│ │ +│ SIDEBAR (Collapsed) │ +│ ┌──┐ │ +│ │◈│ ← Logo only │ +│ │──│ │ +│ │⊕│ ← New session │ +│ │📄│ ← Recent │ +│ │⚙│ ← Settings │ +│ └──┘ │ +│ │ +│ SIDEBAR (Expanded) │ +│ ╭────────────────────────╮ │ +│ │ ◈ opencode │ │ +│ ├────────────────────────┤ │ +│ │ ⊕ New Session │ │ +│ ├────────────────────────┤ │ +│ │ RECENT │ │ +│ │ ├─ Project Analysis │ ← Selected, cyan highlight │ +│ │ ├─ Code Review │ │ +│ │ └─ Bug Investigation │ │ +│ ├────────────────────────┤ │ +│ │ ⚙ Settings │ │ +│ ╰────────────────────────╯ │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +### 7.7 Dialogs & Modals + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA MODALS │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ BACKDROP │ +│ ░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ │ +│ bg: rgba(5, 5, 8, 0.8) │ +│ backdrop-filter: blur(4px) │ +│ │ +│ MODAL CARD │ +│ ╭═══════════════════════════════════════════════════════╮ │ 
+│ ║ SELECT MODEL ✕ ║ │ +│ ╟───────────────────────────────────────────────────────╢ │ +│ ║ ║ │ +│ ║ ◉ Claude 4 Opus ║ │ +│ ║ Best for complex reasoning ║ │ +│ ║ ║ │ +│ ║ ○ Claude 4 Sonnet ║ │ +│ ║ Balanced performance ║ │ +│ ║ ║ │ +│ ║ ○ GPT-4o ║ │ +│ ║ OpenAI's flagship ║ │ +│ ║ ║ │ +│ ╟───────────────────────────────────────────────────────╢ │ +│ ║ [Cancel] [ Confirm ] ║ │ +│ ╚═══════════════════════════════════════════════════════╝ │ +│ │ +│ Entry animation: │ +│ • Backdrop fades in (0→1, 200ms) │ +│ • Modal scales + fades (0.95→1, 0→1, 250ms, spring) │ +│ │ +│ Exit animation: │ +│ • Modal scales down + fades (1→0.95, 1→0, 150ms) │ +│ • Backdrop fades out (1→0, 150ms) │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +## Part 8: TUI (Terminal) Component Translations + +The terminal can't do true 3D or blur, but we can create the *feeling* of Aurora through: + +### 8.1 Aurora TUI Character Palette + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA TUI — CHARACTER DESIGN LANGUAGE │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ ─── BORDERS ─── │ +│ Light: ─ │ ┌ ┐ └ ┘ ├ ┤ ┬ ┴ ┼ │ +│ Rounded: ╭ ╮ ╰ ╯ │ +│ Heavy: ━ ┃ ┏ ┓ ┗ ┛ ┣ ┫ ┳ ┻ ╋ │ +│ Double: ═ ║ ╔ ╗ ╚ ╝ ╠ ╣ ╦ ╩ ╬ │ +│ │ +│ ─── AURORA PREFERENCE ─── │ +│ Primary borders: ╭ ─ ╮ (rounded, elegant) │ +│ │ │ │ +│ ╰ ─ ╯ │ +│ │ +│ Active/Focus: ╭═══╮ (double top = "glow") │ +│ │ │ │ +│ ╰───╯ │ +│ │ +│ ─── BULLETS & MARKERS ─── │ +│ Filled: ● ◉ ◆ ◈ ■ ▲ ▶ │ +│ Empty: ○ ◇ □ △ ▷ │ +│ Special: ◐ ◑ ◒ ◓ (half-filled, for progress) │ +│ │ +│ Aurora preference: │ +│ • Logo/brand: ◈ (diamond with dot = light source) │ +│ • Selected: ● │ +│ • Unselected: ○ │ +│ • Active: ◉ (ring = glow) │ +│ • Tree nodes: ├─ └─ │ │ +│ │ +│ ─── PROGRESS INDICATORS ─── │ +│ Block gradient: ░ ▒ ▓ █ │ +│ Thin bar: ─ ━ │ +│ │ +│ Aurora progress: ░░░░░░░░▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ │ +│ (dim → bright = filling with light) │ +│ │ +│ ─── SPINNERS ─── │ +│ Dots: ⠋ ⠙ ⠹ 
⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏ │ +│ Circle: ◴ ◷ ◶ ◵ │ +│ Quarter: ◜ ◝ ◞ ◟ │ +│ │ +│ Aurora spinner: ◌ ◍ ◎ ● ◎ ◍ (breathing pulse) │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### 8.2 TUI Layout Templates + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA TUI — MAIN SESSION VIEW │ +├─────────────────────────────────────────────────────────────┤ + +╭─── ◈ opencode ─────────────────── Session: Code Review ────╮ +│ │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ ┃ How can I optimize this database query? │ ← User (cyan │) +│ │ +│ ╭─────────────────────────────────────────────────────╮ │ +│ │ ◈ I'll analyze your query and suggest optimizations.│ │ ← Assistant +│ │ │ │ +│ │ Looking at your query, I see several opportunities: │ │ +│ │ │ │ +│ │ 1. Add an index on `user_id` │ │ +│ │ 2. Use EXPLAIN ANALYZE to identify bottlenecks │ │ +│ │ 3. Consider pagination for large result sets │ │ +│ │ │ │ +│ │ ```sql │ │ +│ │ CREATE INDEX idx_user_id ON orders(user_id); │ │ +│ │ ``` │ │ +│ ╰─────────────────────────────────────────────────────╯ │ +│ │ +│ ░░░░░░░░░░░░░░░░░░▓▓▓▓▓▓▓▓▓▓▓▓ Processing file changes │ ← Progress +│ │ +╰─────────────────────────────────────────────────────────────╯ +│ [?] 
Help [m] Model [t] Theme [s] Sessions $0.003 │ ← Footer +└─────────────────────────────────────────────────────────────┘ + +COLOR MAPPING: +• Header border: aurora-cyan (#00D4FF) +• Header text: text-primary (#F5F5F7) +• User message │: aurora-cyan +• Assistant border: aurora-violet (#A78BFA) +• Code blocks: void-elevated bg +• Progress filled: aurora-cyan +• Progress empty: border-subtle +• Footer: text-muted +``` + +### 8.3 TUI Dialog Example + +``` +┌─────────────────────────────────────────────────────────────┐ +│ AURORA TUI — MODEL SELECTOR DIALOG │ +├─────────────────────────────────────────────────────────────┤ + + ╭═══════════════════════════════════════════╮ + ║ SELECT MODEL ║ + ╠═══════════════════════════════════════════╣ + ║ ║ + ║ ◉ Claude 4 Opus ║ ← Selected (cyan) + ║ Best for complex reasoning ║ + ║ ║ + ║ ○ Claude 4 Sonnet ║ + ║ Balanced performance ║ + ║ ║ + ║ ○ GPT-4o ║ + ║ OpenAI's flagship ║ + ║ ║ + ║ ○ Gemini 1.5 Pro ║ + ║ Google's multimodal ║ + ║ ║ + ╟───────────────────────────────────────────╢ + ║ [↑↓] Navigate [Enter] Select [Esc] ║ + ╚═══════════════════════════════════════════╝ + +COLOR MAPPING: +• Dialog border: aurora-cyan (double line = "glowing") +• Selected item: aurora-cyan fg + ◉ marker +• Unselected: text-muted + ○ marker +• Description: text-tertiary +• Keybinds: text-muted +• Dialog bg: void-elevated +``` + +--- + +## Part 9: Stitch Prompts for Visual Prototyping + +Copy & paste these prompts directly into [stitch.google.com](https://stitch.google.com) to generate visual mockups. + +### Prompt 1: Main Chat Interface (Dark Theme) + +``` +Create a dark mode AI chat interface for a developer tool called "opencode". 
+ +DESIGN DIRECTION: +- Style: Luxury minimal, future-forward like Tesla/Rivian interiors +- Aesthetic: Digital luminescence - elements emit light rather than cast shadows +- Feel: Clean but bold, pushing boundaries while staying usable + +COLORS: +- Background: Deep void black (#0A0A0F) +- Cards/panels: Glassmorphism with very subtle white tint (rgba(255,255,255,0.04)) +- Primary accent: Electric cyan (#00D4FF) - used for glows and highlights +- Secondary accent: Soft violet (#A78BFA) +- Text: Bright white (#F5F5F7) +- Borders: Subtle glow, not hard edges + +LAYOUT: +- Left sidebar (collapsed): narrow strip with logo ◈, new session button, recent sessions list +- Main area: chat message history with clear visual hierarchy +- Bottom: prominent input field with glassmorphism, glowing cyan border on focus +- Header: session title, context/token count, settings icons + +MESSAGE STYLING: +- User messages: aligned right, subtle cyan tint background, thin cyan left border +- Assistant messages: aligned left, glass card with rounded corners, thin violet left border +- Code blocks inside messages: darker elevated background + +EFFECTS: +- Buttons glow brighter on hover (cyan halo expands) +- Cards have subtle lift on hover +- Input field has pulsing glow when focused +- Use smooth spring-based animations, not linear + +TYPOGRAPHY: +- Font: JetBrains Mono for code, Inter/Geist for UI text +- Clean, modern, monospace aesthetic + +Show this as a full desktop application interface (1440x900) with an ongoing conversation about code refactoring. +``` + +--- + +### Prompt 2: Model Selection Modal + +``` +Create a modal dialog for selecting AI models in a dark mode developer tool. 
+ +DESIGN: +- Style: Glassmorphism modal floating over blurred dark background +- Background behind modal: Deep black (#0A0A0F) with 80% opacity overlay + blur +- Modal card: Glass effect (rgba(255,255,255,0.06)) with luminous cyan border + +MODAL CONTENT: +- Title: "SELECT MODEL" at top +- List of 4-5 AI models as selectable options: + • Claude 4 Opus - "Best for complex reasoning" + • Claude 4 Sonnet - "Balanced performance" + • GPT-4o - "OpenAI's flagship" + • Gemini 1.5 Pro - "Multimodal capabilities" + +INTERACTIONS: +- Selected option: Has filled cyan radio button, text is brighter +- Unselected: Empty circle, muted text +- Hover state: Subtle glow behind the option row +- Footer: "Cancel" ghost button, "Confirm" primary button with cyan glow + +ANIMATION: +- Modal scales in from 0.95 to 1.0 with fade +- Backdrop blurs in smoothly +- Radio selection has smooth transition + +Colors: +- Accent cyan: #00D4FF +- Background void: #0A0A0F +- Glass: rgba(255,255,255,0.06) +- Text: #F5F5F7 (primary), #A1A1AA (muted) +``` + +--- + +### Prompt 3: Empty State / Welcome Screen + +``` +Create a welcome screen for an AI coding assistant called "opencode". 
+ +DESIGN DIRECTION: +- Dark mode, luxury minimal aesthetic +- Ethereal, digital luminescence feel +- Background: Very dark (#050508) with subtle animated gradient aurora effect (cyan/violet/rose, VERY subtle and slow) + +CONTENT: +- Large diamond logo (◈) in center, glowing softly with cyan light +- Tagline: "Code illuminated" +- Subtitle: "Your AI pair programming assistant" +- 3-4 quick action cards below: + • "Start new session" (primary CTA with cyan glow) + • "Continue recent: [session name]" + • "Explore templates" + • "Settings & preferences" + +VISUAL EFFECTS: +- Logo has subtle breathing pulse (glow expands/contracts slowly) +- Quick action cards are glass panels that lift and glow on hover +- Very subtle particle/star field effect in background (optional, keep it minimal) +- Typography is clean, modern, confident + +COLORS: +- Primary: #00D4FF (cyan) +- Secondary: #A78BFA (violet) +- Tertiary: #FF6B9D (rose) +- Background: #050508 to #0A0A0F gradient +- Text: #F5F5F7 + +Show as full screen application (1440x900), centered composition. +``` + +--- + +### Prompt 4: Light Theme Variant + +``` +Create a light mode variant of an AI chat interface for developers. 
+ +DESIGN DIRECTION: +- Same luxury minimal aesthetic as dark mode, but inverted +- "Daylight aurora" - colors are richer/deeper for contrast +- Feel: Clean, bright, professional, premium + +COLORS: +- Background: Soft pearl (#FAFAFA) +- Cards: Frosted white glass with very subtle shadows +- Primary accent: Deeper cyan (#0891B2) for contrast +- Secondary: Rich violet (#7C3AED) +- Text: Near-black (#18181B) +- Borders: Very subtle gray, barely visible + +LAYOUT (same as dark): +- Collapsed sidebar on left +- Chat messages in center +- Glowing input at bottom (cyan glow still works in light mode) + +MESSAGE STYLING: +- User: Subtle cyan wash background, deeper cyan left border +- Assistant: White glass card, violet left border +- Code blocks: Light gray (#F4F4F5) background + +KEY DIFFERENCE FROM DARK: +- Shadows can be used (subtle, soft) +- Glass effect uses slight darkness instead of lightness +- Accents are richer/more saturated +- Same spring animations, same glow effects on focus + +Show as desktop app (1440x900) with same conversation as dark version. +``` + +--- + +### Prompt 5: Session List / Sidebar Expanded + +``` +Create an expanded sidebar view for a developer chat application. 
+ +DESIGN: +- Dark mode, glassmorphism sidebar panel +- Sidebar width: ~280px +- Background: Slightly elevated from main (#0F0F14) + +CONTENT: +- Top: Logo "◈ opencode" with subtle cyan glow +- Below logo: "+ New Session" button (primary, cyan glow) +- Section: "RECENT" label (small, muted, uppercase) +- Session list items showing: + • Session title (truncated) + • Brief preview of last message + • Timestamp (relative: "2h ago", "Yesterday") + • Subtle icon showing model used + +INTERACTIONS: +- Current/selected session: Cyan highlight bar on left, brighter text +- Hover: Glass background appears, subtle glow +- List items have smooth slide-in animation on load + +VISUAL DETAILS: +- Divider lines are very subtle (border-subtle) +- Sessions grouped by time (Today, Yesterday, This Week) +- Scroll area with fading edge at top/bottom +- Search input at top with glass styling + +Colors: +- Selected highlight: #00D4FF +- Muted text: #71717A +- Timestamps: #A1A1AA +``` + +--- + +### Prompt 6: Tool/Permission Dialog + +``` +Create a permission request dialog for an AI coding assistant. + +CONTEXT: +The AI wants to edit a file and needs user approval. 
+ +DESIGN: +- Dark glassmorphism modal +- Slightly different accent - using amber/warning color to indicate caution + +CONTENT: +- Header icon: Edit/pencil icon with amber glow +- Title: "Edit File Request" +- Description: "opencode wants to modify:" +- File path displayed: `src/components/Button.tsx` +- Preview section showing diff: + • Green highlighted lines for additions + • Red highlighted lines for deletions + • Context lines in muted color + +ACTIONS: +- "Allow" button - Primary with amber accent (#FFBB33) +- "Allow All" button - Secondary +- "Deny" button - Ghost/danger hint + +VISUAL DETAILS: +- Diff preview has code syntax highlighting +- Line numbers visible +- Modal has amber-tinted border (warning state) +- Keep the same glass effect and animation patterns + +Show the dialog centered over a blurred chat interface background. +``` + +--- + +### Prompt 7: Loading/Processing State + +``` +Create a message streaming/loading state for an AI response. + +DESIGN: +- Dark mode chat interface +- Assistant message in progress of being generated + +VISUAL: +- Glass card for assistant message (violet left border) +- Inside the card: + • "◈" logo pulsing with violet glow (breathing animation) + • First line of text appearing with typewriter effect + • Remaining area has subtle shimmer/skeleton loader + • Three dots "◌ ◌ ◌" with staggered pulse animation + +PROGRESS INDICATOR: +- Horizontal progress bar at bottom of card +- Uses the "filling with light" metaphor +- Empty portion: dim gray (░░░) +- Filled portion: cyan gradient (▓▓▓) +- Shows: "Analyzing codebase... 127 files" + +EFFECTS: +- Text fades in word by word +- Shimmer effect uses subtle gradient animation +- Overall feel: the AI is "thinking" and response is "materializing from light" + +Keep consistent with the Aurora design system - ethereal, luminous, not mechanical. +``` + +--- + +### Prompt 8: Settings Panel + +``` +Create a settings/preferences panel for a developer AI tool. 
+ +DESIGN: +- Full-width panel that slides in from right (or modal) +- Dark glassmorphism style +- Organized into clear sections + +SECTIONS: +1. APPEARANCE + - Theme selector (dropdown or visual cards) + - Font size slider + +2. MODEL DEFAULTS + - Default model dropdown + - Temperature slider with visual indicator + +3. KEYBINDINGS + - List of keyboard shortcuts in two columns + - Each shows action + keybind + +4. INTEGRATIONS + - Toggle switches for: Git, LSP, MCP servers + - Each with subtle description + +VISUAL STYLE: +- Section headers: Small caps, cyan accent, muted +- Form controls: Glass styling, cyan focus states +- Toggle switches: Off = muted, On = cyan glow +- Sliders: Thin track, glowing thumb + +LAYOUT: +- Clean vertical stack with generous spacing +- Dividers between sections (very subtle) +- "Save" and "Cancel" buttons at bottom + +Background: #0F0F14 (elevated from main void) +``` + +--- + +### Prompt 9: Error State + +``` +Create an error notification/toast for an AI coding assistant. + +SCENARIO: API rate limit exceeded + +DESIGN: +- Toast notification appearing at top-right +- Dark glass with RED accent (error state) + +CONTENT: +- Left: Warning icon with red glow (⚠ or !) +- Title: "Rate Limit Exceeded" +- Description: "Please wait 30 seconds before trying again" +- Dismiss X button on right + +VISUAL: +- Glass background with subtle red tint +- Red left border (2-3px) +- Soft red outer glow (not harsh) +- Red accent: #F87171 + +ANIMATION: +- Slides in from right with spring physics +- Slight bounce at end +- Auto-dismiss with progress bar along bottom +- Fades out when dismissed + +ERROR COLOR MAPPING: +- Error: #F87171 (rose-red) +- Warning: #FFBB33 (amber) +- Success: #4ADE80 (green) +- Info: #00D4FF (cyan) + +Show toast over blurred chat interface. +``` + +--- + +### Prompt 10: Code Diff View + +``` +Create a code diff viewer for an AI coding assistant's file changes. 
+ +DESIGN: +- Dark mode with syntax highlighting +- Side-by-side or unified diff view + +VISUAL STRUCTURE: +- Header: File path, "View Full File" link +- Line numbers on left (muted color) +- Two-tone background for changes: + • Added lines: Very subtle green tint background (#0D2818) + • Removed lines: Very subtle red tint background (#2D1216) + • Context lines: Default void background + +SYNTAX HIGHLIGHTING (Aurora theme): +- Keywords: Violet (#A78BFA) +- Functions: Cyan (#00D4FF) +- Strings: Green (#4ADE80) +- Numbers: Rose (#FF6B9D) +- Comments: Muted gray (#71717A) +- Variables: White (#F5F5F7) + +ADDITIONS: +- + symbol in green +- Line highlighted with green left border +- Changed text within line has brighter green background + +DELETIONS: +- - symbol in red +- Line highlighted with red left border +- Changed text within line has brighter red background + +GLASS CARD: +- Wrap entire diff in glass panel +- Rounded corners +- Subtle border + +Show a realistic diff of a TypeScript React component being refactored. 
+``` + +--- + +## Part 10: Final Summary & Implementation Guide + +### Design DNA at a Glance + +``` +┌─────────────────────────────────────────────────────────────┐ +│ │ +│ AURORA DESIGN SYSTEM │ +│ "Code illuminated from within" │ +│ │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ VISUAL MOTION │ +│ ├─ Dark-first luxury ├─ Spring physics │ +│ ├─ Digital luminescence ├─ Confident/tactile │ +│ ├─ Glassmorphism ├─ 200-350ms timing │ +│ └─ Glowing accents └─ Glow as feedback │ +│ │ +│ COLOR TYPOGRAPHY │ +│ ├─ Cyan primary ├─ JetBrains Mono (code) │ +│ ├─ Violet secondary ├─ Geist/Inter (UI) │ +│ ├─ Rose tertiary └─ Major Third scale │ +│ └─ Void backgrounds │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Quick Reference Card + +| Aspect | Specification | +|--------|---------------| +| **Primary Accent** | `#00D4FF` (Electric Cyan) | +| **Secondary** | `#A78BFA` (Soft Violet) | +| **Tertiary** | `#FF6B9D` (Rose) | +| **Dark Background** | `#0A0A0F` (Void) | +| **Light Background** | `#FAFAFA` (Pearl) | +| **Border Style** | Subtle glow, not hard edges | +| **Glass Effect** | `rgba(255,255,255,0.04)` + `blur(12px)` | +| **Border Radius** | `8px` buttons, `12px` cards, `24px` modals | +| **Animation Duration** | 150-350ms | +| **Easing** | Spring-based (`cubic-bezier(0.22, 1, 0.36, 1)`) | +| **Code Font** | JetBrains Mono | +| **UI Font** | Geist / Inter | + +### Component Mapping: Web → TUI + +| Web Component | TUI Equivalent | +|---------------|----------------| +| Cyan glow border | Double-line border `═══` | +| Glassmorphism card | Rounded box `╭─╮ │ ╰─╯` | +| Hover lift effect | Highlight color change | +| Loading shimmer | Block gradient `░▒▓█` | +| Pulsing glow | Braille spinner `⠋⠙⠹...` or `◌◍◎●` | +| User cyan tint | Cyan foreground + `┃` pipe | +| Assistant violet border | Violet `│` left margin | + +### Implementation Phases (Recommended) + +#### Phase 1: Theme Foundation +- [ ] Create 
`aurora-dark.json` and `aurora-light.json` theme files +- [ ] Add to TUI theme selector +- [ ] Update CSS custom properties for web console + +#### Phase 2: Core Components +- [ ] Buttons (primary, secondary, ghost, danger) +- [ ] Input fields with focus glow +- [ ] Cards with glass effect +- [ ] Modals with backdrop blur + +#### Phase 3: Chat Interface +- [ ] Message bubbles (user/assistant) +- [ ] Prompt input (hero component) +- [ ] Loading/streaming states +- [ ] Code blocks with Aurora syntax theme + +#### Phase 4: Motion Polish +- [ ] Spring animations library integration +- [ ] Enter/exit transitions +- [ ] Micro-interactions +- [ ] Loading states + +### Existing Component Touchpoints + +Based on analysis of the codebase, these are the key files to modify: + +**TUI Theme System:** +- `packages/opencode/src/cli/cmd/tui/context/theme.tsx` — Theme provider and color types +- `packages/opencode/src/cli/cmd/tui/context/theme/` — Theme JSON files (add aurora-dark.json, aurora-light.json) + +**TUI Components:** +- `packages/opencode/src/cli/cmd/tui/routes/session/index.tsx` — Main session view +- `packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx` — Prompt input component +- `packages/opencode/src/cli/cmd/tui/component/dialog-*.tsx` — All dialog components + +**Web Console:** +- `packages/console/app/src/style/token/color.css` — CSS color tokens +- `packages/console/app/src/routes/index.css` — Landing page styles +- `packages/console/app/src/component/` — Shared components + +### Success Criteria + +The Aurora redesign is successful when: + +1. **Visual Coherence**: TUI and Web feel like the same product family +2. **Motion Quality**: Interactions feel tactile and confident, not floaty or delayed +3. **Performance**: Animations run at 60fps, no jank +4. **Accessibility**: 4.5:1 contrast ratios maintained, focus states visible +5. 
**Brand Recognition**: Users recognize "the opencode look" instantly + +--- + +## Appendix: Theme JSON Template + +```json +{ + "$schema": "https://opencode.ai/theme.json", + "defs": { + "voidDeepest": "#050508", + "voidDeep": "#0A0A0F", + "voidBase": "#0F0F14", + "voidElevated": "#14141A", + "voidHover": "#1A1A22", + "auroraCyan": "#00D4FF", + "auroraViolet": "#A78BFA", + "auroraRose": "#FF6B9D", + "auroraAmber": "#FFBB33", + "auroraGreen": "#4ADE80", + "auroraRed": "#F87171", + "textPrimary": "#F5F5F7", + "textSecondary": "#A1A1AA", + "textTertiary": "#71717A" + }, + "theme": { + "primary": "auroraCyan", + "secondary": "auroraViolet", + "accent": "auroraRose", + "error": "auroraRed", + "warning": "auroraAmber", + "success": "auroraGreen", + "info": "auroraCyan", + "text": "textPrimary", + "textMuted": "textSecondary", + "background": "voidDeep", + "backgroundPanel": "voidBase", + "backgroundElement": "voidElevated", + "border": "#1E1E26", + "borderActive": "auroraCyan", + "borderSubtle": "#14141A", + "syntaxKeyword": "auroraViolet", + "syntaxFunction": "auroraCyan", + "syntaxString": "auroraGreen", + "syntaxNumber": "auroraRose", + "syntaxComment": "textTertiary", + "syntaxVariable": "textPrimary", + "syntaxType": "auroraAmber", + "syntaxOperator": "textSecondary", + "syntaxPunctuation": "textTertiary", + "diffAdded": "auroraGreen", + "diffRemoved": "auroraRed", + "diffAddedBg": "#0D2818", + "diffRemovedBg": "#2D1216", + "diffContext": "textTertiary", + "diffContextBg": "voidBase" + } +} +``` + +--- + +**Document created:** 2025-02-26 +**Design direction:** Aurora — Digital Luminescence +**Status:** Ready for implementation +**Last reviewed:** 2025-02-26 (UI/UX Pro Max review incorporated) + +--- + +## Part 11: Accessibility & Review Amendments + +*This section addresses feedback from the UI/UX Pro Max review and adds critical accessibility requirements.* + +### 11.1 Motion Sickness Prevention (CRITICAL) + +**Issue:** The original design suggested animated aurora 
backgrounds and continuous pulse effects which can trigger motion sensitivity. + +**Resolution:** + +```css +/* ═══════════════════════════════════════════════════════════ + REDUCED MOTION SUPPORT — MANDATORY + ═══════════════════════════════════════════════════════════ */ + +@media (prefers-reduced-motion: reduce) { + *, + *::before, + *::after { + animation-duration: 0.01ms !important; + animation-iteration-count: 1 !important; + transition-duration: 0.01ms !important; + scroll-behavior: auto !important; + } + + /* Disable specific Aurora effects */ + .aurora-background { + background: var(--void-deep) !important; + animation: none !important; + } + + .glow-pulse { + box-shadow: none !important; + } +} +``` + +**Guidelines:** +- ❌ **NEVER** use infinite animations on backgrounds or decorative elements +- ✅ Continuous animation ONLY permitted during active loading states +- ✅ Aurora drift effect should be opt-in, disabled by default +- ✅ All spring animations must have `prefers-reduced-motion` fallback + +--- + +### 11.2 Line Length Constraints (HIGH) + +**Issue:** Chat interfaces and documentation need line-length limits for readability. + +**Resolution:** + +```css +/* Add to spacing system */ +:root { + --max-prose-width: 70ch; /* 65-75 characters optimal */ +} + +/* Apply to text containers */ +.chat-message, +.documentation-content, +.modal-body { + max-width: var(--max-prose-width); +} + +/* Ensure full-width code blocks still work */ +.code-block { + max-width: 100%; + overflow-x: auto; +} +``` + +**Application:** +| Component | Max Width | +|-----------|-----------| +| Chat message bubbles | `70ch` | +| Modal body text | `70ch` | +| Documentation paragraphs | `70ch` | +| Code blocks | `100%` (scrollable) | +| Headers | No limit | + +--- + +### 11.3 Light Mode Glass Contrast (CRITICAL) + +**Issue:** Light mode glass effects were too subtle to establish visual hierarchy. 
+

**Resolution — Updated Light Theme:**

```css
:root[data-theme="aurora-light"] {
  /* ─── ADJUSTED GLASS OPACITIES ─── */
  --glass-subtle: rgba(0, 0, 0, 0.03);   /* was 0.02 */
  --glass-light: rgba(0, 0, 0, 0.06);    /* was 0.04 */
  --glass-medium: rgba(0, 0, 0, 0.09);   /* was 0.06 */
  --glass-strong: rgba(0, 0, 0, 0.12);   /* was 0.08 */

  /* ─── STRONGER BORDERS ─── */
  --border-subtle: rgba(0, 0, 0, 0.08);  /* was 0.06 */
  --border-default: rgba(0, 0, 0, 0.12); /* was 0.10 */
  --border-strong: rgba(0, 0, 0, 0.18);  /* was 0.15 */

  /* ─── SUBTLE SHADOWS (light mode only) ─── */
  --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.05);
  --shadow-md: 0 2px 4px rgba(0, 0, 0, 0.08);
  --shadow-lg: 0 4px 8px rgba(0, 0, 0, 0.10);
}

/* Apply shadows to cards in light mode only */
[data-theme="aurora-light"] .glass-card {
  box-shadow: var(--shadow-sm);
}
```

**Contrast Verification:**

| Text | Background | Ratio | Status |
|------|------------|-------|--------|
| `#18181B` | `#FAFAFA` | 16.2:1 | ✅ Pass |
| `#52525B` | `#FAFAFA` | 7.4:1 | ✅ Pass |
| `#A1A1AA` | `#FAFAFA` | ~2.4:1 | ❌ Fails — below the 3:1 large-text minimum; use `#71717A` (≈4.6:1) or darker for muted text in light mode |
| `#F5F5F7` | `#0A0A0F` | 19.6:1 | ✅ Pass |
| `#A1A1AA` | `#0A0A0F` | 8.5:1 | ✅ Pass |

---

### 11.4 Interactive Element Requirements (MEDIUM)

**Issue:** Interactive cues need explicit mandates. 
+ +**Resolution — Mandatory Interaction Patterns:** + +```css +/* All clickable elements */ +button, +[role="button"], +.clickable, +.interactive-card, +a { + cursor: pointer; +} + +/* Focus-visible states (keyboard navigation) */ +:focus-visible { + outline: 2px solid var(--aurora-cyan); + outline-offset: 2px; +} + +/* Disable outline for mouse users */ +:focus:not(:focus-visible) { + outline: none; +} +``` + +**Icon Standards:** +- ✅ **Required:** Lucide Icons (React: `lucide-react`, Web: `lucide`) +- ✅ **Acceptable:** Heroicons, Phosphor Icons +- ❌ **Forbidden:** Emoji as UI icons (OS rendering inconsistency) +- ❌ **Forbidden:** Font Awesome (too generic, doesn't fit Aurora aesthetic) + +--- + +### 11.5 WCAG Compliance Checklist + +Before implementation, verify: + +#### Color & Contrast +- [ ] All body text has 4.5:1 minimum contrast ratio +- [ ] All large text (18px+) has 3:1 minimum contrast ratio +- [ ] Focus indicators are clearly visible (2px cyan outline) +- [ ] Error states use red AND icon/text (not color alone) + +#### Motion & Animation +- [ ] `prefers-reduced-motion` media query implemented +- [ ] No infinite animations on decorative elements +- [ ] Loading animations can be paused or are under 5s +- [ ] No flashing content (3 flashes per second limit) + +#### Interaction +- [ ] All interactive elements have `cursor: pointer` +- [ ] Touch targets are minimum 44x44px +- [ ] Keyboard navigation follows visual order +- [ ] Focus states are distinct from hover states + +#### Typography +- [ ] Minimum 16px body text (mobile) +- [ ] Line height minimum 1.5 for body text +- [ ] Line length limited to 70ch for prose +- [ ] Text is resizable to 200% without loss of functionality + +--- + +### Review Response Summary + +| Feedback Item | Severity | Action Taken | +|---------------|----------|--------------| +| Motion sickness / `prefers-reduced-motion` | CRITICAL | Added §11.1 with full CSS implementation | +| Line length 65-75ch | HIGH | Added §11.2 with 
`--max-prose-width: 70ch` | +| Light mode glass contrast | CRITICAL | Added §11.3 with adjusted opacity values | +| `cursor-pointer` mandate | MEDIUM | Added §11.4 with interactive patterns | +| SVG icons only | MEDIUM | Added §11.4 with Lucide Icons mandate | +| WCAG compliance | — | Added §11.5 checklist | + +--- + +*Review incorporated from: UI/UX Pro Max analysis (2025-02-26)* diff --git a/docs/plans/2025-02-26-roo-code-orphaned-tool-use-debug.md b/docs/plans/2025-02-26-roo-code-orphaned-tool-use-debug.md new file mode 100644 index 000000000000..4c70e250500a --- /dev/null +++ b/docs/plans/2025-02-26-roo-code-orphaned-tool-use-debug.md @@ -0,0 +1,442 @@ +# Systematic Root-Cause Analysis: Roo Code Orphaned `tool_use` Error + +## Phase 1: Root Cause Investigation + +### 1.1 The Error (Read Carefully) + +``` +messages.12: `tool_use` ids were found without `tool_result` blocks immediately after: + tooluse_gcKGmk7V7opjkl8G2V6v0N, tooluse_ldg9S86J2GK8UzcQqvOQXR. +Each `tool_use` block must have a corresponding `tool_result` block in the next message. 
+``` + +**What this tells us precisely:** + +| Fact | Implication | +| ---------------------------------------------------- | --------------------------------------------------------------------------------- | +| `messages.12` | The **13th message** (0-indexed) in the conversation array is the problem | +| Two IDs: `tooluse_gcKG…`, `tooluse_ldg9…` | The assistant called **exactly 2 tools** in that turn | +| "without `tool_result` blocks **immediately after**" | Message 13 (the next user message) does NOT contain matching `tool_result` blocks | +| Cost `$0.0000` | API rejects the request **before** processing — this is a pre-validation error | +| First attempt at 7%, $1.61 spent | **~6 successful API round-trips** happened before this (at ~$0.25/turn) | +| IDs start with `tooluse_` | This is Anthropic's native tool calling format (not OpenAI-style `call_*`) | + +### 1.2 Reproducing the Scenario + +**User's task:** "Create a latest DMG for me, before that redesign the SVG for the app icon. Use Skill UI UX Pro Max." + +This task would trigger the following tool sequence: + +1. **read_file** — Read existing SVG icon file(s) +2. **list_files** — Scan project structure for icon locations +3. **write_to_file** — Write new SVG design +4. **execute_command** — Build/package DMG + +The **"Use Skill UI UX Pro Max"** instruction is key — it tells the assistant to use a custom mode/skill, which could trigger a **`switch_mode`** or **`skill`** tool call alongside regular tools. + +At the point of failure (message 12, ~7% progress, $1.61), the assistant would have been in the early **file reading/scanning phase**, likely calling 2 tools in parallel. 
+ +### 1.3 Backward Trace: From Error to Root Cause + +``` +Error received by: Anthropic API server + ↑ Sent by: this.api.createMessage() at Task.ts:4271 + ↑ Built from: cleanConversationHistory at Task.ts:4193 + ↑ Derived from: effectiveHistory → mergedForApi → messagesWithoutImages + ↑ Sourced from: this.apiConversationHistory (the persistent storage) + ↑ CORRUPTED HERE: message 12 has tool_use but message 13 lacks tool_result +``` + +**The question is: HOW did message 13 get saved without the tool_results?** + +There are exactly 3 code paths that save user messages with `tool_result` blocks: + +#### Path A: Normal tool execution flow (`recursivelyMakeClineRequests`) + +``` +Task.ts:3542 → Save assistant message (with tool_use blocks) +Task.ts:3561 → presentAssistantMessage(this) → executes tools → pushToolResult() +Task.ts:3581 → pWaitFor(() => this.userMessageContentReady) +Task.ts:2651 → addToApiConversationHistory({ role: "user", content: finalUserContent }) +``` + +In this path, `finalUserContent` at line 2641 includes `this.userMessageContent` which is populated by `pushToolResult` during tool execution. The `pWaitFor` at 3581 blocks until all tools complete. + +**Could this path lose tool_results?** → Only if `presentAssistantMessage` fails to call `pushToolResult`. + +#### Path B: `flushPendingToolResultsToHistory()` (delegation via `new_task`) + +``` +Task.ts:1048 → Check userMessageContent.length > 0 +Task.ts:1067 → Wait for assistantMessageSavedToHistory (30s timeout) +Task.ts:1085 → Build user message from this.userMessageContent +Task.ts:1096 → Push to apiConversationHistory +``` + +**Could this path lose tool_results?** → Yes, if abort/timeout triggers. 
+ +#### Path C: Task resume (`resumeTaskFromHistory`) + +``` +Task.ts:2109-2117 → Generate placeholder tool_results for all tool_use blocks +Task.ts:2142-2159 → Find missing tool_results and fill them in +Task.ts:2217 → overwriteApiConversationHistory(modifiedApiConversationHistory) +``` + +**Could this path lose tool_results?** → No, it explicitly generates them. + +--- + +### 1.4 The Root Cause: `presentAssistantMessage` + `AskIgnoredError` = Silent Failure + +Here is the critical code path that causes the corruption: + +#### Step 1: Stream completes, assistant has 2 tool_use blocks + +At [`Task.ts:3542`](references/Roo-Code/src/core/task/Task.ts:3542): + +```ts +await this.addToApiConversationHistory( + { role: "assistant", content: assistantContent }, // Contains 2 tool_use blocks + reasoningMessage || undefined, +) +this.assistantMessageSavedToHistory = true // ← message 12 is now persisted +``` + +#### Step 2: Tools begin executing via `presentAssistantMessage` + +At [`presentAssistantMessage.ts:61`](references/Roo-Code/src/core/assistant-message/presentAssistantMessage.ts:61), the function is called to process each tool_use block. Each tool handler calls `askApproval()` which internally calls `cline.ask()`. 
+ +#### Step 3: `ask()` throws `AskIgnoredError` — the silent killer + +At [`Task.ts:1304`](references/Roo-Code/src/core/task/Task.ts:1304): + +```ts +throw new AskIgnoredError("updating existing partial") +``` + +And at [`Task.ts:1312`](references/Roo-Code/src/core/task/Task.ts:1312): + +```ts +throw new AskIgnoredError("new partial") +``` + +This error is thrown when: + +- A tool starts streaming its approval request as a partial message +- Another partial update comes in before the user responds +- The earlier ask is **silently abandoned** + +#### Step 4: `handleError` catches `AskIgnoredError` but DOES NOTHING + +At [`presentAssistantMessage.ts:540-544`](references/Roo-Code/src/core/assistant-message/presentAssistantMessage.ts:540): + +```ts +const handleError = async (action: string, error: Error) => { + // Silently ignore AskIgnoredError - this is an internal control flow + // signal, not an actual error. + if (error instanceof AskIgnoredError) { + return // ← NO tool_result pushed! Silent return! + } + // ... + pushToolResult(formatResponse.toolError(errorString)) +} +``` + +**THIS IS THE BUG.** + +When `AskIgnoredError` is caught: + +- `pushToolResult()` is **never called** +- `hasToolResult` remains `false` +- The `tool_use` block has **no corresponding `tool_result`** +- But the tool handler returns normally (no re-throw) + +#### Step 5: The loop continues, user message gets saved incomplete + +After `presentAssistantMessage` completes all blocks: + +- `userMessageContentReady` is set to `true` +- The `pWaitFor` at [`Task.ts:3581`](references/Roo-Code/src/core/task/Task.ts:3581) resolves +- The user message is saved at [`Task.ts:2651`](references/Roo-Code/src/core/task/Task.ts:2651) with **1 out of 2 tool_results** (or 0 out of 2) +- The `validateAndFixToolResultIds` at [`Task.ts:1016`](references/Roo-Code/src/core/task/Task.ts:1016) SHOULD catch this... + +#### Step 6: But wait — does `validateAndFixToolResultIds` catch it? 
+
+At [`validateToolResultIds.ts:118-121`](references/Roo-Code/src/core/task/validateToolResultIds.ts:118):
+
+```ts
+const missingToolUseIds = toolUseBlocks
+	.filter((toolUse) => !existingToolResultIds.has(toolUse.id))
+	.map((toolUse) => toolUse.id)
+```
+
+Yes, it detects the missing IDs. And at line 220-228:
+
+```ts
+const missingToolResults = stillMissingToolUseIds.map((toolUse) => ({
+	type: "tool_result" as const,
+	tool_use_id: toolUse.id,
+	content: "Tool execution was interrupted before completion.",
+}))
+const finalContent = missingToolResults.length > 0 ? [...missingToolResults, ...correctedContent] : correctedContent
+```
+
+**It injects placeholder tool_results!** So... why does the error still happen?
+
+#### Step 7: THE REAL BUG — `AskIgnoredError` escapes `askApproval`, and the tool handler's `handleError` silently swallows it
+
+Look at the tool handler flow more carefully. The `askApproval` function at [`presentAssistantMessage.ts:494-529`](references/Roo-Code/src/core/assistant-message/presentAssistantMessage.ts:494) calls `cline.ask()`. If `ask()` throws `AskIgnoredError`, it **propagates up through `askApproval`**:
+
+```ts
+const askApproval = async (...) => {
+	const { response, text, images } = await cline.ask(type, ...) // ← throws AskIgnoredError!
+	// code below never executes
+}
+```
+
+The `AskIgnoredError` escapes `askApproval`, enters the tool handler (e.g., `readFileTool.handle()`), which catches it through `handleError`:
+
+```ts
+// Inside a tool handler like readFileTool:
+try {
+	const approved = await askApproval("tool", ...) // ← AskIgnoredError thrown here
+	// never reaches pushToolResult()
+} catch (error) {
+	await handleError("reading file", error) // ← silently returns for AskIgnoredError
+}
+```
+
+After `handleError` silently returns:
+
+- **No `tool_result` was pushed**
+- The tool handler returns normally
+- `presentAssistantMessage` moves to the next block
+
+**But this should be caught by `validateAndFixToolResultIds`...** unless there's a timing issue.
+
+#### Step 8: THE ACTUAL ROOT CAUSE — The AskIgnoredError is thrown DURING tool approval streaming, which happens DURING the API response stream
+
+The key insight is **when** this happens:
+
+1. The API response is still streaming (`didCompleteReadingStream = false`)
+2. `presentAssistantMessage` is called to present tool #1 (partial)
+3. Tool #1 calls `askApproval(type, partialMessage, progressStatus)` with `partial=true`
+4. `ask()` throws `AskIgnoredError("new partial")` for the first partial
+5. `handleError` silently ignores it — **no tool_result pushed**
+6. `presentAssistantMessage` unlocks at line 933 and returns
+7. Stream continues, tool #1 becomes complete (non-partial)
+8. `presentAssistantMessage` is called again
+9. **But now `cline.currentStreamingContentIndex` has already been incremented at line 957**
+10. The complete version of tool #1 is **SKIPPED** — it was "already presented" as partial
+11. Tool #2 is presented and executed
+12. Tool #2's `tool_result` IS pushed
+
+So the final user message has: `[tool_result for tool #2]` but NOT `[tool_result for tool #1]`.
+
+**WAIT** — let me re-read line 940 more carefully:
+
+```ts
+if (!block.partial || cline.didRejectTool || cline.didAlreadyUseTool) {
+```
+
+This only advances the index when `!block.partial`. A partial block does NOT advance the index. So tool #1 partial → `AskIgnoredError` → returns WITHOUT advancing index → tool #1 complete → presented again → should work.
+
+Let me trace more carefully...
+ +#### Step 8 (Revised): The REAL root cause — `AskIgnoredError` thrown for a NON-PARTIAL tool + +The `AskIgnoredError` can be thrown even for non-partial asks. Look at [`Task.ts:1474-1476`](references/Roo-Code/src/core/task/Task.ts:1474): + +```ts +throw new AskIgnoredError("superseded") +``` + +This happens when `this.lastMessageTs !== askTs` — meaning **another ask was created while this one was pending**. This is the "superseded" case. + +**Scenario for 2 parallel tools:** + +1. Stream completes with 2 tool_use blocks: `[tool_A, tool_B]` +2. `presentAssistantMessage` processes tool_A (complete, non-partial) +3. tool_A calls `askApproval("tool", ...)` → calls `cline.ask("tool", ...)` +4. `ask()` creates a new ClineMessage with `askTs = Date.now()` +5. `ask()` reaches `pWaitFor` at line 1444, waiting for user response +6. **Auto-approval kicks in** at line 1368 → `this.approveAsk()` → sets `askResponse` +7. `pWaitFor` resolves → `ask()` returns → tool_A executes → `pushToolResult()` ✓ +8. `presentAssistantMessage` increments index to tool_B +9. tool_B calls `askApproval("tool", ...)` → calls `cline.ask("tool", ...)` +10. This works normally too. ✓ + +So parallel tools in sequence shouldn't cause the issue with auto-approval. BUT: + +#### Step 8 (Final): The TRUE root cause — Mid-stream crash between assistant save and tool execution + +Let me look at the exception handler at [`Task.ts:3722-3729`](references/Roo-Code/src/core/task/Task.ts:3722): + +```ts +} catch (error) { + // This should never happen since the only thing that can throw an + // error is the attemptApiRequest, which is wrapped in a try catch + // that sends an ask where if noButtonClicked, will clear current + // task and destroy this instance. + return true // Needs to be true so parent loop knows to end task. +} +``` + +And the `presentAssistantMessage` at line 62-64: + +```ts +if (cline.abort) { + throw new Error(`[Task#presentAssistantMessage] task ... 
aborted`) +} +``` + +**HERE IS THE ACTUAL ROOT CAUSE:** + +1. Assistant message with 2 `tool_use` blocks is saved to history (line 3542) ← **message 12** +2. `this.assistantMessageSavedToHistory = true` (line 3546) +3. `presentAssistantMessage(this)` is called (line 3561) to present partial blocks +4. During tool execution, **`cline.abort` gets set to `true`** (user cancels, or error, or timeout) +5. `presentAssistantMessage` throws at line 63: `throw new Error("...aborted")` +6. This throw propagates up through the tool execution +7. **`pushToolResult` was never called for either tool** +8. The error reaches the `catch` at Task.ts:3722 +9. It returns `true` — task ends +10. **BUT message 12 (assistant with 2 tool_use blocks) is ALREADY in the persistent history** +11. **No user message with tool_results was ever saved as message 13** + +When the user resumes the task: + +- `resumeTaskFromHistory` at Task.ts:2090+ checks the LAST message +- If the last message is the assistant with tool_use, it generates placeholders → **works** +- But if other messages were appended AFTER message 12 before the abort (e.g., error messages, api_req_started), the last message is NOT message 12 +- The resume logic only fixes the last assistant-user pair, not arbitrary positions + +**The corruption is permanent.** + +--- + +## Phase 2: Pattern Analysis + +### Working example + +When `abort` is NOT set during tool execution: + +1. All tools execute normally +2. All `pushToolResult()` calls complete +3. `userMessageContent` has all `tool_result` blocks +4. User message saved with all results → ✓ + +### Broken example (this bug) + +When `abort` IS set during tool execution (e.g., user clicks cancel, network timeout, extension deactivation): + +1. Some tools may have executed, others not +2. `presentAssistantMessage` throws on abort check +3. `userMessageContent` has partial or zero `tool_result` blocks +4. User message is NEVER saved (abort exits the loop) +5. 
But assistant message with `tool_use` blocks is ALREADY saved → ✗ + +### The key difference + +The **assistant message is saved BEFORE tool execution** (line 3542), but the **user message with tool_results is saved AFTER all tools complete** (line 2651). Any interruption between these two writes creates an orphaned `tool_use`. + +--- + +## Phase 3: Hypothesis + +**Hypothesis:** The root cause is that aborting/cancelling a task between the assistant message save (Task.ts:3542) and the user message save (Task.ts:2651) leaves the API conversation history in an invalid state where an assistant message has `tool_use` blocks without a following `tool_result` message. The `validateAndFixToolResultIds` safety net only runs at write-time for new messages, not as a pre-flight check before API calls, so the corruption is never repaired on retry. + +**Evidence supporting this:** + +1. Error occurs at a fixed position (`messages.12`) — consistent with a single write of assistant message followed by no user message write +2. Two tool_use IDs — consistent with a multi-tool call that was interrupted +3. Task was at 7% progress — early in execution, tools were still being called +4. The error is **permanent** — every retry hits the same corrupted history because no code path repairs it +5. Roo Code explicitly has comments about this risk in the codebase (lines 3401-3404, 1054-1057) + +--- + +## Phase 4: Proposed Fix + +### Fix 1: Pre-flight history validation in `attemptApiRequest` + +At [`Task.ts:4193`](references/Roo-Code/src/core/task/Task.ts:4193), after building `cleanConversationHistory`, add: + +```ts +// Repair orphaned tool_use blocks before sending to API +for (let i = 0; i < cleanConversationHistory.length - 1; i++) { + const msg = cleanConversationHistory[i] + const next = cleanConversationHistory[i + 1] + + if (msg.role !== "assistant") continue + + const content = Array.isArray(msg.content) ? 
msg.content : [] + const toolUseBlocks = content.filter((b) => b.type === "tool_use") + if (toolUseBlocks.length === 0) continue + + if (next.role !== "user") { + // Insert a synthetic user message with tool_results + const toolResults = toolUseBlocks.map((t) => ({ + type: "tool_result", + tool_use_id: t.id, + content: "Tool execution was interrupted.", + })) + cleanConversationHistory.splice(i + 1, 0, { role: "user", content: toolResults }) + continue + } + + // Check if next user message has all required tool_results + const nextContent = Array.isArray(next.content) ? next.content : [] + const resultIds = new Set(nextContent.filter((b) => b.type === "tool_result").map((b) => b.tool_use_id)) + const missing = toolUseBlocks.filter((t) => !resultIds.has(t.id)) + + if (missing.length > 0) { + const repairs = missing.map((t) => ({ + type: "tool_result", + tool_use_id: t.id, + content: "Tool execution was interrupted.", + })) + next.content = [...repairs, ...nextContent] + } +} +``` + +### Fix 2: Ensure abort saves partial tool_results + +At [`Task.ts:3722`](references/Roo-Code/src/core/task/Task.ts:3722), before returning: + +```ts +} catch (error) { + // Save any accumulated tool_results to prevent orphaned tool_use blocks + if (this.userMessageContent.length > 0) { + await this.flushPendingToolResultsToHistory() + } + return true +} +``` + +### Fix 3: Detect and break the infinite retry loop + +In `attemptApiRequest`'s error handler, detect this specific Anthropic error pattern and auto-repair: + +```ts +if (error.message?.includes("tool_use` ids were found without `tool_result`")) { + await this.repairOrphanedToolUseBlocks() + yield * this.attemptApiRequest(retryAttempt + 1) + return +} +``` + +--- + +## Summary + +| Layer | What happens | File:Line | +| ----------------- | -------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------- | +| **Trigger** | User cancels task, or network drops, or abort signal fires | `Task.ts:62-64` | +| **Corruption** | Assistant message (with `tool_use`) already saved, tool execution interrupted before `tool_result` saved | `Task.ts:3542` (save) → `Task.ts:3561` (execute) → abort before `Task.ts:2651` (save results) | +| **Missing guard** | `presentAssistantMessage` silently drops tool_results when `AskIgnoredError` or abort occurs | `presentAssistantMessage.ts:225`, `543` | +| **No recovery** | `validateAndFixToolResultIds` only runs at write-time, not pre-flight | `Task.ts:1016` | +| **Infinite loop** | `attemptApiRequest` retries with same corrupted history | `Task.ts:4337` | +| **No escape** | User must start a new session; no "repair history" option exists | — | diff --git a/packages/app/src/components/prompt-input.tsx b/packages/app/src/components/prompt-input.tsx deleted file mode 100644 index 1e1be28b5961..000000000000 --- a/packages/app/src/components/prompt-input.tsx +++ /dev/null @@ -1,1615 +0,0 @@ -import { useFilteredList } from "@opencode-ai/ui/hooks" -import { useSpring } from "@opencode-ai/ui/motion-spring" -import { createEffect, on, Component, Show, onCleanup, createMemo, createSignal, createResource } from "solid-js" -import { createStore } from "solid-js/store" -import { useLocal } from "@/context/local" -import { selectionFromLines, type SelectedLineRange, useFile } from "@/context/file" -import { - ContentPart, - DEFAULT_PROMPT, - isPromptEqual, - Prompt, - usePrompt, - ImageAttachmentPart, - AgentPart, - FileAttachmentPart, -} from "@/context/prompt" -import { useLayout } from "@/context/layout" -import { useSDK } from "@/context/sdk" -import { useSync } from "@/context/sync" -import { useComments } from "@/context/comments" -import { Button } from "@opencode-ai/ui/button" -import { DockShellForm, DockTray } from "@opencode-ai/ui/dock-surface" 
-import { Icon } from "@opencode-ai/ui/icon" -import { ProviderIcon } from "@opencode-ai/ui/provider-icon" -import { Tooltip, TooltipKeybind } from "@opencode-ai/ui/tooltip" -import { IconButton } from "@opencode-ai/ui/icon-button" -import { Select } from "@opencode-ai/ui/select" -import { useDialog } from "@opencode-ai/ui/context/dialog" -import { ModelSelectorPopover } from "@/components/dialog-select-model" -import { useProviders } from "@/hooks/use-providers" -import { useCommand } from "@/context/command" -import { Persist, persisted } from "@/utils/persist" -import { usePermission } from "@/context/permission" -import { useLanguage } from "@/context/language" -import { usePlatform } from "@/context/platform" -import { useSessionLayout } from "@/pages/session/session-layout" -import { createSessionTabs } from "@/pages/session/helpers" -import { createTextFragment, getCursorPosition, setCursorPosition, setRangeEdge } from "./prompt-input/editor-dom" -import { createPromptAttachments } from "./prompt-input/attachments" -import { ACCEPTED_FILE_TYPES } from "./prompt-input/files" -import { - canNavigateHistoryAtCursor, - navigatePromptHistory, - prependHistoryEntry, - type PromptHistoryComment, - type PromptHistoryEntry, - type PromptHistoryStoredEntry, - promptLength, -} from "./prompt-input/history" -import { createPromptSubmit, type FollowupDraft } from "./prompt-input/submit" -import { PromptPopover, type AtOption, type SlashCommand } from "./prompt-input/slash-popover" -import { PromptContextItems } from "./prompt-input/context-items" -import { PromptImageAttachments } from "./prompt-input/image-attachments" -import { PromptDragOverlay } from "./prompt-input/drag-overlay" -import { promptPlaceholder } from "./prompt-input/placeholder" -import { ImagePreview } from "@opencode-ai/ui/image-preview" -import { useQueries } from "@tanstack/solid-query" -import { useQueryOptions } from "@/context/global-sync" -import { pathKey } from "@/utils/path-key" - -interface 
PromptInputProps { - class?: string - ref?: (el: HTMLDivElement) => void - newSessionWorktree?: string - onNewSessionWorktreeReset?: () => void - edit?: { id: string; prompt: Prompt; context: FollowupDraft["context"] } - onEditLoaded?: () => void - shouldQueue?: () => boolean - onQueue?: (draft: FollowupDraft) => void - onAbort?: () => void - onSubmit?: () => void -} - -const EXAMPLES = [ - "prompt.example.1", - "prompt.example.2", - "prompt.example.3", - "prompt.example.4", - "prompt.example.5", - "prompt.example.6", - "prompt.example.7", - "prompt.example.8", - "prompt.example.9", - "prompt.example.10", - "prompt.example.11", - "prompt.example.12", - "prompt.example.13", - "prompt.example.14", - "prompt.example.15", - "prompt.example.16", - "prompt.example.17", - "prompt.example.18", - "prompt.example.19", - "prompt.example.20", - "prompt.example.21", - "prompt.example.22", - "prompt.example.23", - "prompt.example.24", - "prompt.example.25", -] as const - -const NON_EMPTY_TEXT = /[^\s\u200B]/ - -export const PromptInput: Component = (props) => { - const sdk = useSDK() - const queryOptions = useQueryOptions() - - const sync = useSync() - const local = useLocal() - const files = useFile() - const prompt = usePrompt() - const layout = useLayout() - const comments = useComments() - const dialog = useDialog() - const providers = useProviders() - const command = useCommand() - const permission = usePermission() - const language = useLanguage() - const platform = usePlatform() - const { params, tabs, view } = useSessionLayout() - let editorRef!: HTMLDivElement - let fileInputRef: HTMLInputElement | undefined - let scrollRef!: HTMLDivElement - let slashPopoverRef!: HTMLDivElement - - const mirror = { input: false } - const inset = 56 - const space = `${inset}px` - - const scrollCursorIntoView = () => { - const container = scrollRef - const selection = window.getSelection() - if (!container || !selection || selection.rangeCount === 0) return - - const range = 
selection.getRangeAt(0) - if (!editorRef.contains(range.startContainer)) return - - const cursor = getCursorPosition(editorRef) - const length = promptLength(prompt.current().filter((part) => part.type !== "image")) - if (cursor >= length) { - container.scrollTop = container.scrollHeight - return - } - - const rect = range.getClientRects().item(0) ?? range.getBoundingClientRect() - if (!rect.height) return - - const containerRect = container.getBoundingClientRect() - const top = rect.top - containerRect.top + container.scrollTop - const bottom = rect.bottom - containerRect.top + container.scrollTop - const padding = 12 - - if (top < container.scrollTop + padding) { - container.scrollTop = Math.max(0, top - padding) - return - } - - if (bottom > container.scrollTop + container.clientHeight - inset) { - container.scrollTop = bottom - container.clientHeight + inset - } - } - - const queueScroll = (count = 2) => { - requestAnimationFrame(() => { - scrollCursorIntoView() - if (count > 1) queueScroll(count - 1) - }) - } - - const activeFileTab = createSessionTabs({ - tabs, - pathFromTab: files.pathFromTab, - normalizeTab: (tab) => (tab.startsWith("file://") ? 
files.tab(tab) : tab), - }).activeFileTab - - const commentInReview = (path: string) => { - const sessionID = params.id - if (!sessionID) return false - - const diffs = sync.data.session_diff[sessionID] - if (!diffs) return false - return diffs.some((diff) => diff.file === path) - } - - const openComment = (item: { path: string; commentID?: string; commentOrigin?: "review" | "file" }) => { - if (!item.commentID) return - - const focus = { file: item.path, id: item.commentID } - comments.setActive(focus) - - const queueCommentFocus = (attempts = 6) => { - const schedule = (left: number) => { - requestAnimationFrame(() => { - comments.setFocus({ ...focus }) - if (left <= 0) return - requestAnimationFrame(() => { - const current = comments.focus() - if (!current) return - if (current.file !== focus.file || current.id !== focus.id) return - schedule(left - 1) - }) - }) - } - - schedule(attempts) - } - - const wantsReview = item.commentOrigin === "review" || (item.commentOrigin !== "file" && commentInReview(item.path)) - if (wantsReview) { - if (!view().reviewPanel.opened()) view().reviewPanel.open() - layout.fileTree.setTab("changes") - tabs().setActive("review") - queueCommentFocus() - return - } - - if (!view().reviewPanel.opened()) view().reviewPanel.open() - layout.fileTree.setTab("all") - const tab = files.tab(item.path) - void tabs().open(tab) - tabs().setActive(tab) - void Promise.resolve(files.load(item.path)).finally(() => queueCommentFocus()) - } - - const recent = createMemo(() => { - const all = tabs().all() - const active = activeFileTab() - const order = active ? [active, ...all.filter((x) => x !== active)] : all - const seen = new Set() - const paths: string[] = [] - - for (const tab of order) { - const path = files.pathFromTab(tab) - if (!path) continue - if (seen.has(path)) continue - seen.add(path) - paths.push(path) - } - - return paths - }) - const info = createMemo(() => (params.id ? 
sync.session.get(params.id) : undefined)) - const working = createMemo(() => sync.data.session_working(params.id ?? "")) - const imageAttachments = createMemo(() => - prompt.current().filter((part): part is ImageAttachmentPart => part.type === "image"), - ) - - const [store, setStore] = createStore<{ - popover: "at" | "slash" | null - historyIndex: number - savedPrompt: PromptHistoryEntry | null - placeholder: number - draggingType: "image" | "@mention" | null - mode: "normal" | "shell" - applyingHistory: boolean - }>({ - popover: null, - historyIndex: -1, - savedPrompt: null as PromptHistoryEntry | null, - placeholder: Math.floor(Math.random() * EXAMPLES.length), - draggingType: null, - mode: "normal", - applyingHistory: false, - }) - - const buttonsSpring = useSpring(() => (store.mode === "normal" ? 1 : 0), { visualDuration: 0.2, bounce: 0 }) - const motion = (value: number) => ({ - opacity: value, - transform: `scale(${0.98 + value * 0.02})`, - filter: `blur(${(1 - value) * 2}px)`, - "pointer-events": value > 0.5 ? ("auto" as const) : ("none" as const), - }) - const buttons = createMemo(() => motion(buttonsSpring())) - const shell = createMemo(() => motion(1 - buttonsSpring())) - const control = createMemo(() => ({ height: "28px", ...buttons() })) - - const commentCount = createMemo(() => { - if (store.mode === "shell") return 0 - return prompt.context.items().filter((item) => !!item.comment?.trim()).length - }) - const blank = createMemo(() => { - const text = prompt - .current() - .map((part) => ("content" in part ? part.content : "")) - .join("") - return text.trim().length === 0 && imageAttachments().length === 0 && commentCount() === 0 - }) - const stopping = createMemo(() => working() && blank()) - const tip = () => { - if (stopping()) { - return ( -
- {language.t("prompt.action.stop")} - {language.t("common.key.esc")} -
- ) - } - - return ( -
- {language.t("prompt.action.send")} - -
- ) - } - - const contextItems = createMemo(() => { - const items = prompt.context.items() - if (store.mode !== "shell") return items - return items.filter((item) => !item.comment?.trim()) - }) - - const hasUserPrompt = createMemo(() => { - const sessionID = params.id - if (!sessionID) return false - const messages = sync.data.message[sessionID] - if (!messages) return false - return messages.some((m) => m.role === "user") - }) - - const [history, setHistory] = persisted( - Persist.global("prompt-history", ["prompt-history.v1"]), - createStore<{ - entries: PromptHistoryStoredEntry[] - }>({ - entries: [], - }), - ) - const [shellHistory, setShellHistory] = persisted( - Persist.global("prompt-history-shell", ["prompt-history-shell.v1"]), - createStore<{ - entries: PromptHistoryStoredEntry[] - }>({ - entries: [], - }), - ) - - const suggest = createMemo(() => !hasUserPrompt()) - - const placeholder = createMemo(() => - promptPlaceholder({ - mode: store.mode, - commentCount: commentCount(), - example: suggest() ? (store.mode === "shell" ? "git status" : language.t(EXAMPLES[store.placeholder])) : "", - suggest: suggest(), - t: (key, params) => language.t(key as Parameters[0], params as never), - }), - ) - - const historyComments = () => { - const byID = new Map(comments.all().map((item) => [`${item.file}\n${item.id}`, item] as const)) - return prompt.context.items().flatMap((item) => { - if (item.type !== "file") return [] - const comment = item.comment?.trim() - if (!comment) return [] - - const selection = item.commentID ? byID.get(`${item.path}\n${item.commentID}`)?.selection : undefined - const nextSelection = - selection ?? - (item.selection - ? ({ - start: item.selection.startLine, - end: item.selection.endLine, - } satisfies SelectedLineRange) - : undefined) - if (!nextSelection) return [] - - return [ - { - id: item.commentID ?? item.key, - path: item.path, - selection: { ...nextSelection }, - comment, - time: item.commentID ? 
(byID.get(`${item.path}\n${item.commentID}`)?.time ?? Date.now()) : Date.now(), - origin: item.commentOrigin, - preview: item.preview, - } satisfies PromptHistoryComment, - ] - }) - } - - const applyHistoryComments = (items: PromptHistoryComment[]) => { - comments.replace( - items.map((item) => ({ - id: item.id, - file: item.path, - selection: { ...item.selection }, - comment: item.comment, - time: item.time, - })), - ) - prompt.context.replaceComments( - items.map((item) => ({ - type: "file" as const, - path: item.path, - selection: selectionFromLines(item.selection), - comment: item.comment, - commentID: item.id, - commentOrigin: item.origin, - preview: item.preview, - })), - ) - } - - const applyHistoryPrompt = (entry: PromptHistoryEntry, position: "start" | "end") => { - const p = entry.prompt - const length = position === "start" ? 0 : promptLength(p) - setStore("applyingHistory", true) - applyHistoryComments(entry.comments) - prompt.set(p, length) - requestAnimationFrame(() => { - editorRef.focus() - setCursorPosition(editorRef, length) - setStore("applyingHistory", false) - queueScroll() - }) - } - - const getCaretState = () => { - const selection = window.getSelection() - const textLength = promptLength(prompt.current()) - if (!selection || selection.rangeCount === 0) { - return { collapsed: false, cursorPosition: 0, textLength } - } - const anchorNode = selection.anchorNode - if (!anchorNode || !editorRef.contains(anchorNode)) { - return { collapsed: false, cursorPosition: 0, textLength } - } - return { - collapsed: selection.isCollapsed, - cursorPosition: getCursorPosition(editorRef), - textLength, - } - } - - const escBlur = () => platform.platform === "desktop" && platform.os === "macos" - - const pick = () => fileInputRef?.click() - - const setMode = (mode: "normal" | "shell") => { - setStore("mode", mode) - setStore("popover", null) - requestAnimationFrame(() => editorRef?.focus()) - } - - const shellModeKey = "mod+shift+x" - const normalModeKey = 
"mod+shift+e" - - command.register("prompt-input", () => [ - { - id: "file.attach", - title: language.t("prompt.action.attachFile"), - category: language.t("command.category.file"), - keybind: "mod+u", - disabled: store.mode !== "normal", - onSelect: pick, - }, - { - id: "prompt.mode.shell", - title: language.t("command.prompt.mode.shell"), - category: language.t("command.category.session"), - keybind: shellModeKey, - disabled: store.mode === "shell", - onSelect: () => setMode("shell"), - }, - { - id: "prompt.mode.normal", - title: language.t("command.prompt.mode.normal"), - category: language.t("command.category.session"), - keybind: normalModeKey, - disabled: store.mode === "normal", - onSelect: () => setMode("normal"), - }, - ]) - - const closePopover = () => setStore("popover", null) - - const resetHistoryNavigation = (force = false) => { - if (!force && (store.historyIndex < 0 || store.applyingHistory)) return - setStore("historyIndex", -1) - setStore("savedPrompt", null) - } - - const clearEditor = () => { - editorRef.innerHTML = "" - } - - const setEditorText = (text: string) => { - clearEditor() - editorRef.textContent = text - } - - const focusEditorEnd = () => { - requestAnimationFrame(() => { - editorRef.focus() - const range = document.createRange() - const selection = window.getSelection() - range.selectNodeContents(editorRef) - range.collapse(false) - selection?.removeAllRanges() - selection?.addRange(range) - }) - } - - const currentCursor = () => { - const selection = window.getSelection() - if (!selection || selection.rangeCount === 0 || !editorRef.contains(selection.anchorNode)) return null - return getCursorPosition(editorRef) - } - - const restoreFocus = () => { - requestAnimationFrame(() => { - const cursor = prompt.cursor() ?? 
promptLength(prompt.current()) - editorRef.focus() - setCursorPosition(editorRef, cursor) - queueScroll() - }) - } - - const renderEditorWithCursor = (parts: Prompt) => { - const cursor = currentCursor() - renderEditor(parts) - if (cursor !== null) setCursorPosition(editorRef, cursor) - } - - createEffect(() => { - params.id - if (params.id) return - if (!suggest()) return - const interval = setInterval(() => { - setStore("placeholder", (prev) => (prev + 1) % EXAMPLES.length) - }, 6500) - onCleanup(() => clearInterval(interval)) - }) - - const [composing, setComposing] = createSignal(false) - const isImeComposing = (event: KeyboardEvent) => event.isComposing || composing() || event.keyCode === 229 - - const handleBlur = () => { - closePopover() - setComposing(false) - } - - const handleCompositionStart = () => { - setComposing(true) - } - - const handleCompositionEnd = () => { - setComposing(false) - requestAnimationFrame(() => { - if (composing()) return - reconcile(prompt.current().filter((part) => part.type !== "image")) - }) - } - - const agentList = createMemo(() => - sync.data.agent - .filter((agent) => !agent.hidden && agent.mode !== "primary") - .map((agent): AtOption => ({ type: "agent", name: agent.name, display: agent.name })), - ) - const agentNames = createMemo(() => local.agent.list().map((agent) => agent.name)) - - const handleAtSelect = (option: AtOption | undefined) => { - if (!option) return - if (option.type === "agent") { - addPart({ type: "agent", name: option.name, content: "@" + option.name, start: 0, end: 0 }) - } else { - addPart({ type: "file", path: option.path, content: "@" + option.path, start: 0, end: 0 }) - } - } - - const atKey = (x: AtOption | undefined) => { - if (!x) return "" - return x.type === "agent" ? 
`agent:${x.name}` : `file:${x.path}` - } - - const { - flat: atFlat, - active: atActive, - setActive: setAtActive, - onInput: atOnInput, - onKeyDown: atOnKeyDown, - } = useFilteredList({ - items: async (query) => { - const agents = agentList() - const open = recent() - const seen = new Set(open) - const pinned: AtOption[] = open.map((path) => ({ type: "file", path, display: path, recent: true })) - if (!query.trim()) return [...agents, ...pinned] - const paths = await files.searchFilesAndDirectories(query) - const fileOptions: AtOption[] = paths - .filter((path) => !seen.has(path)) - .map((path) => ({ type: "file", path, display: path })) - return [...agents, ...pinned, ...fileOptions] - }, - key: atKey, - filterKeys: ["display"], - groupBy: (item) => { - if (item.type === "agent") return "agent" - if (item.recent) return "recent" - return "file" - }, - sortGroupsBy: (a, b) => { - const rank = (category: string) => { - if (category === "agent") return 0 - if (category === "recent") return 1 - return 2 - } - return rank(a.category) - rank(b.category) - }, - onSelect: handleAtSelect, - }) - - const slashCommands = createMemo(() => { - const builtin = command.options - .filter((opt) => !opt.disabled && !opt.id.startsWith("suggested.") && opt.slash) - .map((opt) => ({ - id: opt.id, - trigger: opt.slash!, - title: opt.title, - description: opt.description, - keybind: opt.keybind, - type: "builtin" as const, - })) - - const custom = sync.data.command.map((cmd) => ({ - id: `custom.${cmd.name}`, - trigger: cmd.name, - title: cmd.name, - description: cmd.description, - type: "custom" as const, - source: cmd.source, - })) - - return [...custom, ...builtin] - }) - - const handleSlashSelect = (cmd: SlashCommand | undefined) => { - if (!cmd) return - closePopover() - const images = imageAttachments() - - if (cmd.type === "custom") { - const text = `/${cmd.trigger} ` - setEditorText(text) - prompt.set([{ type: "text", content: text, start: 0, end: text.length }, ...images], 
text.length) - focusEditorEnd() - return - } - - clearEditor() - prompt.set([...DEFAULT_PROMPT, ...images], 0) - command.trigger(cmd.id, "slash") - } - - const { - flat: slashFlat, - active: slashActive, - setActive: setSlashActive, - onInput: slashOnInput, - onKeyDown: slashOnKeyDown, - } = useFilteredList({ - items: slashCommands, - key: (x) => x?.id, - filterKeys: ["trigger", "title"], - onSelect: handleSlashSelect, - }) - - const createPill = (part: FileAttachmentPart | AgentPart) => { - const pill = document.createElement("span") - pill.textContent = part.content - pill.setAttribute("data-type", part.type) - if (part.type === "file") pill.setAttribute("data-path", part.path) - if (part.type === "agent") pill.setAttribute("data-name", part.name) - pill.setAttribute("contenteditable", "false") - pill.style.userSelect = "text" - pill.style.cursor = "default" - return pill - } - - const isNormalizedEditor = () => - Array.from(editorRef.childNodes).every((node) => { - if (node.nodeType === Node.TEXT_NODE) { - const text = node.textContent ?? 
"" - if (!text.includes("\u200B")) return true - if (text !== "\u200B") return false - - const prev = node.previousSibling - const next = node.nextSibling - const prevIsBr = prev?.nodeType === Node.ELEMENT_NODE && (prev as HTMLElement).tagName === "BR" - return !!prevIsBr && !next - } - if (node.nodeType !== Node.ELEMENT_NODE) return false - const el = node as HTMLElement - if (el.dataset.type === "file") return true - if (el.dataset.type === "agent") return true - return el.tagName === "BR" - }) - - const renderEditor = (parts: Prompt) => { - clearEditor() - for (const part of parts) { - if (part.type === "text") { - editorRef.appendChild(createTextFragment(part.content)) - continue - } - if (part.type === "file" || part.type === "agent") { - editorRef.appendChild(createPill(part)) - } - } - - const last = editorRef.lastChild - if (last?.nodeType === Node.ELEMENT_NODE && (last as HTMLElement).tagName === "BR") { - editorRef.appendChild(document.createTextNode("\u200B")) - } - } - - // Auto-scroll active command into view when navigating with keyboard - createEffect(() => { - const activeId = slashActive() - if (!activeId || !slashPopoverRef) return - - requestAnimationFrame(() => { - const element = slashPopoverRef.querySelector(`[data-slash-id="${activeId}"]`) - element?.scrollIntoView({ block: "nearest", behavior: "smooth" }) - }) - }) - const selectPopoverActive = () => { - if (store.popover === "at") { - const items = atFlat() - if (items.length === 0) return - const active = atActive() - const item = items.find((entry) => atKey(entry) === active) ?? items[0] - handleAtSelect(item) - return - } - - if (store.popover === "slash") { - const items = slashFlat() - if (items.length === 0) return - const active = slashActive() - const item = items.find((entry) => entry.id === active) ?? 
items[0] - handleSlashSelect(item) - } - } - - const reconcile = (input: Prompt) => { - if (mirror.input) { - mirror.input = false - if (isNormalizedEditor()) return - - renderEditorWithCursor(input) - return - } - - const dom = parseFromDOM() - if (isNormalizedEditor() && isPromptEqual(input, dom)) return - - renderEditorWithCursor(input) - } - - createEffect( - on( - () => prompt.current(), - (parts) => { - if (composing()) return - reconcile(parts.filter((part) => part.type !== "image")) - }, - ), - ) - - const parseFromDOM = (): Prompt => { - const parts: Prompt = [] - let position = 0 - let buffer = "" - - const flushText = () => { - let content = buffer - if (content.includes("\r")) content = content.replace(/\r\n?/g, "\n") - if (content.includes("\u200B")) content = content.replace(/\u200B/g, "") - buffer = "" - if (!content) return - parts.push({ type: "text", content, start: position, end: position + content.length }) - position += content.length - } - - const pushFile = (file: HTMLElement) => { - const content = file.textContent ?? "" - parts.push({ - type: "file", - path: file.dataset.path!, - content, - start: position, - end: position + content.length, - }) - position += content.length - } - - const pushAgent = (agent: HTMLElement) => { - const content = agent.textContent ?? "" - parts.push({ - type: "agent", - name: agent.dataset.name!, - content, - start: position, - end: position + content.length, - }) - position += content.length - } - - const visit = (node: Node) => { - if (node.nodeType === Node.TEXT_NODE) { - buffer += node.textContent ?? 
"" - return - } - if (node.nodeType !== Node.ELEMENT_NODE) return - - const el = node as HTMLElement - if (el.dataset.type === "file") { - flushText() - pushFile(el) - return - } - if (el.dataset.type === "agent") { - flushText() - pushAgent(el) - return - } - if (el.tagName === "BR") { - buffer += "\n" - return - } - - for (const child of Array.from(el.childNodes)) { - visit(child) - } - } - - const children = Array.from(editorRef.childNodes) - children.forEach((child, index) => { - const isBlock = child.nodeType === Node.ELEMENT_NODE && ["DIV", "P"].includes((child as HTMLElement).tagName) - visit(child) - if (isBlock && index < children.length - 1) { - buffer += "\n" - } - }) - - flushText() - - if (parts.length === 0) parts.push(...DEFAULT_PROMPT) - return parts - } - - const handleInput = () => { - const rawParts = parseFromDOM() - const images = imageAttachments() - const cursorPosition = getCursorPosition(editorRef) - const rawText = - rawParts.length === 1 && rawParts[0]?.type === "text" - ? rawParts[0].content - : rawParts.map((p) => ("content" in p ? 
p.content : "")).join("") - const hasNonText = rawParts.some((part) => part.type !== "text") - const shouldReset = !NON_EMPTY_TEXT.test(rawText) && !hasNonText && images.length === 0 - - if (shouldReset) { - closePopover() - resetHistoryNavigation() - if (prompt.dirty()) { - mirror.input = true - prompt.set(DEFAULT_PROMPT, 0) - } - queueScroll() - return - } - - const shellMode = store.mode === "shell" - - if (!shellMode) { - const atMatch = rawText.substring(0, cursorPosition).match(/@(\S*)$/) - const slashMatch = rawText.match(/^\/(\S*)$/) - - if (atMatch) { - atOnInput(atMatch[1]) - setStore("popover", "at") - } else if (slashMatch) { - slashOnInput(slashMatch[1]) - setStore("popover", "slash") - } else { - closePopover() - } - } else { - closePopover() - } - - resetHistoryNavigation() - - mirror.input = true - prompt.set([...rawParts, ...images], cursorPosition) - queueScroll() - } - - const addPart = (part: ContentPart) => { - if (part.type === "image") return false - - const selection = window.getSelection() - if (!selection) return false - - if (selection.rangeCount === 0 || !editorRef.contains(selection.anchorNode)) { - editorRef.focus() - const cursor = prompt.cursor() ?? promptLength(prompt.current()) - setCursorPosition(editorRef, cursor) - } - - if (selection.rangeCount === 0) return false - const range = selection.getRangeAt(0) - if (!editorRef.contains(range.startContainer)) return false - - if (part.type === "file" || part.type === "agent") { - const cursorPosition = getCursorPosition(editorRef) - const rawText = prompt - .current() - .map((p) => ("content" in p ? p.content : "")) - .join("") - const textBeforeCursor = rawText.substring(0, cursorPosition) - const atMatch = textBeforeCursor.match(/@(\S*)$/) - const pill = createPill(part) - const gap = document.createTextNode(" ") - - if (atMatch) { - const start = atMatch.index ?? 
cursorPosition - atMatch[0].length - setRangeEdge(editorRef, range, "start", start) - setRangeEdge(editorRef, range, "end", cursorPosition) - } - - range.deleteContents() - range.insertNode(gap) - range.insertNode(pill) - range.setStartAfter(gap) - range.collapse(true) - selection.removeAllRanges() - selection.addRange(range) - } - - if (part.type === "text") { - const fragment = createTextFragment(part.content) - const last = fragment.lastChild - range.deleteContents() - range.insertNode(fragment) - if (last) { - if (last.nodeType === Node.TEXT_NODE) { - const text = last.textContent ?? "" - if (text === "\u200B") { - range.setStart(last, 0) - } - if (text !== "\u200B") { - range.setStart(last, text.length) - } - } - if (last.nodeType !== Node.TEXT_NODE) { - const isBreak = last.nodeType === Node.ELEMENT_NODE && (last as HTMLElement).tagName === "BR" - const next = last.nextSibling - const emptyText = next?.nodeType === Node.TEXT_NODE && (next.textContent ?? "") === "" - if (isBreak && (!next || emptyText)) { - const placeholder = next && emptyText ? next : document.createTextNode("\u200B") - if (!next) last.parentNode?.insertBefore(placeholder, null) - placeholder.textContent = "\u200B" - range.setStart(placeholder, 0) - } else { - range.setStartAfter(last) - } - } - } - range.collapse(true) - selection.removeAllRanges() - selection.addRange(range) - } - - handleInput() - closePopover() - return true - } - - const addToHistory = (prompt: Prompt, mode: "normal" | "shell") => { - const currentHistory = mode === "shell" ? shellHistory : history - const setCurrentHistory = mode === "shell" ? setShellHistory : setHistory - const next = prependHistoryEntry(currentHistory.entries, prompt, mode === "shell" ? 
[] : historyComments()) - if (next === currentHistory.entries) return - setCurrentHistory("entries", next) - } - - createEffect( - on( - () => props.edit?.id, - (id) => { - const edit = props.edit - if (!id || !edit) return - - for (const item of prompt.context.items()) { - prompt.context.remove(item.key) - } - - for (const item of edit.context) { - prompt.context.add({ - type: item.type, - path: item.path, - selection: item.selection, - comment: item.comment, - commentID: item.commentID, - commentOrigin: item.commentOrigin, - preview: item.preview, - }) - } - - setStore("mode", "normal") - setStore("popover", null) - setStore("historyIndex", -1) - setStore("savedPrompt", null) - prompt.set(edit.prompt, promptLength(edit.prompt)) - requestAnimationFrame(() => { - editorRef.focus() - setCursorPosition(editorRef, promptLength(edit.prompt)) - queueScroll() - }) - props.onEditLoaded?.() - }, - { defer: true }, - ), - ) - - const navigateHistory = (direction: "up" | "down") => { - const result = navigatePromptHistory({ - direction, - entries: store.mode === "shell" ? 
shellHistory.entries : history.entries, - historyIndex: store.historyIndex, - currentPrompt: prompt.current(), - currentComments: historyComments(), - savedPrompt: store.savedPrompt, - }) - if (!result.handled) return false - setStore("historyIndex", result.historyIndex) - setStore("savedPrompt", result.savedPrompt) - applyHistoryPrompt(result.entry, result.cursor) - return true - } - - const { addAttachments, removeAttachment, handlePaste } = createPromptAttachments({ - editor: () => editorRef, - isDialogActive: () => !!dialog.active, - setDraggingType: (type) => setStore("draggingType", type), - focusEditor: () => { - editorRef.focus() - setCursorPosition(editorRef, promptLength(prompt.current())) - }, - addPart, - readClipboardImage: platform.readClipboardImage, - }) - - const variants = createMemo(() => ["default", ...local.model.variant.list()]) - const accepting = createMemo(() => { - const id = params.id - if (!id) return permission.isAutoAcceptingDirectory(sdk.directory) - return permission.isAutoAccepting(id, sdk.directory) - }) - - const { abort, handleSubmit } = createPromptSubmit({ - info, - imageAttachments, - commentCount, - autoAccept: () => accepting(), - mode: () => store.mode, - working, - editor: () => editorRef, - queueScroll, - promptLength, - addToHistory, - resetHistoryNavigation: () => { - resetHistoryNavigation(true) - }, - setMode: (mode) => setStore("mode", mode), - setPopover: (popover) => setStore("popover", popover), - newSessionWorktree: () => props.newSessionWorktree, - onNewSessionWorktreeReset: props.onNewSessionWorktreeReset, - shouldQueue: props.shouldQueue, - onQueue: props.onQueue, - onAbort: props.onAbort, - onSubmit: props.onSubmit, - }) - - const handleKeyDown = (event: KeyboardEvent) => { - if ((event.metaKey || event.ctrlKey) && !event.altKey && !event.shiftKey && event.key.toLowerCase() === "u") { - event.preventDefault() - if (store.mode !== "normal") return - pick() - return - } - - if (event.key === "Backspace") { - 
const selection = window.getSelection() - if (selection && selection.isCollapsed) { - const node = selection.anchorNode - const offset = selection.anchorOffset - if (node && node.nodeType === Node.TEXT_NODE) { - const text = node.textContent ?? "" - if (/^\u200B+$/.test(text) && offset > 0) { - const range = document.createRange() - range.setStart(node, 0) - range.collapse(true) - selection.removeAllRanges() - selection.addRange(range) - } - } - } - } - - if (event.key === "!" && store.mode === "normal") { - const cursorPosition = getCursorPosition(editorRef) - if (cursorPosition === 0) { - setStore("mode", "shell") - setStore("popover", null) - event.preventDefault() - return - } - } - - if (event.key === "Escape") { - if (store.popover) { - closePopover() - event.preventDefault() - event.stopPropagation() - return - } - - if (store.mode === "shell") { - setStore("mode", "normal") - event.preventDefault() - event.stopPropagation() - return - } - - if (working()) { - void abort() - event.preventDefault() - event.stopPropagation() - return - } - - if (escBlur()) { - editorRef.blur() - event.preventDefault() - event.stopPropagation() - return - } - } - - if (store.mode === "shell") { - const { collapsed, cursorPosition, textLength } = getCaretState() - if (event.key === "Backspace" && collapsed && cursorPosition === 0 && textLength === 0) { - setStore("mode", "normal") - event.preventDefault() - return - } - } - - // Handle Shift+Enter BEFORE IME check - Shift+Enter is never used for IME input - // and should always insert a newline regardless of composition state - if (event.key === "Enter" && event.shiftKey) { - addPart({ type: "text", content: "\n", start: 0, end: 0 }) - event.preventDefault() - return - } - - if (event.key === "Enter" && isImeComposing(event)) { - return - } - - const ctrl = event.ctrlKey && !event.metaKey && !event.altKey && !event.shiftKey - - if (store.popover) { - if (event.key === "Tab") { - selectPopoverActive() - event.preventDefault() - 
return - } - const nav = event.key === "ArrowUp" || event.key === "ArrowDown" || event.key === "Enter" - const ctrlNav = ctrl && (event.key === "n" || event.key === "p") - if (nav || ctrlNav) { - if (store.popover === "at") { - atOnKeyDown(event) - event.preventDefault() - return - } - if (store.popover === "slash") { - slashOnKeyDown(event) - } - event.preventDefault() - return - } - } - - if (ctrl && event.code === "KeyG") { - if (store.popover) { - closePopover() - event.preventDefault() - return - } - if (working()) { - void abort() - event.preventDefault() - } - return - } - - if (event.key === "ArrowUp" || event.key === "ArrowDown") { - if (event.altKey || event.ctrlKey || event.metaKey) return - const { collapsed } = getCaretState() - if (!collapsed) return - - const cursorPosition = getCursorPosition(editorRef) - const textContent = prompt - .current() - .map((part) => ("content" in part ? part.content : "")) - .join("") - const direction = event.key === "ArrowUp" ? "up" : "down" - if (!canNavigateHistoryAtCursor(direction, textContent, cursorPosition, store.historyIndex >= 0)) return - if (navigateHistory(direction)) { - event.preventDefault() - } - return - } - - // Note: Shift+Enter is handled earlier, before IME check - if (event.key === "Enter" && !event.shiftKey) { - event.preventDefault() - if (event.repeat) return - if ( - working() && - prompt - .current() - .map((part) => ("content" in part ? part.content : "")) - .join("") - .trim().length === 0 && - imageAttachments().length === 0 && - commentCount() === 0 - ) { - return - } - void handleSubmit(event) - } - } - - const [agentsQuery, globalProvidersQuery, providersQuery] = useQueries(() => ({ - queries: [ - queryOptions.agents(pathKey(sdk.directory)), - queryOptions.providers(null), - queryOptions.providers(pathKey(sdk.directory)), - ], - })) - - const agentsLoading = () => agentsQuery.isLoading - const agentsShouldFadeIn = createMemo((prev) => prev ?? 
agentsLoading()) - const providersLoading = () => agentsLoading() || providersQuery.isLoading || globalProvidersQuery.isLoading - const providersShouldFadeIn = createMemo((prev) => prev ?? providersLoading()) - - const [promptReady] = createResource( - () => prompt.ready().promise, - (p) => p, - ) - - return ( -
- {(promptReady(), null)} - (slashPopoverRef = el)} - atFlat={atFlat()} - atActive={atActive() ?? undefined} - atKey={atKey} - setAtActive={setAtActive} - onAtSelect={handleAtSelect} - slashFlat={slashFlat()} - slashActive={slashActive() ?? undefined} - setSlashActive={setSlashActive} - onSlashSelect={handleSlashSelect} - commandKeybind={command.keybind} - t={(key) => language.t(key as Parameters[0])} - /> - - - { - const active = comments.active() - return !!item.commentID && item.commentID === active?.id && item.path === active?.file - }} - openComment={openComment} - remove={(item) => { - if (item.commentID) comments.remove(item.path, item.commentID) - prompt.context.remove(item.key) - }} - t={(key) => language.t(key as Parameters[0])} - /> - - dialog.show(() => ) - } - onRemove={removeAttachment} - removeLabel={language.t("prompt.attachment.remove")} - /> -
{ - const target = e.target - if (!(target instanceof HTMLElement)) return - if (target.closest('[data-action="prompt-attach"], [data-action="prompt-submit"]')) { - return - } - editorRef?.focus() - }} - > -
(scrollRef = el)} - style={{ "scroll-padding-bottom": space }} - > -
{ - editorRef = el - props.ref?.(el) - }} - role="textbox" - aria-multiline="true" - aria-label={placeholder()} - contenteditable="true" - autocapitalize={store.mode === "normal" ? "sentences" : "off"} - autocorrect={store.mode === "normal" ? "on" : "off"} - spellcheck={store.mode === "normal"} - inputMode="text" - // @ts-expect-error - autocomplete="off" - onInput={handleInput} - onPaste={handlePaste} - onCompositionStart={handleCompositionStart} - onCompositionEnd={handleCompositionEnd} - onBlur={handleBlur} - onKeyDown={handleKeyDown} - classList={{ - "select-text": true, - "w-full pl-3 pr-2 pt-2 text-14-regular text-text-strong focus:outline-none whitespace-pre-wrap": true, - "[&_[data-type=file]]:text-syntax-property": true, - "[&_[data-type=agent]]:text-syntax-type": true, - "font-mono!": store.mode === "shell", - }} - style={{ "padding-bottom": space }} - /> -
- {placeholder()} -
-
- - - - - -
-
-
- - {language.t("prompt.mode.shell")} -
- -
-
- -
- - (x === "default" ? language.t("common.default") : x)} - onSelect={(value) => { - local.model.variant.set(value === "default" ? undefined : value) - restoreFocus() - }} - class="capitalize max-w-[160px] text-text-base" - valueClass="truncate text-13-regular text-text-base" - triggerStyle={control()} - triggerProps={{ "data-action": "prompt-model-variant" }} - variant="ghost" - /> - -
-
- - -
-
-
- - -
- ) -} diff --git a/packages/app/src/components/prompt-input/attachments.ts b/packages/app/src/components/prompt-input/attachments.ts deleted file mode 100644 index f12a4210c082..000000000000 --- a/packages/app/src/components/prompt-input/attachments.ts +++ /dev/null @@ -1,196 +0,0 @@ -import { onMount } from "solid-js" -import { makeEventListener } from "@solid-primitives/event-listener" -import { showToast } from "@opencode-ai/ui/toast" -import { usePrompt, type ContentPart, type ImageAttachmentPart } from "@/context/prompt" -import { useLanguage } from "@/context/language" -import { uuid } from "@/utils/uuid" -import { getCursorPosition } from "./editor-dom" -import { attachmentMime } from "./files" -import { normalizePaste, pasteMode } from "./paste" - -function dataUrl(file: File, mime: string) { - return new Promise((resolve) => { - const reader = new FileReader() - reader.addEventListener("error", () => resolve("")) - reader.addEventListener("load", () => { - const value = typeof reader.result === "string" ? 
reader.result : "" - const idx = value.indexOf(",") - if (idx === -1) { - resolve(value) - return - } - resolve(`data:${mime};base64,${value.slice(idx + 1)}`) - }) - reader.readAsDataURL(file) - }) -} - -type PromptAttachmentsInput = { - editor: () => HTMLDivElement | undefined - isDialogActive: () => boolean - setDraggingType: (type: "image" | "@mention" | null) => void - focusEditor: () => void - addPart: (part: ContentPart) => boolean - readClipboardImage?: () => Promise -} - -export function createPromptAttachments(input: PromptAttachmentsInput) { - const prompt = usePrompt() - const language = useLanguage() - - const warn = () => { - showToast({ - title: language.t("prompt.toast.pasteUnsupported.title"), - description: language.t("prompt.toast.pasteUnsupported.description"), - }) - } - - const add = async (file: File, toast = true) => { - const mime = await attachmentMime(file) - if (!mime) { - if (toast) warn() - return false - } - - const editor = input.editor() - if (!editor) return false - - const url = await dataUrl(file, mime) - if (!url) return false - - const attachment: ImageAttachmentPart = { - type: "image", - id: uuid(), - filename: file.name, - mime, - dataUrl: url, - } - const cursor = prompt.cursor() ?? 
getCursorPosition(editor) - prompt.set([...prompt.current(), attachment], cursor) - return true - } - - const addAttachment = (file: File) => add(file) - - const addAttachments = async (files: File[], toast = true) => { - let found = false - - for (const file of files) { - const ok = await add(file, false) - if (ok) found = true - } - - if (!found && files.length > 0 && toast) warn() - return found - } - - const removeAttachment = (id: string) => { - const current = prompt.current() - const next = current.filter((part) => part.type !== "image" || part.id !== id) - prompt.set(next, prompt.cursor()) - } - - const handlePaste = async (event: ClipboardEvent) => { - const clipboardData = event.clipboardData - if (!clipboardData) return - - event.preventDefault() - event.stopPropagation() - - const files = Array.from(clipboardData.items).flatMap((item) => { - if (item.kind !== "file") return [] - const file = item.getAsFile() - return file ? [file] : [] - }) - - if (files.length > 0) { - await addAttachments(files) - return - } - - const plainText = clipboardData.getData("text/plain") ?? 
"" - - // Desktop: Browser clipboard has no images and no text, try platform's native clipboard for images - if (input.readClipboardImage && !plainText) { - const file = await input.readClipboardImage() - if (file) { - await addAttachment(file) - return - } - } - - if (!plainText) return - - const text = normalizePaste(plainText) - - const put = () => { - if (input.addPart({ type: "text", content: text, start: 0, end: 0 })) return true - input.focusEditor() - return input.addPart({ type: "text", content: text, start: 0, end: 0 }) - } - - if (pasteMode(text) === "manual") { - put() - return - } - - const inserted = typeof document.execCommand === "function" && document.execCommand("insertText", false, text) - if (inserted) return - - put() - } - - const handleGlobalDragOver = (event: DragEvent) => { - if (input.isDialogActive()) return - - event.preventDefault() - const hasFiles = event.dataTransfer?.types.includes("Files") - const hasText = event.dataTransfer?.types.includes("text/plain") - if (hasFiles) { - input.setDraggingType("image") - } else if (hasText) { - input.setDraggingType("@mention") - } - } - - const handleGlobalDragLeave = (event: DragEvent) => { - if (input.isDialogActive()) return - if (!event.relatedTarget) { - input.setDraggingType(null) - } - } - - const handleGlobalDrop = async (event: DragEvent) => { - if (input.isDialogActive()) return - - event.preventDefault() - input.setDraggingType(null) - - const plainText = event.dataTransfer?.getData("text/plain") - const filePrefix = "file:" - if (plainText?.startsWith(filePrefix)) { - const filePath = plainText.slice(filePrefix.length) - input.focusEditor() - input.addPart({ type: "file", path: filePath, content: "@" + filePath, start: 0, end: 0 }) - return - } - - const dropped = event.dataTransfer?.files - if (!dropped) return - - await addAttachments(Array.from(dropped)) - } - - onMount(() => { - makeEventListener(document, "dragover", handleGlobalDragOver) - makeEventListener(document, 
"dragleave", handleGlobalDragLeave) - makeEventListener(document, "drop", handleGlobalDrop) - }) - - return { - addAttachment, - addAttachments, - removeAttachment, - handlePaste, - } -} diff --git a/packages/app/src/components/session/session-context-tab.tsx b/packages/app/src/components/session/session-context-tab.tsx deleted file mode 100644 index 43741bd3fc0d..000000000000 --- a/packages/app/src/components/session/session-context-tab.tsx +++ /dev/null @@ -1,341 +0,0 @@ -import { createMemo, createEffect, on, onCleanup, For, Show } from "solid-js" -import type { JSX } from "solid-js" -import { useSync } from "@/context/sync" -import { checksum } from "@opencode-ai/core/util/encode" -import { findLast } from "@opencode-ai/core/util/array" -import { same } from "@/utils/same" -import { Icon } from "@opencode-ai/ui/icon" -import { Accordion } from "@opencode-ai/ui/accordion" -import { StickyAccordionHeader } from "@opencode-ai/ui/sticky-accordion-header" -import { File } from "@opencode-ai/ui/file" -import { Markdown } from "@opencode-ai/ui/markdown" -import { ScrollView } from "@opencode-ai/ui/scroll-view" -import type { Message, Part, UserMessage } from "@opencode-ai/sdk/v2/client" -import { useLanguage } from "@/context/language" -import { useProviders } from "@/hooks/use-providers" -import { useSessionLayout } from "@/pages/session/session-layout" -import { getSessionContextMetrics } from "./session-context-metrics" -import { estimateSessionContextBreakdown, type SessionContextBreakdownKey } from "./session-context-breakdown" -import { createSessionContextFormatter } from "./session-context-format" - -const BREAKDOWN_COLOR: Record = { - system: "var(--syntax-info)", - user: "var(--syntax-success)", - assistant: "var(--syntax-property)", - tool: "var(--syntax-warning)", - other: "var(--syntax-comment)", -} - -function Stat(props: { label: string; value: JSX.Element }) { - return ( -
-
{props.label}
-
{props.value}
-
- ) -} - -function RawMessageContent(props: { message: Message; getParts: (id: string) => Part[]; onRendered: () => void }) { - const file = createMemo(() => { - const parts = props.getParts(props.message.id) - const contents = JSON.stringify({ message: props.message, parts }, null, 2) - return { - name: `${props.message.role}-${props.message.id}.json`, - contents, - cacheKey: checksum(contents), - } - }) - - return ( - requestAnimationFrame(props.onRendered)} - /> - ) -} - -function RawMessage(props: { - message: Message - getParts: (id: string) => Part[] - onRendered: () => void - time: (value: number | undefined) => string -}) { - return ( - - - -
-
- {props.message.role} • {props.message.id} -
-
-
{props.time(props.message.time.created)}
- -
-
-
-
- -
- -
-
-
- ) -} - -const emptyMessages: Message[] = [] -const emptyUserMessages: UserMessage[] = [] - -export function SessionContextTab() { - const sync = useSync() - const language = useLanguage() - const providers = useProviders() - const { params, view } = useSessionLayout() - - const info = createMemo(() => (params.id ? sync.session.get(params.id) : undefined)) - - const messages = createMemo( - () => { - const id = params.id - if (!id) return emptyMessages - return (sync.data.message[id] ?? []) as Message[] - }, - emptyMessages, - { equals: same }, - ) - - const userMessages = createMemo( - () => messages().filter((m) => m.role === "user") as UserMessage[], - emptyUserMessages, - { equals: same }, - ) - - const visibleUserMessages = createMemo( - () => { - const revert = info()?.revert?.messageID - if (!revert) return userMessages() - return userMessages().filter((m) => m.id < revert) - }, - emptyUserMessages, - { equals: same }, - ) - - const usd = createMemo( - () => - new Intl.NumberFormat(language.intl(), { - style: "currency", - currency: "USD", - }), - ) - - const metrics = createMemo(() => getSessionContextMetrics(messages(), providers.all())) - const ctx = createMemo(() => metrics().context) - const formatter = createMemo(() => createSessionContextFormatter(language.intl())) - - const cost = createMemo(() => { - return usd().format(metrics().totalCost) - }) - - const counts = createMemo(() => { - const all = messages() - const user = all.reduce((count, x) => count + (x.role === "user" ? 1 : 0), 0) - const assistant = all.reduce((count, x) => count + (x.role === "assistant" ? 
1 : 0), 0) - return { - all: all.length, - user, - assistant, - } - }) - - const systemPrompt = createMemo(() => { - const msg = findLast(visibleUserMessages(), (m) => !!m.system) - const system = msg?.system - if (!system) return - const trimmed = system.trim() - if (!trimmed) return - return trimmed - }) - - const providerLabel = createMemo(() => { - const c = ctx() - if (!c) return "—" - return c.providerLabel - }) - - const modelLabel = createMemo(() => { - const c = ctx() - if (!c) return "—" - return c.modelLabel - }) - - const breakdown = createMemo( - on( - () => [ctx()?.message.id, ctx()?.input, messages().length, systemPrompt()], - () => { - const c = ctx() - if (!c?.input) return [] - return estimateSessionContextBreakdown({ - messages: messages(), - parts: sync.data.part as Record, - input: c.input, - systemPrompt: systemPrompt(), - }) - }, - ), - ) - - const breakdownLabel = (key: SessionContextBreakdownKey) => { - if (key === "system") return language.t("context.breakdown.system") - if (key === "user") return language.t("context.breakdown.user") - if (key === "assistant") return language.t("context.breakdown.assistant") - if (key === "tool") return language.t("context.breakdown.tool") - return language.t("context.breakdown.other") - } - - const stats = [ - { label: "context.stats.session", value: () => info()?.title ?? params.id ?? 
"—" }, - { label: "context.stats.messages", value: () => counts().all.toLocaleString(language.intl()) }, - { label: "context.stats.provider", value: providerLabel }, - { label: "context.stats.model", value: modelLabel }, - { label: "context.stats.limit", value: () => formatter().number(ctx()?.limit) }, - { label: "context.stats.totalTokens", value: () => formatter().number(ctx()?.total) }, - { label: "context.stats.usage", value: () => formatter().percent(ctx()?.usage) }, - { label: "context.stats.inputTokens", value: () => formatter().number(ctx()?.input) }, - { label: "context.stats.outputTokens", value: () => formatter().number(ctx()?.output) }, - { label: "context.stats.reasoningTokens", value: () => formatter().number(ctx()?.reasoning) }, - { - label: "context.stats.cacheTokens", - value: () => `${formatter().number(ctx()?.cacheRead)} / ${formatter().number(ctx()?.cacheWrite)}`, - }, - { label: "context.stats.userMessages", value: () => counts().user.toLocaleString(language.intl()) }, - { label: "context.stats.assistantMessages", value: () => counts().assistant.toLocaleString(language.intl()) }, - { label: "context.stats.totalCost", value: cost }, - { label: "context.stats.sessionCreated", value: () => formatter().time(info()?.time.created) }, - { label: "context.stats.lastActivity", value: () => formatter().time(ctx()?.message.time.created) }, - ] satisfies { label: string; value: () => JSX.Element }[] - - let scroll: HTMLDivElement | undefined - let frame: number | undefined - let pending: { x: number; y: number } | undefined - const getParts = (id: string) => (sync.data.part[id] ?? 
[]) as Part[] - - const restoreScroll = () => { - const el = scroll - if (!el) return - - const s = view().scroll("context") - if (!s) return - - if (el.scrollTop !== s.y) el.scrollTop = s.y - if (el.scrollLeft !== s.x) el.scrollLeft = s.x - } - - const handleScroll = (event: Event & { currentTarget: HTMLDivElement }) => { - pending = { - x: event.currentTarget.scrollLeft, - y: event.currentTarget.scrollTop, - } - if (frame !== undefined) return - - frame = requestAnimationFrame(() => { - frame = undefined - - const next = pending - pending = undefined - if (!next) return - - view().setScroll("context", next) - }) - } - - createEffect( - on( - () => messages().length, - () => { - requestAnimationFrame(restoreScroll) - }, - { defer: true }, - ), - ) - - onCleanup(() => { - if (frame === undefined) return - cancelAnimationFrame(frame) - }) - - return ( - { - scroll = el - restoreScroll() - }} - onScroll={handleScroll} - > -
-
- - {(stat) => [0])} value={stat.value()} />} - -
- - 0}> -
-
{language.t("context.breakdown.title")}
-
- - {(segment) => ( -
- )} - -
-
- - {(segment) => ( -
-
-
{breakdownLabel(segment.key)}
-
{segment.percent.toLocaleString(language.intl())}%
-
- )} - -
- -
- - - - {(prompt) => ( -
-
{language.t("context.systemPrompt.title")}
-
- -
-
- )} -
- -
-
{language.t("context.rawMessages.title")}
- - - {(message) => ( - - )} - - -
-
- - ) -} diff --git a/packages/app/src/components/session/session-new-view.tsx b/packages/app/src/components/session/session-new-view.tsx deleted file mode 100644 index 36c1eb42c316..000000000000 --- a/packages/app/src/components/session/session-new-view.tsx +++ /dev/null @@ -1,91 +0,0 @@ -import { Show, createMemo } from "solid-js" -import { DateTime } from "luxon" -import { useSync } from "@/context/sync" -import { useSDK } from "@/context/sdk" -import { useLanguage } from "@/context/language" -import { Icon } from "@opencode-ai/ui/icon" -import { Mark } from "@opencode-ai/ui/logo" -import { getDirectory, getFilename } from "@opencode-ai/core/util/path" - -const MAIN_WORKTREE = "main" -const CREATE_WORKTREE = "create" -const ROOT_CLASS = "size-full flex flex-col" - -interface NewSessionViewProps { - worktree: string -} - -export function NewSessionView(props: NewSessionViewProps) { - const sync = useSync() - const sdk = useSDK() - const language = useLanguage() - - const sandboxes = createMemo(() => sync.project?.sandboxes ?? []) - const options = createMemo(() => [MAIN_WORKTREE, ...sandboxes(), CREATE_WORKTREE]) - const current = createMemo(() => { - const selection = props.worktree - if (options().includes(selection)) return selection - return MAIN_WORKTREE - }) - const projectRoot = createMemo(() => sync.project?.worktree ?? sdk.directory) - const isWorktree = createMemo(() => { - const project = sync.project - if (!project) return false - return sdk.directory !== project.worktree - }) - - const label = (value: string) => { - if (value === MAIN_WORKTREE) { - if (isWorktree()) return language.t("session.new.worktree.main") - const branch = sync.data.vcs?.branch - if (branch) return language.t("session.new.worktree.mainWithBranch", { branch }) - return language.t("session.new.worktree.main") - } - - if (value === CREATE_WORKTREE) return language.t("session.new.worktree.create") - - return getFilename(value) - } - - return ( -
-
-
-
-
- -
{language.t("session.new.title")}
-
-
-
-
- {getDirectory(projectRoot())} - {getFilename(projectRoot())} -
-
-
- -
- {label(current())} -
-
- - {(project) => ( -
-
- {language.t("session.new.lastModified")}  - - {DateTime.fromMillis(project().time.updated ?? project().time.created) - .setLocale(language.intl()) - .toRelative()} - -
-
- )} -
-
-
-
-
- ) -} diff --git a/packages/app/src/components/settings-general.tsx b/packages/app/src/components/settings-general.tsx deleted file mode 100644 index 535bd72064e2..000000000000 --- a/packages/app/src/components/settings-general.tsx +++ /dev/null @@ -1,802 +0,0 @@ -import { Component, Show, createMemo, createResource, onMount, type JSX } from "solid-js" -import { createStore } from "solid-js/store" -import { Button } from "@opencode-ai/ui/button" -import { Icon } from "@opencode-ai/ui/icon" -import { Select } from "@opencode-ai/ui/select" -import { Switch } from "@opencode-ai/ui/switch" -import { TextField } from "@opencode-ai/ui/text-field" -import { Tooltip } from "@opencode-ai/ui/tooltip" -import { useTheme, type ColorScheme } from "@opencode-ai/ui/theme/context" -import { showToast } from "@opencode-ai/ui/toast" -import { useParams } from "@solidjs/router" -import { useLanguage } from "@/context/language" -import { usePermission } from "@/context/permission" -import { usePlatform, type DisplayBackend } from "@/context/platform" -import { useGlobalSync } from "@/context/global-sync" -import { useGlobalSDK } from "@/context/global-sdk" -import { - monoDefault, - monoFontFamily, - monoInput, - sansDefault, - sansFontFamily, - sansInput, - terminalDefault, - terminalFontFamily, - terminalInput, - useSettings, -} from "@/context/settings" -import { decode64 } from "@/utils/base64" -import { playSoundById, SOUND_OPTIONS } from "@/utils/sound" -import { Link } from "./link" -import { SettingsList } from "./settings-list" - -let demoSoundState = { - cleanup: undefined as (() => void) | undefined, - timeout: undefined as NodeJS.Timeout | undefined, - run: 0, -} - -type ThemeOption = { - id: string - name: string -} - -type ShellOption = { - path: string - name: string - acceptable: boolean -} - -type ShellSelectOption = { - id: string - value: string - label: string -} - -// To prevent audio from overlapping/playing very quickly when navigating the settings menus, -// 
delay the playback by 100ms during quick selection changes and pause existing sounds. -const stopDemoSound = () => { - demoSoundState.run += 1 - if (demoSoundState.cleanup) { - demoSoundState.cleanup() - } - clearTimeout(demoSoundState.timeout) - demoSoundState.cleanup = undefined -} - -const playDemoSound = (id: string | undefined) => { - stopDemoSound() - if (!id) return - - const run = ++demoSoundState.run - demoSoundState.timeout = setTimeout(() => { - void playSoundById(id).then((cleanup) => { - if (demoSoundState.run !== run) { - cleanup?.() - return - } - demoSoundState.cleanup = cleanup - }) - }, 100) -} - -export const SettingsGeneral: Component = () => { - const theme = useTheme() - const language = useLanguage() - const permission = usePermission() - const platform = usePlatform() - const params = useParams() - const settings = useSettings() - - const [store, setStore] = createStore({ - checking: false, - }) - - const linux = createMemo(() => platform.platform === "desktop" && platform.os === "linux") - const dir = createMemo(() => decode64(params.dir)) - const accepting = createMemo(() => { - const value = dir() - if (!value) return false - if (!params.id) return permission.isAutoAcceptingDirectory(value) - return permission.isAutoAccepting(params.id, value) - }) - - const toggleAccept = (checked: boolean) => { - const value = dir() - if (!value) return - - if (!params.id) { - if (permission.isAutoAcceptingDirectory(value) === checked) return - permission.toggleAutoAcceptDirectory(value) - return - } - - if (checked) { - permission.enableAutoAccept(params.id, value) - return - } - - permission.disableAutoAccept(params.id, value) - } - const desktop = createMemo(() => platform.platform === "desktop") - - const check = () => { - if (!platform.checkUpdate) return - setStore("checking", true) - - void platform - .checkUpdate() - .then((result) => { - if (!result.updateAvailable) { - showToast({ - variant: "success", - icon: "circle-check", - title: 
language.t("settings.updates.toast.latest.title"), - description: language.t("settings.updates.toast.latest.description", { version: platform.version ?? "" }), - }) - return - } - - const actions = platform.updateAndRestart - ? [ - { - label: language.t("toast.update.action.installRestart"), - onClick: async () => { - await platform.updateAndRestart!() - }, - }, - { - label: language.t("toast.update.action.notYet"), - onClick: "dismiss" as const, - }, - ] - : [ - { - label: language.t("toast.update.action.notYet"), - onClick: "dismiss" as const, - }, - ] - - showToast({ - persistent: true, - icon: "download", - title: language.t("toast.update.title"), - description: language.t("toast.update.description", { version: result.version ?? "" }), - actions, - }) - }) - .catch((err: unknown) => { - const message = err instanceof Error ? err.message : String(err) - showToast({ title: language.t("common.requestFailed"), description: message }) - }) - .finally(() => setStore("checking", false)) - } - - const themeOptions = createMemo(() => theme.ids().map((id) => ({ id, name: theme.name(id) }))) - - const globalSync = useGlobalSync() - const globalSdk = useGlobalSDK() - - const [shells] = createResource( - () => - globalSdk.client.pty - .shells() - .then((res) => res.data ?? []) - .catch(() => [] as ShellOption[]), - { initialValue: [] as ShellOption[] }, - ) - - const [displayBackend, { refetch: refetchDisplayBackend }] = createResource( - () => (linux() && platform.getDisplayBackend ? true : false), - () => Promise.resolve(platform.getDisplayBackend?.() ?? null).catch(() => null as DisplayBackend | null), - { initialValue: null as DisplayBackend | null }, - ) - - onMount(() => { - void theme.loadThemes() - }) - - const autoOption = { id: "auto", value: "", label: language.t("settings.general.row.shell.autoDefault") } - const currentShell = createMemo(() => globalSync.data.config.shell ?? 
"") - - const shellOptions = createMemo(() => { - const list = shells.latest - const current = globalSync.data.config.shell - - const nameCounts = new Map() - for (const s of list) { - nameCounts.set(s.name, (nameCounts.get(s.name) || 0) + 1) - } - - const options = [ - autoOption, - ...list.map((s) => { - const ambiguousName = (nameCounts.get(s.name) || 0) > 1 - const text = ambiguousName ? s.path : s.name - const label = s.acceptable ? text : `${text} (${language.t("settings.general.row.shell.terminalOnly")})` - return { - id: s.path, - // Prefer name over path - "bash" is much cleaner than the explicit full route even when it may change due to PATH. - value: ambiguousName ? s.path : s.name, - label, - } - }), - ] - - if (current && !options.some((o) => o.value === current)) { - options.push({ id: current, value: current, label: current }) - } - - return options - }) - - const onDisplayBackendChange = (checked: boolean) => { - const update = platform.setDisplayBackend?.(checked ? "wayland" : "auto") - if (!update) return - void update.finally(() => { - void refetchDisplayBackend() - }) - } - - const colorSchemeOptions = createMemo((): { value: ColorScheme; label: string }[] => [ - { value: "system", label: language.t("theme.scheme.system") }, - { value: "light", label: language.t("theme.scheme.light") }, - { value: "dark", label: language.t("theme.scheme.dark") }, - ]) - - const languageOptions = createMemo(() => - language.locales.map((locale) => ({ - value: locale, - label: language.label(locale), - })), - ) - - const noneSound = { id: "none", label: "sound.option.none" } as const - const soundOptions = [noneSound, ...SOUND_OPTIONS] - const mono = () => monoInput(settings.appearance.font()) - const sans = () => sansInput(settings.appearance.uiFont()) - const terminal = () => terminalInput(settings.appearance.terminalFont()) - - const soundSelectProps = ( - enabled: () => boolean, - current: () => string, - setEnabled: (value: boolean) => void, - set: (id: 
string) => void, - ) => ({ - options: soundOptions, - current: enabled() ? (soundOptions.find((o) => o.id === current()) ?? noneSound) : noneSound, - value: (o: (typeof soundOptions)[number]) => o.id, - label: (o: (typeof soundOptions)[number]) => language.t(o.label), - onHighlight: (option: (typeof soundOptions)[number] | undefined) => { - if (!option) return - playDemoSound(option.id === "none" ? undefined : option.id) - }, - onSelect: (option: (typeof soundOptions)[number] | undefined) => { - if (!option) return - if (option.id === "none") { - setEnabled(false) - stopDemoSound() - return - } - setEnabled(true) - set(option.id) - playDemoSound(option.id) - }, - variant: "secondary" as const, - size: "small" as const, - triggerVariant: "settings" as const, - }) - - const GeneralSection = () => ( -
- - - o.value === currentShell()) ?? autoOption} - value={(o) => o.id} - label={(o) => o.label} - onSelect={(option) => { - if (!option) return - if (option.value === currentShell()) return - globalSync.updateConfig({ shell: option.value }) - }} - variant="secondary" - size="small" - triggerVariant="settings" - triggerStyle={{ "min-width": "180px" }} - /> - - - -
- settings.general.setShowReasoningSummaries(checked)} - /> -
-
- - -
- settings.general.setShellToolPartsExpanded(checked)} - /> -
-
- - -
- settings.general.setEditToolPartsExpanded(checked)} - /> -
-
- - -
- settings.general.setShowSessionProgressBar(checked)} - /> -
-
-
-
- ) - - const AdvancedSection = () => ( -
-

{language.t("settings.general.section.advanced")}

- - - -
- settings.general.setShowFileTree(checked)} - /> -
-
- - -
- settings.general.setShowNavigation(checked)} - /> -
-
- - -
- settings.general.setShowSearch(checked)} - /> -
-
- - -
- settings.general.setShowTerminal(checked)} - /> -
-
- - -
- settings.general.setShowStatus(checked)} - /> -
-
-
-
- ) - - const AppearanceSection = () => ( -
-

{language.t("settings.general.section.appearance")}

- - - - o.id === theme.themeId())} - value={(o) => o.id} - label={(o) => o.name} - onSelect={(option) => { - if (!option) return - theme.setTheme(option.id) - }} - onHighlight={(option) => { - if (!option) return - theme.previewTheme(option.id) - return () => theme.cancelPreview() - }} - variant="secondary" - size="small" - triggerVariant="settings" - /> - - - -
- settings.appearance.setUIFont(value)} - placeholder={sansDefault} - spellcheck={false} - autocorrect="off" - autocomplete="off" - autocapitalize="off" - class="text-12-regular" - style={{ "font-family": sansFontFamily(settings.appearance.uiFont()) }} - /> -
-
- - -
- settings.appearance.setFont(value)} - placeholder={monoDefault} - spellcheck={false} - autocorrect="off" - autocomplete="off" - autocapitalize="off" - class="text-12-regular" - style={{ "font-family": monoFontFamily(settings.appearance.font()) }} - /> -
-
- - -
- settings.appearance.setTerminalFont(value)} - placeholder={terminalDefault} - spellcheck={false} - autocorrect="off" - autocomplete="off" - autocapitalize="off" - class="text-12-regular" - style={{ "font-family": terminalFontFamily(settings.appearance.terminalFont()) }} - /> -
-
-
-
- ) - - const NotificationsSection = () => ( -
-

{language.t("settings.general.section.notifications")}

- - - -
- settings.notifications.setAgent(checked)} - /> -
-
- - -
- settings.notifications.setPermissions(checked)} - /> -
-
- - -
- settings.notifications.setErrors(checked)} - /> -
-
-
-
- ) - - const SoundsSection = () => ( -
-

{language.t("settings.general.section.sounds")}

- - - - settings.sounds.permissionsEnabled(), - () => settings.sounds.permissions(), - (value) => settings.sounds.setPermissionsEnabled(value), - (id) => settings.sounds.setPermissions(id), - )} - /> - - - - option && setStore("changes", option)} - variant="ghost" - size="small" - valueClass="text-14-medium" - /> - ) - } - - const empty = (text: string) => ( -
-
{text}
-
- ) - - const createGit = (input: { emptyClass: string }) => ( -
-
-
{language.t("session.review.noVcs.createGit.title")}
-
- {language.t("session.review.noVcs.createGit.description")} -
-
- -
- ) - - const reviewEmptyText = createMemo(() => { - if (store.changes === "git") return language.t("session.review.noUncommittedChanges") - if (store.changes === "branch") return language.t("session.review.noBranchChanges") - return language.t("session.review.noChanges") - }) - - const reviewEmpty = (input: { loadingClass: string; emptyClass: string }) => { - if (store.changes === "git" || store.changes === "branch") { - if (!reviewReady()) return
{language.t("session.review.loadingChanges")}
- return empty(reviewEmptyText()) - } - - if (store.changes === "turn") { - if (nogit()) return createGit(input) - return empty(reviewEmptyText()) - } - - return ( -
-
{reviewEmptyText()}
-
- ) - } - - const reviewContent = (input: { - diffStyle: DiffStyle - onDiffStyleChange?: (style: DiffStyle) => void - classes?: SessionReviewTabProps["classes"] - loadingClass: string - emptyClass: string - }) => ( - - setTree("reviewScroll", el)} - focusedFile={tree.activeDiff} - onLineComment={(comment) => addCommentToContext({ ...comment, origin: "review" })} - onLineCommentUpdate={updateCommentInContext} - onLineCommentDelete={removeCommentFromContext} - lineCommentActions={reviewCommentActions()} - commentMentions={{ - items: file.searchFilesAndDirectories, - }} - comments={comments.all()} - focusedComment={comments.focus()} - onFocusedCommentChange={comments.setFocus} - onViewFile={openReviewFile} - classes={input.classes} - /> - - ) - - const reviewPanel = () => ( -
-
- {reviewContent({ - diffStyle: layout.review.diffStyle(), - onDiffStyleChange: layout.review.setDiffStyle, - loadingClass: "px-6 py-4 text-text-weak", - emptyClass: "h-full pb-64 -mt-4 flex flex-col items-center justify-center text-center gap-6", - })} -
-
- ) - - createEffect( - on( - activeFileTab, - (active) => { - if (!active) return - if (fileTreeTab() !== "changes") return - showAllFiles() - }, - { defer: true }, - ), - ) - - const reviewDiffId = (path: string) => { - const sum = checksum(path) - if (!sum) return - return `session-review-diff-${sum}` - } - - const reviewDiffTop = (path: string) => { - const root = tree.reviewScroll - if (!root) return - - const id = reviewDiffId(path) - if (!id) return - - const el = document.getElementById(id) - if (!(el instanceof HTMLElement)) return - if (!root.contains(el)) return - - const a = el.getBoundingClientRect() - const b = root.getBoundingClientRect() - return a.top - b.top + root.scrollTop - } - - const scrollToReviewDiff = (path: string) => { - const root = tree.reviewScroll - if (!root) return false - - const top = reviewDiffTop(path) - if (top === undefined) return false - - view().setScroll("review", { x: root.scrollLeft, y: top }) - root.scrollTo({ top, behavior: "auto" }) - return true - } - - const focusReviewDiff = (path: string) => { - openReviewPanel() - view().review.openPath(path) - setTree({ activeDiff: path, pendingDiff: path }) - } - - createEffect(() => { - const pending = tree.pendingDiff - if (!pending) return - if (!tree.reviewScroll) return - if (!reviewReady()) return - - const attempt = (count: number) => { - if (tree.pendingDiff !== pending) return - if (count > 60) { - setTree("pendingDiff", undefined) - return - } - - const root = tree.reviewScroll - if (!root) { - requestAnimationFrame(() => attempt(count + 1)) - return - } - - if (!scrollToReviewDiff(pending)) { - requestAnimationFrame(() => attempt(count + 1)) - return - } - - const top = reviewDiffTop(pending) - if (top === undefined) { - requestAnimationFrame(() => attempt(count + 1)) - return - } - - if (Math.abs(root.scrollTop - top) <= 1) { - setTree("pendingDiff", undefined) - return - } - - requestAnimationFrame(() => attempt(count + 1)) - } - - requestAnimationFrame(() => 
attempt(0)) - }) - - createEffect(() => { - const id = params.id - if (!id) return - - if (!wantsReview()) return - if (sync.data.session_diff[id] !== undefined) return - if (sync.status === "loading") return - - void sync.session.diff(id) - }) - - createEffect( - on( - () => [sessionKey(), wantsReview()] as const, - ([key, wants]) => { - if (diffFrame !== undefined) cancelAnimationFrame(diffFrame) - if (diffTimer !== undefined) window.clearTimeout(diffTimer) - diffFrame = undefined - diffTimer = undefined - if (!wants) return - - const id = params.id - if (!id) return - if (!untrack(() => sync.data.session_diff[id] !== undefined)) return - - diffFrame = requestAnimationFrame(() => { - diffFrame = undefined - diffTimer = window.setTimeout(() => { - diffTimer = undefined - if (sessionKey() !== key) return - void sync.session.diff(id, { force: true }) - }, 0) - }) - }, - { defer: true }, - ), - ) - - let treeDir: string | undefined - createEffect(() => { - const dir = sdk.directory - if (!isDesktop()) return - if (!layout.fileTree.opened()) return - if (sync.status === "loading") return - - fileTreeTab() - const refresh = treeDir !== dir - treeDir = dir - void (refresh ? 
file.tree.refresh("") : file.tree.list("")) - }) - - createEffect( - on( - () => sdk.directory, - () => { - const tab = activeFileTab() - if (!tab) return - const path = file.pathFromTab(tab) - if (!path) return - void file.load(path, { force: true }) - }, - { defer: true }, - ), - ) - - const autoScroll = createAutoScroll({ - working: () => true, - overflowAnchor: "dynamic", - }) - - let scrollStateFrame: number | undefined - let scrollStateTarget: HTMLDivElement | undefined - let fillFrame: number | undefined - - const jumpThreshold = (el: HTMLDivElement) => Math.max(400, el.clientHeight) - - const updateScrollState = (el: HTMLDivElement) => { - const max = el.scrollHeight - el.clientHeight - const distance = max - el.scrollTop - const overflow = max > 1 - const bottom = !overflow || distance <= 2 - const jump = overflow && distance > jumpThreshold(el) - - if (ui.scroll.overflow === overflow && ui.scroll.bottom === bottom && ui.scroll.jump === jump) return - setUi("scroll", { overflow, bottom, jump }) - } - - const scheduleScrollState = (el: HTMLDivElement) => { - scrollStateTarget = el - if (scrollStateFrame !== undefined) return - - scrollStateFrame = requestAnimationFrame(() => { - scrollStateFrame = undefined - - const target = scrollStateTarget - scrollStateTarget = undefined - if (!target) return - - updateScrollState(target) - }) - } - - const resumeScroll = () => { - setStore("messageId", undefined) - autoScroll.forceScrollToBottom() - clearMessageHash() - - const el = scroller - if (el) scheduleScrollState(el) - } - - // When the user returns to the bottom, treat the active message as "latest". 
- createEffect( - on( - autoScroll.userScrolled, - (scrolled) => { - if (scrolled) return - setStore("messageId", undefined) - clearMessageHash() - }, - { defer: true }, - ), - ) - - let fill = () => {} - - const setScrollRef = (el: HTMLDivElement | undefined) => { - scroller = el - autoScroll.scrollRef(el) - if (!el) return - scheduleScrollState(el) - fill() - } - - const markUserScroll = () => { - scrollMark += 1 - } - - createResizeObserver( - () => content, - () => { - const el = scroller - if (el) scheduleScrollState(el) - fill() - }, - ) - - const historyWindow = createSessionHistoryWindow({ - sessionID: () => params.id, - messagesReady, - loaded: () => messages().length, - visibleUserMessages, - historyMore, - historyLoading, - loadMore: (sessionID) => sync.session.history.loadMore(sessionID), - userScrolled: autoScroll.userScrolled, - scroller: () => scroller, - }) - - fill = () => { - if (fillFrame !== undefined) return - - fillFrame = requestAnimationFrame(() => { - fillFrame = undefined - - if (!params.id || !messagesReady()) return - if (autoScroll.userScrolled() || historyLoading()) return - - const el = scroller - if (!el) return - if (el.scrollHeight > el.clientHeight + 1) return - if (historyWindow.turnStart() <= 0 && !historyMore()) return - - void historyWindow.loadAndReveal() - }) - } - - createEffect( - on( - () => - [ - params.id, - messagesReady(), - historyWindow.turnStart(), - historyMore(), - historyLoading(), - autoScroll.userScrolled(), - visibleUserMessages().length, - ] as const, - ([id, ready, start, more, loading, scrolled]) => { - if (!id || !ready || loading || scrolled) return - if (start <= 0 && !more) return - fill() - }, - { defer: true }, - ), - ) - - const draft = (id: string) => - extractPromptFromParts(sync.data.part[id] ?? [], { - directory: sdk.directory, - attachmentName: language.t("common.attachment"), - }) - - const line = (id: string) => { - const text = draft(id) - .map((part) => (part.type === "image" ? 
`[image:${part.filename}]` : part.content)) - .join("") - .replace(/\s+/g, " ") - .trim() - if (text) return text - return `[${language.t("common.attachment")}]` - } - - const fail = (err: unknown) => { - showToast({ - variant: "error", - title: language.t("common.requestFailed"), - description: formatServerError(err, language.t), - }) - } - - const merge = (next: NonNullable>) => - sync.set("session", (list) => { - const idx = list.findIndex((item) => item.id === next.id) - if (idx < 0) return list - const out = list.slice() - out[idx] = next - return out - }) - - const roll = (sessionID: string, next: NonNullable>["revert"]) => - sync.set("session", (list) => { - const idx = list.findIndex((item) => item.id === sessionID) - if (idx < 0) return list - const out = list.slice() - out[idx] = { ...out[idx], revert: next } - return out - }) - - const busy = (sessionID: string) => sync.data.session_working(sessionID) - - const queuedFollowups = createMemo(() => { - const id = params.id - if (!id) return emptyFollowups - return followup.items[id] ?? emptyFollowups - }) - - const editingFollowup = createMemo(() => { - const id = params.id - if (!id) return - return followup.edit[id] - }) - - const followupMutation = useMutation(() => ({ - mutationFn: async (input: { sessionID: string; id: string; manual?: boolean }) => { - const item = (followup.items[input.sessionID] ?? []).find((entry) => entry.id === input.id) - if (!item) return - - if (input.manual) setFollowup("paused", input.sessionID, undefined) - setFollowup("failed", input.sessionID, undefined) - - const ok = await sendFollowupDraft({ - client: sdk.client, - sync, - globalSync, - draft: item, - optimisticBusy: item.sessionDirectory === sdk.directory, - }).catch((err) => { - setFollowup("failed", input.sessionID, input.id) - fail(err) - return false - }) - if (!ok) return - - setFollowup("items", input.sessionID, (items) => (items ?? 
[]).filter((entry) => entry.id !== input.id)) - if (input.manual) resumeScroll() - }, - })) - - const followupBusy = (sessionID: string) => - followupMutation.isPending && followupMutation.variables?.sessionID === sessionID - - const sendingFollowup = createMemo(() => { - const id = params.id - if (!id) return - if (!followupBusy(id)) return - return followupMutation.variables?.id - }) - - const queueEnabled = createMemo(() => { - const id = params.id - if (!id) return false - return settings.general.followup() === "queue" && busy(id) && !composer.blocked() && !isChildSession() - }) - - const followupText = (item: FollowupDraft) => { - const text = item.prompt - .map((part) => { - if (part.type === "image") return `[image:${part.filename}]` - if (part.type === "file") return `[file:${part.path}]` - if (part.type === "agent") return `@${part.name}` - return part.content - }) - .join("") - .split(/\r?\n/) - .map((line) => line.trim()) - .find((line) => !!line) - - if (text) return text - return `[${language.t("common.attachment")}]` - } - - const queueFollowup = (draft: FollowupDraft) => { - setFollowup("items", draft.sessionID, (items) => [ - ...(items ?? []), - { id: Identifier.ascending("message"), ...draft }, - ]) - setFollowup("failed", draft.sessionID, undefined) - setFollowup("paused", draft.sessionID, undefined) - } - - const followupDock = createMemo(() => queuedFollowups().map((item) => ({ id: item.id, text: followupText(item) }))) - - const sendFollowup = (sessionID: string, id: string, opts?: { manual?: boolean }) => { - if (sync.session.get(sessionID)?.parentID) return Promise.resolve() - const item = (followup.items[sessionID] ?? 
[]).find((entry) => entry.id === id) - if (!item) return Promise.resolve() - if (followupBusy(sessionID)) return Promise.resolve() - - return followupMutation.mutateAsync({ sessionID, id, manual: opts?.manual }) - } - - const editFollowup = (id: string) => { - const sessionID = params.id - if (!sessionID) return - if (followupBusy(sessionID)) return - - const item = queuedFollowups().find((entry) => entry.id === id) - if (!item) return - - setFollowup("items", sessionID, (items) => (items ?? []).filter((entry) => entry.id !== id)) - setFollowup("failed", sessionID, (value) => (value === id ? undefined : value)) - setFollowup("edit", sessionID, { - id: item.id, - prompt: item.prompt, - context: item.context, - }) - } - - const clearFollowupEdit = () => { - const id = params.id - if (!id) return - setFollowup("edit", id, undefined) - } - - const halt = (sessionID: string) => - busy(sessionID) ? sdk.client.session.abort({ sessionID }).catch(() => {}) : Promise.resolve() - - const revertMutation = useMutation(() => ({ - mutationFn: async (input: { sessionID: string; messageID: string }) => { - const prev = prompt.current().slice() - const last = info()?.revert - const value = draft(input.messageID) - batch(() => { - roll(input.sessionID, { messageID: input.messageID }) - prompt.set(value) - }) - await halt(input.sessionID) - .then(() => sdk.client.session.revert(input)) - .then((result) => { - if (result.data) merge(result.data) - }) - .catch((err) => { - batch(() => { - roll(input.sessionID, last) - prompt.set(prev) - }) - fail(err) - }) - }, - })) - - const restoreMutation = useMutation(() => ({ - mutationFn: async (id: string) => { - const sessionID = params.id - if (!sessionID) return - - const next = userMessages().find((item) => item.id > id) - const prev = prompt.current().slice() - const last = info()?.revert - - batch(() => { - roll(sessionID, next ? 
{ messageID: next.id } : undefined) - if (next) { - prompt.set(draft(next.id)) - return - } - prompt.reset() - }) - - const task = !next - ? halt(sessionID).then(() => sdk.client.session.unrevert({ sessionID })) - : halt(sessionID).then(() => - sdk.client.session.revert({ - sessionID, - messageID: next.id, - }), - ) - - await task - .then((result) => { - if (result.data) merge(result.data) - }) - .catch((err) => { - batch(() => { - roll(sessionID, last) - prompt.set(prev) - }) - fail(err) - }) - }, - })) - - const reverting = createMemo(() => revertMutation.isPending || restoreMutation.isPending) - const restoring = createMemo(() => (restoreMutation.isPending ? restoreMutation.variables : undefined)) - - const revert = (input: { sessionID: string; messageID: string }) => { - if (reverting()) return - return revertMutation.mutateAsync(input) - } - - const restore = (id: string) => { - if (!params.id || reverting()) return - return restoreMutation.mutateAsync(id) - } - - const rolled = createMemo(() => { - const id = revertMessageID() - if (!id) return [] - return userMessages() - .filter((item) => item.id >= id) - .map((item) => ({ id: item.id, text: line(item.id) })) - }) - - const actions = { revert } - - createEffect(() => { - const sessionID = params.id - if (!sessionID) return - - const item = queuedFollowups()[0] - if (!item) return - if (followupBusy(sessionID)) return - if (followup.failed[sessionID] === item.id) return - if (followup.paused[sessionID]) return - if (isChildSession()) return - if (composer.blocked()) return - if (busy(sessionID)) return - - void sendFollowup(sessionID, item.id) - }) - - createResizeObserver( - () => promptDock, - ({ height }) => { - const next = Math.ceil(height) - - if (next === dockHeight) return - - const el = scroller - const delta = next - dockHeight - const stick = el - ? 
!autoScroll.userScrolled() || el.scrollHeight - el.clientHeight - el.scrollTop < 10 + Math.max(0, delta) - : false - - dockHeight = next - - if (stick) autoScroll.forceScrollToBottom() - - if (el) scheduleScrollState(el) - fill() - }, - ) - - const { clearMessageHash, scrollToMessage } = useSessionHashScroll({ - sessionKey, - sessionID: () => params.id, - messagesReady, - visibleUserMessages, - historyMore, - historyLoading, - loadMore: (sessionID) => sync.session.history.loadMore(sessionID), - turnStart: historyWindow.turnStart, - currentMessageId: () => store.messageId, - pendingMessage: () => ui.pendingMessage, - setPendingMessage: (value) => setUi("pendingMessage", value), - setActiveMessage, - setTurnStart: historyWindow.setTurnStart, - autoScroll, - scroller: () => scroller, - anchor, - scheduleScrollState, - consumePendingMessage: layout.pendingMessage.consume, - }) - - createEffect( - on( - () => params.id, - (id) => { - if (!id) requestAnimationFrame(() => inputRef?.focus()) - }, - ), - ) - - onMount(() => { - makeEventListener(document, "keydown", handleKeyDown) - }) - - onCleanup(() => { - if (reviewFrame !== undefined) cancelAnimationFrame(reviewFrame) - if (refreshFrame !== undefined) cancelAnimationFrame(refreshFrame) - if (refreshTimer !== undefined) window.clearTimeout(refreshTimer) - if (todoFrame !== undefined) cancelAnimationFrame(todoFrame) - if (todoTimer !== undefined) window.clearTimeout(todoTimer) - if (diffFrame !== undefined) cancelAnimationFrame(diffFrame) - if (diffTimer !== undefined) window.clearTimeout(diffTimer) - if (scrollStateFrame !== undefined) cancelAnimationFrame(scrollStateFrame) - if (fillFrame !== undefined) cancelAnimationFrame(fillFrame) - }) - - return ( -
- {sessionSync() ?? ""} - -
- - - - setStore("mobileTab", "session")} - > - {language.t("session.tab.session")} - - setStore("mobileTab", "changes")} - > - {hasReview() - ? language.t("session.review.filesChanged", { count: reviewCount() }) - : language.t("session.review.change.other")} - - - - - - {/* Session panel */} -
-
- - - - { - content = el - autoScroll.contentRef(el) - - const root = scroller - if (root) scheduleScrollState(root) - }} - turnStart={historyWindow.turnStart()} - historyMore={historyMore()} - historyLoading={historyLoading()} - onLoadEarlier={() => { - void historyWindow.loadAndReveal() - }} - renderedUserMessages={historyWindow.renderedUserMessages()} - anchor={anchor} - /> - - - - - - -
- - { - inputRef = el - }} - newSessionWorktree={newSessionWorktree()} - onNewSessionWorktreeReset={() => setStore("newSessionWorktree", "main")} - onSubmit={() => { - comments.clear() - resumeScroll() - }} - onResponseSubmit={resumeScroll} - followup={ - params.id && !isChildSession() - ? { - queue: queueEnabled, - items: followupDock(), - sending: sendingFollowup(), - edit: editingFollowup(), - onQueue: queueFollowup, - onAbort: () => { - const id = params.id - if (!id) return - setFollowup("paused", id, true) - }, - onSend: (id) => { - void sendFollowup(params.id!, id, { manual: true }) - }, - onEdit: editFollowup, - onEditLoaded: clearFollowupEdit, - } - : undefined - } - revert={ - rolled().length > 0 - ? { - items: rolled(), - restoring: restoring(), - disabled: reverting(), - onRestore: restore, - } - : undefined - } - setPromptDockRef={(el) => { - promptDock = el - }} - /> - - -
size.start()}> - { - size.touch() - layout.session.resize(width) - }} - /> -
-
-
- - -
- - -
- ) -} diff --git a/packages/app/src/pages/session/message-timeline.tsx b/packages/app/src/pages/session/message-timeline.tsx deleted file mode 100644 index 8bbaafb4e433..000000000000 --- a/packages/app/src/pages/session/message-timeline.tsx +++ /dev/null @@ -1,1118 +0,0 @@ -import { For, createEffect, createMemo, on, onCleanup, Show, Index, type JSX, createSignal } from "solid-js" -import { createStore, produce } from "solid-js/store" -import { useNavigate } from "@solidjs/router" -import { useMutation } from "@tanstack/solid-query" -import { Button } from "@opencode-ai/ui/button" -import { FileIcon } from "@opencode-ai/ui/file-icon" -import { Icon } from "@opencode-ai/ui/icon" -import { IconButton } from "@opencode-ai/ui/icon-button" -import { DropdownMenu } from "@opencode-ai/ui/dropdown-menu" -import { Dialog } from "@opencode-ai/ui/dialog" -import { InlineInput } from "@opencode-ai/ui/inline-input" -import { Spinner } from "@opencode-ai/ui/spinner" -import { SessionTurn } from "@opencode-ai/ui/session-turn" -import { ScrollView } from "@opencode-ai/ui/scroll-view" -import { TextField } from "@opencode-ai/ui/text-field" -import type { AssistantMessage, Message as MessageType, Part, TextPart, UserMessage } from "@opencode-ai/sdk/v2" -import { showToast } from "@opencode-ai/ui/toast" -import { Binary } from "@opencode-ai/core/util/binary" -import { getFilename } from "@opencode-ai/core/util/path" -import { Popover as KobaltePopover } from "@kobalte/core/popover" -import { shouldMarkBoundaryGesture, normalizeWheelDelta } from "@/pages/session/message-gesture" -import { SessionContextUsage } from "@/components/session-context-usage" -import { useDialog } from "@opencode-ai/ui/context/dialog" -import { createResizeObserver } from "@solid-primitives/resize-observer" -import { useLanguage } from "@/context/language" -import { useSessionKey } from "@/pages/session/session-layout" -import { useGlobalSDK } from "@/context/global-sdk" -import { usePlatform } from 
"@/context/platform" -import { useSettings } from "@/context/settings" -import { useSDK } from "@/context/sdk" -import { useSync } from "@/context/sync" -import { messageAgentColor } from "@/utils/agent" -import { sessionTitle } from "@/utils/session-title" -import { parseCommentNote, readCommentMetadata } from "@/utils/comment-note" -import { makeTimer } from "@solid-primitives/timer" - -type MessageComment = { - path: string - comment: string - selection?: { - startLine: number - endLine: number - } -} - -const emptyMessages: MessageType[] = [] -const idle = { type: "idle" as const } -type UserActions = { - fork?: (input: { sessionID: string; messageID: string }) => Promise | void - revert?: (input: { sessionID: string; messageID: string }) => Promise | void -} - -const messageComments = (parts: Part[]): MessageComment[] => - parts.flatMap((part) => { - if (part.type !== "text" || !(part as TextPart).synthetic) return [] - const next = readCommentMetadata(part.metadata) ?? parseCommentNote(part.text) - if (!next) return [] - return [ - { - path: next.path, - comment: next.comment, - selection: next.selection - ? { - startLine: next.selection.startLine, - endLine: next.selection.endLine, - } - : undefined, - }, - ] - }) - -const taskDescription = (part: Part, sessionID: string) => { - if (part.type !== "tool" || part.tool !== "task") return - const metadata = "metadata" in part.state ? part.state.metadata : undefined - if (metadata?.sessionId !== sessionID) return - const value = part.state.input?.description - if (typeof value === "string" && value) return value -} - -const pace = (width: number) => Math.round(Math.max(1200, Math.min(3200, (Math.max(width, 360) * 2000) / 900))) - -const boundaryTarget = (root: HTMLElement, target: EventTarget | null) => { - const current = target instanceof Element ? 
target : undefined - const nested = current?.closest("[data-scrollable]") - if (!nested || nested === root) return root - if (!(nested instanceof HTMLElement)) return root - return nested -} - -const markBoundaryGesture = (input: { - root: HTMLDivElement - target: EventTarget | null - delta: number - onMarkScrollGesture: (target?: EventTarget | null) => void -}) => { - const target = boundaryTarget(input.root, input.target) - if (target === input.root) { - input.onMarkScrollGesture(input.root) - return - } - if ( - shouldMarkBoundaryGesture({ - delta: input.delta, - scrollTop: target.scrollTop, - scrollHeight: target.scrollHeight, - clientHeight: target.clientHeight, - }) - ) { - input.onMarkScrollGesture(input.root) - } -} - -type StageConfig = { - init: number - batch: number -} - -type TimelineStageInput = { - sessionKey: () => string - turnStart: () => number - messages: () => UserMessage[] - config: StageConfig -} - -/** - * Defer-mounts small timeline windows so revealing older turns does not - * block first paint with a large DOM mount. - * - * Once staging completes for a session it never re-stages — backfill and - * new messages render immediately. 
- */ -function createTimelineStaging(input: TimelineStageInput) { - const [state, setState] = createStore({ - activeSession: "", - completedSession: "", - count: 0, - }) - - const stagedCount = createMemo(() => { - const total = input.messages().length - if (input.turnStart() <= 0) return total - if (state.completedSession === input.sessionKey()) return total - const init = Math.min(total, input.config.init) - if (state.count <= init) return init - if (state.count >= total) return total - return state.count - }) - - const stagedUserMessages = createMemo(() => { - const list = input.messages() - const count = stagedCount() - if (count >= list.length) return list - return list.slice(Math.max(0, list.length - count)) - }) - - let frame: number | undefined - const cancel = () => { - if (frame === undefined) return - cancelAnimationFrame(frame) - frame = undefined - } - - createEffect( - on( - () => [input.sessionKey(), input.turnStart() > 0, input.messages().length] as const, - ([sessionKey, isWindowed, total]) => { - cancel() - const shouldStage = - isWindowed && - total > input.config.init && - state.completedSession !== sessionKey && - state.activeSession !== sessionKey - if (!shouldStage) { - setState({ activeSession: "", count: total }) - return - } - - let count = Math.min(total, input.config.init) - setState({ activeSession: sessionKey, count }) - - const step = () => { - if (input.sessionKey() !== sessionKey) { - frame = undefined - return - } - const currentTotal = input.messages().length - count = Math.min(currentTotal, count + input.config.batch) - setState("count", count) - if (count >= currentTotal) { - setState({ completedSession: sessionKey, activeSession: "" }) - frame = undefined - return - } - frame = requestAnimationFrame(step) - } - frame = requestAnimationFrame(step) - }, - ), - ) - - const isStaging = createMemo(() => { - const key = input.sessionKey() - return state.activeSession === key && state.completedSession !== key - }) - - 
onCleanup(cancel) - return { messages: stagedUserMessages, isStaging } -} - -export function MessageTimeline(props: { - mobileChanges: boolean - mobileFallback: JSX.Element - actions?: UserActions - scroll: { overflow: boolean; bottom: boolean; jump: boolean } - onResumeScroll: () => void - setScrollRef: (el: HTMLDivElement | undefined) => void - onScheduleScrollState: (el: HTMLDivElement) => void - onAutoScrollHandleScroll: () => void - onMarkScrollGesture: (target?: EventTarget | null) => void - hasScrollGesture: () => boolean - onUserScroll: () => void - onTurnBackfillScroll: () => void - onAutoScrollInteraction: (event: MouseEvent) => void - centered: boolean - setContentRef: (el: HTMLDivElement) => void - turnStart: number - historyMore: boolean - historyLoading: boolean - onLoadEarlier: () => void - renderedUserMessages: UserMessage[] - anchor: (id: string) => string -}) { - let touchGesture: number | undefined - - const navigate = useNavigate() - const globalSDK = useGlobalSDK() - const sdk = useSDK() - const sync = useSync() - const settings = useSettings() - const dialog = useDialog() - const language = useLanguage() - const { params, sessionKey } = useSessionKey() - const platform = usePlatform() - - const rendered = createMemo(() => props.renderedUserMessages.map((message) => message.id)) - const sessionID = createMemo(() => params.id) - const sessionMessages = createMemo(() => { - const id = sessionID() - if (!id) return emptyMessages - return sync.data.message[id] ?? emptyMessages - }) - const pending = createMemo(() => - sessionMessages().findLast( - (item): item is AssistantMessage => item.role === "assistant" && typeof item.time.completed !== "number", - ), - ) - const sessionStatus = createMemo(() => { - const id = sessionID() - if (!id) return idle - return sync.data.session_status[id] ?? 
idle - }) - const working = createMemo(() => sessionStatus().type !== "idle") - const tint = createMemo(() => messageAgentColor(sessionMessages(), sync.data.agent)) - - const [timeoutDone, setTimeoutDone] = createSignal(true) - - const workingStatus = createMemo<"hidden" | "showing" | "hiding">((prev) => { - if (working()) return "showing" - if (prev === "showing" || !timeoutDone()) return "hiding" - return "hidden" - }) - - createEffect(() => { - if (workingStatus() !== "hiding") return - - setTimeoutDone(false) - makeTimer(() => setTimeoutDone(true), 260, setTimeout) - }) - - const activeMessageID = createMemo(() => { - const parentID = pending()?.parentID - if (parentID) { - const messages = sessionMessages() - const result = Binary.search(messages, parentID, (message) => message.id) - const message = result.found ? messages[result.index] : messages.find((item) => item.id === parentID) - if (message && message.role === "user") return message.id - } - - const status = sessionStatus() - if (status.type !== "idle") { - const messages = sessionMessages() - for (let i = messages.length - 1; i >= 0; i--) { - if (messages[i].role === "user") return messages[i].id - } - } - - return undefined - }) - const info = createMemo(() => { - const id = sessionID() - if (!id) return - return sync.session.get(id) - }) - const titleValue = createMemo(() => info()?.title) - const titleLabel = createMemo(() => sessionTitle(titleValue())) - const shareUrl = createMemo(() => info()?.share?.url) - const shareEnabled = createMemo(() => sync.data.config.share !== "disabled") - const parentID = createMemo(() => info()?.parentID) - const parent = createMemo(() => { - const id = parentID() - if (!id) return - return sync.session.get(id) - }) - const parentMessages = createMemo(() => { - const id = parentID() - if (!id) return emptyMessages - return sync.data.message[id] ?? emptyMessages - }) - const parentTitle = createMemo(() => sessionTitle(parent()?.title) ?? 
language.t("command.session.new")) - const childTaskDescription = createMemo(() => { - const id = sessionID() - if (!id) return - return parentMessages() - .flatMap((message) => sync.data.part[message.id] ?? []) - .map((part) => taskDescription(part, id)) - .findLast((value): value is string => !!value) - }) - const childTitle = createMemo(() => { - if (!parentID()) return titleLabel() ?? "" - if (childTaskDescription()) return childTaskDescription() - const value = titleLabel()?.replace(/\s+\(@[^)]+ subagent\)$/, "") - if (value) return value - return language.t("command.session.new") - }) - const showHeader = createMemo(() => !!(titleValue() || parentID())) - const stageCfg = { init: 1, batch: 3 } - const staging = createTimelineStaging({ - sessionKey, - turnStart: () => props.turnStart, - messages: () => props.renderedUserMessages, - config: stageCfg, - }) - - const [title, setTitle] = createStore({ - draft: "", - editing: false, - menuOpen: false, - pendingRename: false, - pendingShare: false, - }) - let titleRef: HTMLInputElement | undefined - - const [share, setShare] = createStore({ - open: false, - dismiss: null as "escape" | "outside" | null, - }) - const [bar, setBar] = createStore({ - ms: pace(640), - }) - - let more: HTMLButtonElement | undefined - let head: HTMLDivElement | undefined - - createResizeObserver( - () => head, - () => { - if (!head || head.clientWidth <= 0) return - setBar("ms", pace(head.clientWidth)) - }, - ) - - const viewShare = () => { - const url = shareUrl() - if (!url) return - platform.openLink(url) - } - - const errorMessage = (err: unknown) => { - if (err && typeof err === "object" && "data" in err) { - const data = (err as { data?: { message?: string } }).data - if (data?.message) return data.message - } - if (err instanceof Error) return err.message - return language.t("common.requestFailed") - } - - const shareMutation = useMutation(() => ({ - mutationFn: (id: string) => globalSDK.client.session.share({ sessionID: id, 
directory: sdk.directory }), - onError: (err) => { - console.error("Failed to share session", err) - }, - })) - - const unshareMutation = useMutation(() => ({ - mutationFn: (id: string) => globalSDK.client.session.unshare({ sessionID: id, directory: sdk.directory }), - onError: (err) => { - console.error("Failed to unshare session", err) - }, - })) - - const titleMutation = useMutation(() => ({ - mutationFn: (input: { id: string; title: string }) => - sdk.client.session.update({ sessionID: input.id, title: input.title }), - onSuccess: (_, input) => { - sync.set( - produce((draft) => { - const index = draft.session.findIndex((s) => s.id === input.id) - if (index !== -1) draft.session[index].title = input.title - }), - ) - setTitle("editing", false) - }, - onError: (err) => { - showToast({ - title: language.t("common.requestFailed"), - description: errorMessage(err), - }) - }, - })) - - const shareSession = () => { - const id = sessionID() - if (!id || shareMutation.isPending) return - if (!shareEnabled()) return - shareMutation.mutate(id) - } - - const unshareSession = () => { - const id = sessionID() - if (!id || unshareMutation.isPending) return - if (!shareEnabled()) return - unshareMutation.mutate(id) - } - - createEffect( - on( - sessionKey, - () => - setTitle({ - draft: "", - editing: false, - menuOpen: false, - pendingRename: false, - pendingShare: false, - }), - { defer: true }, - ), - ) - - createEffect( - on( - () => [parentID(), childTaskDescription()] as const, - ([id, description]) => { - if (!id || description) return - if (sync.data.message[id] !== undefined) return - void sync.session.sync(id) - }, - { defer: true }, - ), - ) - - const openTitleEditor = () => { - if (!sessionID() || parentID()) return - setTitle({ editing: true, draft: titleLabel() ?? 
"" }) - requestAnimationFrame(() => { - titleRef?.focus() - titleRef?.select() - }) - } - - const closeTitleEditor = () => { - if (titleMutation.isPending) return - setTitle("editing", false) - } - - const saveTitleEditor = () => { - const id = sessionID() - if (!id) return - if (titleMutation.isPending) return - - const next = title.draft.trim() - if (!next || next === (titleLabel() ?? "")) { - setTitle("editing", false) - return - } - - titleMutation.mutate({ id, title: next }) - } - - const navigateAfterSessionRemoval = (sessionID: string, parentID?: string, nextSessionID?: string) => { - if (params.id !== sessionID) return - if (parentID) { - navigate(`/${params.dir}/session/${parentID}`) - return - } - if (nextSessionID) { - navigate(`/${params.dir}/session/${nextSessionID}`) - return - } - navigate(`/${params.dir}/session`) - } - - const archiveSession = async (sessionID: string) => { - const session = sync.session.get(sessionID) - if (!session) return - - const sessions = sync.data.session ?? [] - const index = sessions.findIndex((s) => s.id === sessionID) - const nextSession = index === -1 ? undefined : (sessions[index + 1] ?? sessions[index - 1]) - - await sdk.client.session - .update({ sessionID, time: { archived: Date.now() } }) - .then(() => { - sync.set( - produce((draft) => { - const index = draft.session.findIndex((s) => s.id === sessionID) - if (index !== -1) draft.session.splice(index, 1) - }), - ) - navigateAfterSessionRemoval(sessionID, session.parentID, nextSession?.id) - }) - .catch((err) => { - showToast({ - title: language.t("common.requestFailed"), - description: errorMessage(err), - }) - }) - } - - const deleteSession = async (sessionID: string) => { - const session = sync.session.get(sessionID) - if (!session) return false - - const sessions = (sync.data.session ?? []).filter((s) => !s.parentID && !s.time?.archived) - const index = sessions.findIndex((s) => s.id === sessionID) - const nextSession = index === -1 ? 
undefined : (sessions[index + 1] ?? sessions[index - 1]) - - const result = await sdk.client.session - .delete({ sessionID }) - .then((x) => x.data) - .catch((err) => { - showToast({ - title: language.t("session.delete.failed.title"), - description: errorMessage(err), - }) - return false - }) - - if (!result) return false - - sync.set( - produce((draft) => { - const removed = new Set([sessionID]) - - const byParent = new Map() - for (const item of draft.session) { - const parentID = item.parentID - if (!parentID) continue - const existing = byParent.get(parentID) - if (existing) { - existing.push(item.id) - continue - } - byParent.set(parentID, [item.id]) - } - - const stack = [sessionID] - while (stack.length) { - const parentID = stack.pop() - if (!parentID) continue - - const children = byParent.get(parentID) - if (!children) continue - - for (const child of children) { - if (removed.has(child)) continue - removed.add(child) - stack.push(child) - } - } - - draft.session = draft.session.filter((s) => !removed.has(s.id)) - }), - ) - - navigateAfterSessionRemoval(sessionID, session.parentID, nextSession?.id) - return true - } - - const navigateParent = () => { - const id = parentID() - if (!id) return - navigate(`/${params.dir}/session/${id}`) - } - - function DialogDeleteSession(props: { sessionID: string }) { - const name = createMemo( - () => sessionTitle(sync.session.get(props.sessionID)?.title) ?? language.t("command.session.new"), - ) - const handleDelete = async () => { - await deleteSession(props.sessionID) - dialog.close() - } - - return ( - -
-
- - {language.t("session.delete.confirm", { name: name() })} - -
-
- - -
-
-
- ) - } - - return ( - {props.mobileFallback}
} - > -
-
- -
- { - const root = e.currentTarget - const delta = normalizeWheelDelta({ - deltaY: e.deltaY, - deltaMode: e.deltaMode, - rootHeight: root.clientHeight, - }) - if (!delta) return - markBoundaryGesture({ root, target: e.target, delta, onMarkScrollGesture: props.onMarkScrollGesture }) - }} - onTouchStart={(e) => { - touchGesture = e.touches[0]?.clientY - }} - onTouchMove={(e) => { - const next = e.touches[0]?.clientY - const prev = touchGesture - touchGesture = next - if (next === undefined || prev === undefined) return - - const delta = prev - next - if (!delta) return - - const root = e.currentTarget - markBoundaryGesture({ root, target: e.target, delta, onMarkScrollGesture: props.onMarkScrollGesture }) - }} - onTouchEnd={() => { - touchGesture = undefined - }} - onTouchCancel={() => { - touchGesture = undefined - }} - onPointerDown={(e) => { - if (e.target !== e.currentTarget) return - props.onMarkScrollGesture(e.currentTarget) - }} - onScroll={(e) => { - props.onScheduleScrollState(e.currentTarget) - props.onTurnBackfillScroll() - if (!props.hasScrollGesture()) return - props.onUserScroll() - props.onAutoScrollHandleScroll() - props.onMarkScrollGesture(e.currentTarget) - }} - onClick={props.onAutoScrollInteraction} - class="relative min-w-0 w-full h-full" - style={{ - "--session-title-height": showHeader() ? "40px" : "0px", - "--sticky-accordion-top": showHeader() ? "48px" : "0px", - }} - > -
- -
{ - head = el - setBar("ms", pace(el.clientWidth)) - }} - data-session-title - classList={{ - "sticky top-0 z-30 bg-[linear-gradient(to_bottom,var(--background-stronger)_48px,transparent)]": true, - relative: true, - "w-full": true, - "pb-4": true, - "pl-2 pr-3 md:pl-4 md:pr-3": true, - "md:max-w-200 md:mx-auto 2xl:max-w-[1000px]": props.centered, - }} - > - - - -
- 0 || props.historyMore}> -
- -
-
- - {(messageID) => { - const active = createMemo(() => activeMessageID() === messageID) - const comments = createMemo(() => messageComments(sync.data.part[messageID] ?? []), [], { - equals: (a, b) => - a.length === b.length && - a.every( - (c, i) => - c.path === b[i].path && - c.comment === b[i].comment && - c.selection?.startLine === b[i].selection?.startLine && - c.selection?.endLine === b[i].selection?.endLine, - ), - }) - const commentCount = createMemo(() => comments().length) - return ( -
- 0}> -
-
-
- - {(commentAccessor: () => MessageComment) => { - const comment = createMemo(() => commentAccessor()) - return ( - - {(c) => ( -
-
- - {getFilename(c().path)} - - {(selection) => ( - - {selection().startLine === selection().endLine - ? `:${selection().startLine}` - : `:${selection().startLine}-${selection().endLine}`} - - )} - -
-
- {c().comment} -
-
- )} -
- ) - }} -
-
-
-
-
- -
- ) - }} -
-
-
- -
- - ) -} diff --git a/packages/console/app/src/asset/brand/opencode-logo-dark.svg b/packages/console/app/src/asset/brand/opencode-logo-dark.svg index c28babff1be1..790fbc494fe5 100644 --- a/packages/console/app/src/asset/brand/opencode-logo-dark.svg +++ b/packages/console/app/src/asset/brand/opencode-logo-dark.svg @@ -1,16 +1 @@ - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/go-ornate-dark.svg b/packages/console/app/src/asset/go-ornate-dark.svg index 9b617c6777f0..ce7686e1f3ef 100644 --- a/packages/console/app/src/asset/go-ornate-dark.svg +++ b/packages/console/app/src/asset/go-ornate-dark.svg @@ -1,6 +1 @@ - - - - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/go-ornate-light.svg b/packages/console/app/src/asset/go-ornate-light.svg index 79991973d6d9..a96688ba1d59 100644 --- a/packages/console/app/src/asset/go-ornate-light.svg +++ b/packages/console/app/src/asset/go-ornate-light.svg @@ -1,6 +1 @@ - - - - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/lander/check.svg b/packages/console/app/src/asset/lander/check.svg index 0ac7759ea56c..0a68876455e2 100644 --- a/packages/console/app/src/asset/lander/check.svg +++ b/packages/console/app/src/asset/lander/check.svg @@ -1,3 +1 @@ - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/lander/copy.svg b/packages/console/app/src/asset/lander/copy.svg index e2263279e5ea..727e5ff384f5 100644 --- a/packages/console/app/src/asset/lander/copy.svg +++ b/packages/console/app/src/asset/lander/copy.svg @@ -1,3 +1 @@ - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/lander/opencode-logo-dark.svg b/packages/console/app/src/asset/lander/opencode-logo-dark.svg index 154000aaa585..07efb360eba0 100644 --- a/packages/console/app/src/asset/lander/opencode-logo-dark.svg +++ b/packages/console/app/src/asset/lander/opencode-logo-dark.svg @@ -1,11 +1 @@ - - - - - - - - 
- - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/lander/wordmark-dark.svg b/packages/console/app/src/asset/lander/wordmark-dark.svg index 42f8e22a6dc7..aee42d41b16f 100644 --- a/packages/console/app/src/asset/lander/wordmark-dark.svg +++ b/packages/console/app/src/asset/lander/wordmark-dark.svg @@ -1,3 +1 @@ - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/logo-ornate-dark.svg b/packages/console/app/src/asset/logo-ornate-dark.svg index a1582732423a..9c8e076d266c 100644 --- a/packages/console/app/src/asset/logo-ornate-dark.svg +++ b/packages/console/app/src/asset/logo-ornate-dark.svg @@ -1,18 +1 @@ - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/logo-ornate-light.svg b/packages/console/app/src/asset/logo-ornate-light.svg index 2a856dccefe8..6e3e80c6a2ed 100644 --- a/packages/console/app/src/asset/logo-ornate-light.svg +++ b/packages/console/app/src/asset/logo-ornate-light.svg @@ -1,18 +1 @@ - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/console/app/src/asset/logo.svg b/packages/console/app/src/asset/logo.svg index 2a856dccefe8..6e3e80c6a2ed 100644 --- a/packages/console/app/src/asset/logo.svg +++ b/packages/console/app/src/asset/logo.svg @@ -1,18 +1 @@ - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/docs/favicon-v3.svg b/packages/docs/favicon-v3.svg index b785c738bf17..595c77f3a150 100644 --- a/packages/docs/favicon-v3.svg +++ b/packages/docs/favicon-v3.svg @@ -1,19 +1 @@ - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/docs/favicon.svg b/packages/docs/favicon.svg index b785c738bf17..595c77f3a150 100644 --- a/packages/docs/favicon.svg +++ b/packages/docs/favicon.svg @@ -1,19 +1 @@ - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/docs/logo/dark.svg b/packages/docs/logo/dark.svg index 
8b343cd6fc90..8b0ec5562f16 100644 --- a/packages/docs/logo/dark.svg +++ b/packages/docs/logo/dark.svg @@ -1,21 +1 @@ - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/docs/logo/light.svg b/packages/docs/logo/light.svg index 03e62bf1d9fc..a9cfe5450db5 100644 --- a/packages/docs/logo/light.svg +++ b/packages/docs/logo/light.svg @@ -1,21 +1 @@ - - - - - - - - - - - - - - - - - - - - - + \ No newline at end of file diff --git a/packages/extensions/zed/icons/opencode.svg b/packages/extensions/zed/icons/opencode.svg index fc001e49b5c6..7868bb73d36f 100644 --- a/packages/extensions/zed/icons/opencode.svg +++ b/packages/extensions/zed/icons/opencode.svg @@ -1,3 +1 @@ - - - + \ No newline at end of file diff --git a/packages/identity/mark-light.svg b/packages/identity/mark-light.svg index ac619f1b2ff2..19a2b29408e0 100644 --- a/packages/identity/mark-light.svg +++ b/packages/identity/mark-light.svg @@ -1,5 +1 @@ - - - - - + \ No newline at end of file diff --git a/packages/identity/mark.svg b/packages/identity/mark.svg index 157edc4d7522..7e00cac5fafb 100644 --- a/packages/identity/mark.svg +++ b/packages/identity/mark.svg @@ -1,7 +1 @@ - - - - - \ No newline at end of file + \ No newline at end of file diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts deleted file mode 100644 index 545e48e64d9f..000000000000 --- a/packages/opencode/src/config/config.ts +++ /dev/null @@ -1,834 +0,0 @@ -import * as Log from "@opencode-ai/core/util/log" -import path from "path" -import { pathToFileURL } from "url" -import os from "os" -import { mergeDeep } from "remeda" -import { Global } from "@opencode-ai/core/global" -import fsNode from "fs/promises" -import { NamedError } from "@opencode-ai/core/util/error" -import { Flag } from "@opencode-ai/core/flag/flag" -import { Auth } from "../auth" -import { Env } from "../env" -import { applyEdits, modify } from "jsonc-parser" -import { type InstanceContext } 
from "../project/instance" -import { InstallationLocal, InstallationVersion } from "@opencode-ai/core/installation/version" -import { existsSync } from "fs" -import { Account } from "@/account/account" -import { isRecord } from "@/util/record" -import type { ConsoleState } from "./console-state" -import { AppFileSystem } from "@opencode-ai/core/filesystem" -import { InstanceState } from "@/effect/instance-state" -import { Context, Duration, Effect, Exit, Fiber, Layer, Option, Schema } from "effect" -import { EffectFlock } from "@opencode-ai/core/util/effect-flock" -import { containsPath } from "../project/instance-context" -import { NonNegativeInt, PositiveInt, type DeepMutable } from "@opencode-ai/core/schema" -import { ConfigAgent } from "./agent" -import { ConfigAttachment } from "./attachment" -import { ConfigCommand } from "./command" -import { ConfigFormatter } from "./formatter" -import { ConfigLayout } from "./layout" -import { ConfigLSP } from "./lsp" -import { ConfigManaged } from "./managed" -import { ConfigMCP } from "./mcp" -import { ConfigModelID } from "./model-id" -import { ConfigParse } from "./parse" -import { ConfigPaths } from "./paths" -import { ConfigPermission } from "./permission" -import { ConfigPlugin } from "./plugin" -import { ConfigProvider } from "./provider" -import { ConfigReference } from "./reference" -import { ConfigServer } from "./server" -import { ConfigSkills } from "./skills" -import { ConfigVariable } from "./variable" -import { Npm } from "@opencode-ai/core/npm" - -const log = Log.create({ service: "config" }) - -// Custom merge function that concatenates array fields instead of replacing them -// Keep remeda's deep conditional merge type out of hot config-loading paths; TS profiling showed it dominates here. 
-function mergeConfig(target: Info, source: Info): Info { - return mergeDeep(target, source) as Info -} - -function mergeConfigConcatArrays(target: Info, source: Info): Info { - const merged = mergeConfig(target, source) - if (target.instructions && source.instructions) { - merged.instructions = Array.from(new Set([...target.instructions, ...source.instructions])) - } - return merged -} - -function normalizeLoadedConfig(data: unknown, source: string) { - if (!isRecord(data)) return data - const copy = { ...data } - const hadLegacy = "theme" in copy || "keybinds" in copy || "tui" in copy - if (!hadLegacy) return copy - delete copy.theme - delete copy.keybinds - delete copy.tui - log.warn("tui keys in opencode config are deprecated; move them to tui.json", { path: source }) - return copy -} - -async function substituteWellKnownRemoteConfig(input: { value: unknown; dir: string; source: string }) { - if (!isRecord(input.value) || typeof input.value.url !== "string") return - - const url = await ConfigVariable.substitute({ - text: input.value.url, - type: "virtual", - dir: input.dir, - source: input.source, - }) - const headers = isRecord(input.value.headers) - ? Object.fromEntries( - await Promise.all( - Object.entries(input.value.headers) - .filter((entry): entry is [string, string] => typeof entry[1] === "string") - .map(async ([key, value]) => [ - key, - await ConfigVariable.substitute({ - text: value, - type: "virtual", - dir: input.dir, - source: input.source, - }), - ]), - ), - ) - : undefined - - return { url, headers } -} - -async function resolveLoadedPlugins(config: T, filepath: string) { - if (!config.plugin) return config - for (let i = 0; i < config.plugin.length; i++) { - // Normalize path-like plugin specs while we still know which config file declared them. - // This prevents `./plugin.ts` from being reinterpreted relative to some later merge location. 
- config.plugin[i] = await ConfigPlugin.resolvePluginSpec(config.plugin[i], filepath) - } - return config -} - -export type Layout = ConfigLayout.Layout - -const LogLevelRef = Schema.Literals(["DEBUG", "INFO", "WARN", "ERROR"]).annotate({ - identifier: "LogLevel", - description: "Log level", -}) - -export const Info = Schema.Struct({ - $schema: Schema.optional(Schema.String).annotate({ - description: "JSON schema reference for configuration validation", - }), - shell: Schema.optional(Schema.String).annotate({ - description: "Default shell to use for terminal and bash tool", - }), - logLevel: Schema.optional(LogLevelRef).annotate({ description: "Log level" }), - server: Schema.optional(ConfigServer.Server).annotate({ - description: "Server configuration for opencode serve and web commands", - }), - command: Schema.optional(Schema.Record(Schema.String, ConfigCommand.Info)).annotate({ - description: "Command configuration, see https://opencode.ai/docs/commands", - }), - skills: Schema.optional(ConfigSkills.Info).annotate({ description: "Additional skill folder paths" }), - reference: Schema.optional(ConfigReference.Info).annotate({ - description: "Named git or local directory references that can be mentioned as @alias or @alias/path", - }), - watcher: Schema.optional( - Schema.Struct({ - ignore: Schema.optional(Schema.mutable(Schema.Array(Schema.String))), - }), - ), - snapshot: Schema.optional(Schema.Boolean).annotate({ - description: - "Enable or disable snapshot tracking. When false, filesystem snapshots are not recorded and undoing or reverting will not undo/redo file changes. Defaults to true.", - }), - // User-facing plugin config is stored as Specs; provenance gets attached later while configs are merged. 
- plugin: Schema.optional(Schema.mutable(Schema.Array(ConfigPlugin.Spec))), - share: Schema.optional(Schema.Literals(["manual", "auto", "disabled"])).annotate({ - description: - "Control sharing behavior:'manual' allows manual sharing via commands, 'auto' enables automatic sharing, 'disabled' disables all sharing", - }), - autoshare: Schema.optional(Schema.Boolean).annotate({ - description: "@deprecated Use 'share' field instead. Share newly created sessions automatically", - }), - autoupdate: Schema.optional(Schema.Union([Schema.Boolean, Schema.Literal("notify")])).annotate({ - description: - "Automatically update to the latest version. Set to true to auto-update, false to disable, or 'notify' to show update notifications", - }), - disabled_providers: Schema.optional(Schema.mutable(Schema.Array(Schema.String))).annotate({ - description: "Disable providers that are loaded automatically", - }), - enabled_providers: Schema.optional(Schema.mutable(Schema.Array(Schema.String))).annotate({ - description: "When set, ONLY these providers will be enabled. All other providers will be ignored", - }), - model: Schema.optional(ConfigModelID).annotate({ - description: "Model to use in the format of provider/model, eg anthropic/claude-2", - }), - small_model: Schema.optional(ConfigModelID).annotate({ - description: "Small model to use for tasks like title generation in the format of provider/model", - }), - default_agent: Schema.optional(Schema.String).annotate({ - description: - "Default agent to use when none is specified. Must be a primary agent. 
Falls back to 'build' if not set or if the specified agent is invalid.", - }), - username: Schema.optional(Schema.String).annotate({ - description: "Custom username to display in conversations instead of system username", - }), - mode: Schema.optional( - Schema.StructWithRest( - Schema.Struct({ - build: Schema.optional(ConfigAgent.Info), - plan: Schema.optional(ConfigAgent.Info), - }), - [Schema.Record(Schema.String, ConfigAgent.Info)], - ), - ).annotate({ description: "@deprecated Use `agent` field instead." }), - agent: Schema.optional( - Schema.StructWithRest( - Schema.Struct({ - // primary - plan: Schema.optional(ConfigAgent.Info), - build: Schema.optional(ConfigAgent.Info), - // subagent - general: Schema.optional(ConfigAgent.Info), - explore: Schema.optional(ConfigAgent.Info), - scout: Schema.optional(ConfigAgent.Info), - // specialized - title: Schema.optional(ConfigAgent.Info), - summary: Schema.optional(ConfigAgent.Info), - compaction: Schema.optional(ConfigAgent.Info), - }), - [Schema.Record(Schema.String, ConfigAgent.Info)], - ), - ).annotate({ description: "Agent configuration, see https://opencode.ai/docs/agents" }), - provider: Schema.optional(Schema.Record(Schema.String, ConfigProvider.Info)).annotate({ - description: "Custom provider configurations and model overrides", - }), - mcp: Schema.optional( - Schema.Record( - Schema.String, - Schema.Union([ - ConfigMCP.Info, - // Matches the legacy `{ enabled: false }` form used to disable a server. - Schema.Struct({ enabled: Schema.Boolean }), - ]), - ), - ).annotate({ description: "MCP (Model Context Protocol) server configurations" }), - formatter: Schema.optional(ConfigFormatter.Info).annotate({ - description: - "Enable or configure formatters. Omit or set to false to disable, true to enable built-ins, or an object to enable built-ins with overrides.", - }), - lsp: Schema.optional(ConfigLSP.Info).annotate({ - description: - "Enable or configure LSP servers. 
Omit or set to false to disable, true to enable built-ins, or an object to enable built-ins with overrides.", - }), - instructions: Schema.optional(Schema.mutable(Schema.Array(Schema.String))).annotate({ - description: "Additional instruction files or patterns to include", - }), - layout: Schema.optional(ConfigLayout.Layout).annotate({ description: "@deprecated Always uses stretch layout." }), - permission: Schema.optional(ConfigPermission.Info), - tools: Schema.optional(Schema.Record(Schema.String, Schema.Boolean)), - attachment: Schema.optional(ConfigAttachment.Info).annotate({ - description: "Attachment processing configuration, including image size limits and resizing behavior", - }), - enterprise: Schema.optional( - Schema.Struct({ - url: Schema.optional(Schema.String).annotate({ description: "Enterprise URL" }), - }), - ), - tool_output: Schema.optional( - Schema.Struct({ - max_lines: Schema.optional(PositiveInt).annotate({ - description: "Maximum lines of tool output before it is truncated and saved to disk (default: 2000)", - }), - max_bytes: Schema.optional(PositiveInt).annotate({ - description: "Maximum bytes of tool output before it is truncated and saved to disk (default: 51200)", - }), - }), - ).annotate({ - description: - "Thresholds for truncating tool output. 
When output exceeds either limit, the full text is written to the truncation directory and a preview is returned.", - }), - compaction: Schema.optional( - Schema.Struct({ - auto: Schema.optional(Schema.Boolean).annotate({ - description: "Enable automatic compaction when context is full (default: true)", - }), - prune: Schema.optional(Schema.Boolean).annotate({ - description: "Enable pruning of old tool outputs (default: true)", - }), - tail_turns: Schema.optional(NonNegativeInt).annotate({ - description: - "Number of recent user turns, including their following assistant/tool responses, to keep verbatim during compaction (default: 2)", - }), - preserve_recent_tokens: Schema.optional(NonNegativeInt).annotate({ - description: "Maximum number of tokens from recent turns to preserve verbatim after compaction", - }), - reserved: Schema.optional(NonNegativeInt).annotate({ - description: "Token buffer for compaction. Leaves enough window to avoid overflow during compaction.", - }), - }), - ), - experimental: Schema.optional( - Schema.Struct({ - disable_paste_summary: Schema.optional(Schema.Boolean), - batch_tool: Schema.optional(Schema.Boolean).annotate({ description: "Enable the batch tool" }), - openTelemetry: Schema.optional(Schema.Boolean).annotate({ - description: "Enable OpenTelemetry spans for AI SDK calls (using the 'experimental_telemetry' flag)", - }), - primary_tools: Schema.optional(Schema.mutable(Schema.Array(Schema.String))).annotate({ - description: "Tools that should only be available to primary agents.", - }), - continue_loop_on_deny: Schema.optional(Schema.Boolean).annotate({ - description: "Continue the agent loop when a tool call is denied", - }), - mcp_timeout: Schema.optional(PositiveInt).annotate({ - description: "Timeout in milliseconds for model context protocol (MCP) requests", - }), - }), - ), -}).annotate({ identifier: "Config" }) - -// Uses the shared `DeepMutable` from `@opencode-ai/core/schema`. 
See the definition -// there for why the local variant is needed over `Types.DeepMutable` from -// effect-smol (the upstream version collapses `unknown` to `{}`). -export type Info = DeepMutable> & { - // plugin_origins is derived state, not a persisted config field. It keeps each winning plugin spec together - // with the file and scope it came from so later runtime code can make location-sensitive decisions. - plugin_origins?: ConfigPlugin.Origin[] -} - -type State = { - config: Info - directories: string[] - deps: Fiber.Fiber[] - consoleState: ConsoleState -} - -export interface Interface { - readonly get: () => Effect.Effect - readonly getGlobal: () => Effect.Effect - readonly getConsoleState: () => Effect.Effect - readonly update: (config: Info) => Effect.Effect - readonly updateGlobal: (config: Info) => Effect.Effect<{ info: Info; changed: boolean }> - readonly invalidate: () => Effect.Effect - readonly directories: () => Effect.Effect - readonly waitForDependencies: () => Effect.Effect -} - -export class Service extends Context.Service()("@opencode/Config") {} - -function globalConfigFile() { - const candidates = ["opencode.jsonc", "opencode.json", "config.json"].map((file) => - path.join(Global.Path.config, file), - ) - for (const file of candidates) { - if (existsSync(file)) return file - } - return candidates[0] -} - -function patchJsonc(input: string, patch: unknown, path: string[] = []): string { - if (!isRecord(patch)) { - const edits = modify(input, path, patch, { - formattingOptions: { - insertSpaces: true, - tabSize: 2, - }, - }) - return applyEdits(input, edits) - } - - return Object.entries(patch).reduce((result, [key, value]) => patchJsonc(result, value, [...path, key]), input) -} - -function writable(info: Info) { - const { plugin_origins: _plugin_origins, ...next } = info - return next -} - -function writableGlobal(info: Info) { - const next = writable(info) - // When a user changes config from a value back to default in the Desktop app, we 
don't want to leave a blank `"shell": "",` key - if ("shell" in next && next.shell === "") return { ...next, shell: undefined } - return next -} - -export const ConfigDirectoryTypoError = NamedError.create("ConfigDirectoryTypoError", { - path: Schema.String, - dir: Schema.String, - suggestion: Schema.String, -}) - -export const layer = Layer.effect( - Service, - Effect.gen(function* () { - const fs = yield* AppFileSystem.Service - const authSvc = yield* Auth.Service - const accountSvc = yield* Account.Service - const env = yield* Env.Service - const npmSvc = yield* Npm.Service - - const readConfigFile = (filepath: string) => fs.readFileStringSafe(filepath).pipe(Effect.orDie) - - const loadConfig = Effect.fnUntraced(function* ( - text: string, - options: { path: string } | { dir: string; source: string }, - ) { - const source = "path" in options ? options.path : options.source - const expanded = yield* Effect.promise(() => - ConfigVariable.substitute( - "path" in options ? { text, type: "path", path: options.path } : { text, type: "virtual", ...options }, - ), - ) - const parsed = ConfigParse.jsonc(expanded, source) - const data = ConfigParse.schema(Info, normalizeLoadedConfig(parsed, source), source) - if (!("path" in options)) return data - - yield* Effect.promise(() => resolveLoadedPlugins(data, options.path)) - if (!data.$schema) { - data.$schema = "https://opencode.ai/config.json" - const updated = text.replace(/^\s*\{/, '{\n "$schema": "https://opencode.ai/config.json",') - yield* fs.writeFileString(options.path, updated).pipe(Effect.catch(() => Effect.void)) - } - return data - }) - - const loadFile = Effect.fnUntraced(function* (filepath: string) { - log.info("loading", { path: filepath }) - const text = yield* readConfigFile(filepath) - if (!text) return {} as Info - return yield* loadConfig(text, { path: filepath }) - }) - - const loadGlobal = Effect.fnUntraced(function* () { - let result: Info = {} - // Seed the default global config with the schema for 
editor completion, but avoid writing when the user - // explicitly routes config through env-provided paths or content. - if (!Flag.OPENCODE_CONFIG && !Flag.OPENCODE_CONFIG_DIR && !Flag.OPENCODE_CONFIG_CONTENT) { - const file = globalConfigFile() - if (!existsSync(file)) { - yield* fs - .writeWithDirs(file, JSON.stringify({ $schema: "https://opencode.ai/config.json" }, null, 2)) - .pipe(Effect.catch(() => Effect.void)) - } - } - result = mergeConfig(result, yield* loadFile(path.join(Global.Path.config, "config.json"))) - result = mergeConfig(result, yield* loadFile(path.join(Global.Path.config, "opencode.json"))) - result = mergeConfig(result, yield* loadFile(path.join(Global.Path.config, "opencode.jsonc"))) - - const legacy = path.join(Global.Path.config, "config") - if (existsSync(legacy)) { - yield* Effect.promise(() => - import(pathToFileURL(legacy).href, { with: { type: "toml" } }) - .then(async (mod) => { - const { provider, model, ...rest } = mod.default - if (provider && model) result.model = `${provider}/${model}` - result["$schema"] = "https://opencode.ai/config.json" - result = mergeConfig(result, rest) - await fsNode.writeFile(path.join(Global.Path.config, "config.json"), JSON.stringify(result, null, 2)) - await fsNode.unlink(legacy) - }) - .catch(() => {}), - ) - } - - return result - }) - - const [cachedGlobal, invalidateGlobal] = yield* Effect.cachedInvalidateWithTTL( - loadGlobal().pipe( - Effect.tapError((error) => - Effect.sync(() => log.error("failed to load global config, using defaults", { error: String(error) })), - ), - Effect.orElseSucceed((): Info => ({})), - ), - Duration.infinity, - ) - - const getGlobal = Effect.fn("Config.getGlobal")(function* () { - return yield* cachedGlobal - }) - - const ensureGitignore = Effect.fn("Config.ensureGitignore")(function* (dir: string) { - const gitignore = path.join(dir, ".gitignore") - const hasIgnore = yield* fs.existsSafe(gitignore) - if (!hasIgnore) { - yield* fs - .writeFileString( - gitignore, - 
["node_modules", "package.json", "package-lock.json", "bun.lock", ".gitignore"].join("\n"), - ) - .pipe( - Effect.catchIf( - (e) => e.reason._tag === "PermissionDenied", - () => Effect.void, - ), - ) - } - }) - - const loadInstanceState = Effect.fn("Config.loadInstanceState")( - function* (ctx: InstanceContext) { - const auth = yield* authSvc.all().pipe(Effect.orDie) - - let result: Info = {} - const consoleManagedProviders = new Set() - let activeOrgName: string | undefined - - const pluginScopeForSource = Effect.fnUntraced(function* (source: string) { - if (source.startsWith("http://") || source.startsWith("https://")) return "global" - if (source === "OPENCODE_CONFIG_CONTENT") return "local" - if (containsPath(source, ctx)) return "local" - return "global" - }) - - const mergePluginOrigins = Effect.fnUntraced(function* ( - source: string, - // mergePluginOrigins receives raw Specs from one config source, before provenance for this merge step - // is attached. - list: ConfigPlugin.Spec[] | undefined, - // Scope can be inferred from the source path, but some callers already know whether the config should - // behave as global or local and can pass that explicitly. - kind?: ConfigPlugin.Scope, - ) { - if (!list?.length) return - const hit = kind ?? (yield* pluginScopeForSource(source)) - // Merge newly seen plugin origins with previously collected ones, then dedupe by plugin identity while - // keeping the winning source/scope metadata for downstream installs, writes, and diagnostics. - const plugins = ConfigPlugin.deduplicatePluginOrigins([ - ...(result.plugin_origins ?? 
[]), - ...list.map((spec) => ({ spec, source, scope: hit })), - ]) - result.plugin = plugins.map((item) => item.spec) - result.plugin_origins = plugins - }) - - const merge = (source: string, next: Info, kind?: ConfigPlugin.Scope) => { - result = mergeConfigConcatArrays(result, next) - return mergePluginOrigins(source, next.plugin, kind) - } - - for (const [key, value] of Object.entries(auth)) { - if (value.type === "wellknown") { - const url = key.replace(/\/+$/, "") - process.env[value.key] = value.token - log.debug("fetching remote config", { url: `${url}/.well-known/opencode` }) - const response = yield* Effect.promise(() => fetch(`${url}/.well-known/opencode`)) - if (!response.ok) { - throw new Error(`failed to fetch remote config from ${url}: ${response.status}`) - } - const wellknown = (yield* Effect.promise(() => response.json())) as { - config?: Record - remote_config?: unknown - } - const remote = yield* Effect.promise(() => - substituteWellKnownRemoteConfig({ - value: wellknown.remote_config, - dir: url, - source: `${url}/.well-known/opencode`, - }), - ) - const fetchedConfig = remote - ? ((yield* Effect.promise(async () => { - log.debug("fetching remote config", { url: remote.url }) - const response = await fetch(remote.url, { headers: remote.headers }) - if (!response.ok) - throw new Error(`failed to fetch remote config from ${remote.url}: ${response.status}`) - const data = await response.json() - return isRecord(data) && isRecord(data.config) ? data.config : data - })) as Record) - : {} - const remoteConfig = mergeConfig(wellknown.config ?? 
{}, fetchedConfig as Info) - if (!remoteConfig.$schema) remoteConfig.$schema = "https://opencode.ai/config.json" - const source = `${url}/.well-known/opencode` - const next = yield* loadConfig(JSON.stringify(remoteConfig), { - dir: path.dirname(source), - source, - }) - yield* merge(source, next, "global") - log.debug("loaded remote config from well-known", { url }) - } - } - - const global = yield* getGlobal() - yield* merge(Global.Path.config, global, "global") - - if (Flag.OPENCODE_CONFIG) { - yield* merge(Flag.OPENCODE_CONFIG, yield* loadFile(Flag.OPENCODE_CONFIG)) - log.debug("loaded custom config", { path: Flag.OPENCODE_CONFIG }) - } - - if (!Flag.OPENCODE_DISABLE_PROJECT_CONFIG) { - for (const file of yield* ConfigPaths.files("opencode", ctx.directory, ctx.worktree).pipe(Effect.orDie)) { - yield* merge(file, yield* loadFile(file), "local") - } - } - - result.agent = result.agent || {} - result.mode = result.mode || {} - result.plugin = result.plugin || [] - - const directories = yield* ConfigPaths.directories(ctx.directory, ctx.worktree) - - if (Flag.OPENCODE_CONFIG_DIR) { - log.debug("loading config from OPENCODE_CONFIG_DIR", { path: Flag.OPENCODE_CONFIG_DIR }) - } - - const deps: Fiber.Fiber[] = [] - - for (const dir of directories) { - if (dir.endsWith(".opencode") || dir === Flag.OPENCODE_CONFIG_DIR) { - for (const file of ["opencode.json", "opencode.jsonc"]) { - const source = path.join(dir, file) - log.debug(`loading config from ${source}`) - yield* merge(source, yield* loadFile(source)) - result.agent ??= {} - result.mode ??= {} - result.plugin ??= [] - } - } - - yield* ensureGitignore(dir).pipe(Effect.orDie) - - const dep = yield* npmSvc - .install(dir, { - add: [ - { - name: "@opencode-ai/plugin", - version: InstallationLocal ? undefined : InstallationVersion, - }, - ], - }) - .pipe( - Effect.exit, - Effect.tap((exit) => - Exit.isFailure(exit) - ? 
Effect.sync(() => { - log.warn("background dependency install failed", { dir, error: String(exit.cause) }) - }) - : Effect.void, - ), - Effect.asVoid, - Effect.forkDetach, - ) - deps.push(dep) - - result.command = mergeDeep(result.command ?? {}, yield* Effect.promise(() => ConfigCommand.load(dir))) - result.agent = mergeDeep(result.agent ?? {}, yield* Effect.promise(() => ConfigAgent.load(dir))) - result.agent = mergeDeep(result.agent ?? {}, yield* Effect.promise(() => ConfigAgent.loadMode(dir))) - // Auto-discovered plugins under `.opencode/plugin(s)` are already local files, so ConfigPlugin.load - // returns normalized Specs and we only need to attach origin metadata here. - const list = yield* Effect.promise(() => ConfigPlugin.load(dir)) - yield* mergePluginOrigins(dir, list) - } - - if (process.env.OPENCODE_CONFIG_CONTENT) { - const source = "OPENCODE_CONFIG_CONTENT" - const next = yield* loadConfig(process.env.OPENCODE_CONFIG_CONTENT, { - dir: ctx.directory, - source, - }) - yield* merge(source, next, "local") - log.debug("loaded custom config from OPENCODE_CONFIG_CONTENT") - } - - const activeAccount = Option.getOrUndefined( - yield* accountSvc.active().pipe(Effect.catch(() => Effect.succeed(Option.none()))), - ) - if (activeAccount?.active_org_id) { - const accountID = activeAccount.id - const orgID = activeAccount.active_org_id - const url = activeAccount.url - yield* Effect.gen(function* () { - const [configOpt, tokenOpt] = yield* Effect.all( - [accountSvc.config(accountID, orgID), accountSvc.token(accountID)], - { concurrency: 2 }, - ) - if (Option.isSome(tokenOpt)) { - process.env["OPENCODE_CONSOLE_TOKEN"] = tokenOpt.value - yield* env.set("OPENCODE_CONSOLE_TOKEN", tokenOpt.value) - } - - if (Option.isSome(configOpt)) { - const source = `${url}/api/config` - const next = yield* loadConfig(JSON.stringify(configOpt.value), { - dir: path.dirname(source), - source, - }) - for (const providerID of Object.keys(next.provider ?? 
{})) { - consoleManagedProviders.add(providerID) - } - yield* merge(source, next, "global") - } - }).pipe( - Effect.withSpan("Config.loadActiveOrgConfig"), - Effect.catch((err) => { - log.debug("failed to fetch remote account config", { - error: err instanceof Error ? err.message : String(err), - }) - return Effect.void - }), - ) - } - - const managedDir = ConfigManaged.managedConfigDir() - if (existsSync(managedDir)) { - for (const file of ["opencode.json", "opencode.jsonc"]) { - const source = path.join(managedDir, file) - yield* merge(source, yield* loadFile(source), "global") - } - } - - // macOS managed preferences (.mobileconfig deployed via MDM) override everything - const managed = yield* Effect.promise(() => ConfigManaged.readManagedPreferences()) - if (managed) { - result = mergeConfigConcatArrays( - result, - yield* loadConfig(managed.text, { - dir: path.dirname(managed.source), - source: managed.source, - }), - ) - } - - for (const [name, mode] of Object.entries(result.mode ?? {})) { - result.agent = mergeDeep(result.agent ?? {}, { - [name]: { - ...mode, - mode: "primary" as const, - }, - }) - } - - if (Flag.OPENCODE_PERMISSION) { - result.permission = mergeDeep(result.permission ?? {}, JSON.parse(Flag.OPENCODE_PERMISSION)) - } - - if (result.tools) { - const perms: Record = {} - for (const [tool, enabled] of Object.entries(result.tools)) { - const action: ConfigPermission.Action = enabled ? "allow" : "deny" - if (tool === "write" || tool === "edit" || tool === "patch") { - perms.edit = action - continue - } - perms[tool] = action - } - result.permission = mergeDeep(perms, result.permission ?? 
{}) - } - - if (!result.username) result.username = os.userInfo().username - - if (result.autoshare === true && !result.share) { - result.share = "auto" - } - - if (Flag.OPENCODE_DISABLE_AUTOCOMPACT) { - result.compaction = { ...result.compaction, auto: false } - } - if (Flag.OPENCODE_DISABLE_PRUNE) { - result.compaction = { ...result.compaction, prune: false } - } - - return { - config: result, - directories, - deps, - consoleState: { - consoleManagedProviders: Array.from(consoleManagedProviders), - activeOrgName, - switchableOrgCount: 0, - }, - } - }, - Effect.provideService(AppFileSystem.Service, fs), - ) - - const state = yield* InstanceState.make( - Effect.fn("Config.state")(function* (ctx) { - return yield* loadInstanceState(ctx).pipe(Effect.orDie) - }), - ) - - const get = Effect.fn("Config.get")(function* () { - return yield* InstanceState.use(state, (s) => s.config) - }) - - const directories = Effect.fn("Config.directories")(function* () { - return yield* InstanceState.use(state, (s) => s.directories) - }) - - const getConsoleState = Effect.fn("Config.getConsoleState")(function* () { - return yield* InstanceState.use(state, (s) => s.consoleState) - }) - - const waitForDependencies = Effect.fn("Config.waitForDependencies")(function* () { - yield* InstanceState.useEffect(state, (s) => - Effect.forEach(s.deps, Fiber.join, { concurrency: "unbounded" }).pipe(Effect.asVoid), - ) - }) - - const update = Effect.fn("Config.update")(function* (config: Info) { - const dir = yield* InstanceState.directory - const file = path.join(dir, "config.json") - const existing = yield* loadFile(file) - yield* fs - .writeFileString(file, JSON.stringify(mergeDeep(writable(existing), writable(config)), null, 2)) - .pipe(Effect.orDie) - }) - - const invalidate = Effect.fn("Config.invalidate")(function* () { - yield* invalidateGlobal - }) - - const updateGlobal = Effect.fn("Config.updateGlobal")(function* (config: Info) { - const file = globalConfigFile() - const before = (yield* 
readConfigFile(file)) ?? "{}" - const patch = writableGlobal(config) - - let next: Info - let changed: boolean - if (!file.endsWith(".jsonc")) { - const existing = ConfigParse.schema(Info, ConfigParse.jsonc(before, file), file) - const merged = mergeDeep(writable(existing), patch) - const serialized = JSON.stringify(merged, null, 2) - changed = serialized !== before - if (changed) yield* fs.writeFileString(file, serialized).pipe(Effect.orDie) - next = merged - } else { - const updated = patchJsonc(before, patch) - next = ConfigParse.schema(Info, ConfigParse.jsonc(updated, file), file) - changed = updated !== before - if (changed) yield* fs.writeFileString(file, updated).pipe(Effect.orDie) - } - - if (changed) yield* invalidate() - return { info: next, changed } - }) - - return Service.of({ - get, - getGlobal, - getConsoleState, - update, - updateGlobal, - invalidate, - directories, - waitForDependencies, - }) - }), -) - -export const defaultLayer = layer.pipe( - Layer.provide(EffectFlock.defaultLayer), - Layer.provide(AppFileSystem.defaultLayer), - Layer.provide(Env.defaultLayer), - Layer.provide(Auth.defaultLayer), - Layer.provide(Account.defaultLayer), - Layer.provide(Npm.defaultLayer), -) - -export * as Config from "./config" diff --git a/packages/opencode/src/provider/error.ts b/packages/opencode/src/provider/error.ts deleted file mode 100644 index 7363b5ce5969..000000000000 --- a/packages/opencode/src/provider/error.ts +++ /dev/null @@ -1,204 +0,0 @@ -import { APICallError } from "ai" -import { STATUS_CODES } from "http" -import { iife } from "@/util/iife" -import type { ProviderID } from "./schema" - -// Adapted from overflow detection patterns in: -// https://github.com/badlogic/pi-mono/blob/main/packages/ai/src/utils/overflow.ts -const OVERFLOW_PATTERNS = [ - /prompt is too long/i, // Anthropic - /input is too long for requested model/i, // Amazon Bedrock - /exceeds the context window/i, // OpenAI (Completions + Responses API message text) - /input token 
count.*exceeds the maximum/i, // Google (Gemini) - /maximum prompt length is \d+/i, // xAI (Grok) - /reduce the length of the messages/i, // Groq - /maximum context length is \d+ tokens/i, // OpenRouter, DeepSeek, vLLM - /exceeds the limit of \d+/i, // GitHub Copilot - /exceeds the available context size/i, // llama.cpp server - /greater than the context length/i, // LM Studio - /context window exceeds limit/i, // MiniMax - /exceeded model token limit/i, // Kimi For Coding, Moonshot - /context[_ ]length[_ ]exceeded/i, // Generic fallback - /request entity too large/i, // HTTP 413 - /context length is only \d+ tokens/i, // vLLM - /input length.*exceeds.*context length/i, // vLLM - /prompt too long; exceeded (?:max )?context length/i, // Ollama explicit overflow error - /too large for model with \d+ maximum context length/i, // Mistral - /model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text -] - -function isOpenAiErrorRetryable(e: APICallError) { - const status = e.statusCode - if (!status) return e.isRetryable - // openai sometimes returns 404 for models that are actually available - return status === 404 || e.isRetryable -} - -// Providers not reliably handled in this function: -// - z.ai: can accept overflow silently (needs token-count/context-window checks) -function isOverflow(message: string) { - if (OVERFLOW_PATTERNS.some((p) => p.test(message))) return true - - // Providers/status patterns handled outside of regex list: - // - Cerebras: often returns "400 (no body)" / "413 (no body)" - // - Mistral: often returns "400 (no body)" / "413 (no body)" - return /^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message) -} - -function message(providerID: ProviderID, e: APICallError) { - return iife(() => { - const msg = e.message - if (msg === "") { - if (e.responseBody) return e.responseBody - if (e.statusCode) { - const err = STATUS_CODES[e.statusCode] - if (err) return err - } - return "Unknown error" - } - - if 
(!e.responseBody || (e.statusCode && msg !== STATUS_CODES[e.statusCode])) { - return msg - } - - try { - const body = JSON.parse(e.responseBody) - // try to extract common error message fields - const errMsg = body.message || body.error || body.error?.message - if (errMsg && typeof errMsg === "string") { - return `${msg}: ${errMsg}` - } - } catch {} - - // If responseBody is HTML (e.g. from a gateway or proxy error page), - // provide a human-readable message instead of dumping raw markup - if (/^\s*` to re-authenticate." - } - if (e.statusCode === 403) { - return "Forbidden: request was blocked by a gateway or proxy. You may not have permission to access this resource — check your account and provider settings." - } - return msg - } - - return `${msg}: ${e.responseBody}` - }).trim() -} - -function json(input: unknown) { - if (typeof input === "string") { - try { - const result = JSON.parse(input) - if (result && typeof result === "object") return result - return undefined - } catch { - return undefined - } - } - if (typeof input === "object" && input !== null) { - return input - } - return undefined -} - -export type ParsedStreamError = - | { - type: "context_overflow" - message: string - responseBody: string - } - | { - type: "api_error" - message: string - isRetryable: boolean - responseBody: string - } - -export function parseStreamError(input: unknown): ParsedStreamError | undefined { - const raw = json(input) - const body = typeof raw?.message === "string" ? (json(raw.message) ?? raw) : raw - if (!body) return - - const responseBody = JSON.stringify(body) - if (body.type !== "error") return - - switch (body?.error?.code) { - case "context_length_exceeded": - return { - type: "context_overflow", - message: "Input exceeds context window of this model", - responseBody, - } - case "insufficient_quota": - return { - type: "api_error", - message: "Quota exceeded. 
Check your plan and billing details.", - isRetryable: false, - responseBody, - } - case "usage_not_included": - return { - type: "api_error", - message: "To use Codex with your ChatGPT plan, upgrade to Plus: https://chatgpt.com/explore/plus.", - isRetryable: false, - responseBody, - } - case "invalid_prompt": - return { - type: "api_error", - message: typeof body?.error?.message === "string" ? body?.error?.message : "Invalid prompt.", - isRetryable: false, - responseBody, - } - case "server_is_overloaded": - case "server_error": - return { - type: "api_error", - message: typeof body?.error?.message === "string" ? body?.error?.message : "Server error.", - isRetryable: true, - responseBody, - } - } -} - -export type ParsedAPICallError = - | { - type: "context_overflow" - message: string - responseBody?: string - } - | { - type: "api_error" - message: string - statusCode?: number - isRetryable: boolean - responseHeaders?: Record - responseBody?: string - metadata?: Record - } - -export function parseAPICallError(input: { providerID: ProviderID; error: APICallError }): ParsedAPICallError { - const m = message(input.providerID, input.error) - const body = json(input.error.responseBody) - if (isOverflow(m) || input.error.statusCode === 413 || body?.error?.code === "context_length_exceeded") { - return { - type: "context_overflow", - message: m, - responseBody: input.error.responseBody, - } - } - - const metadata = input.error.url ? { url: input.error.url } : undefined - return { - type: "api_error", - message: m, - statusCode: input.error.statusCode, - isRetryable: input.providerID.startsWith("openai") ? 
isOpenAiErrorRetryable(input.error) : input.error.isRetryable, - responseHeaders: input.error.responseHeaders, - responseBody: input.error.responseBody, - metadata, - } -} - -export * as ProviderError from "./error" diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts deleted file mode 100644 index d4d28088d988..000000000000 --- a/packages/opencode/src/provider/provider.ts +++ /dev/null @@ -1,1784 +0,0 @@ -import os from "os" -import fuzzysort from "fuzzysort" -import { Config } from "@/config/config" -import { mapValues, mergeDeep, omit, pickBy, sortBy } from "remeda" -import { NoSuchModelError, type Provider as SDK } from "ai" -import * as Log from "@opencode-ai/core/util/log" -import { Npm } from "@opencode-ai/core/npm" -import { Hash } from "@opencode-ai/core/util/hash" -import { Plugin } from "../plugin" -import { type LanguageModelV3 } from "@ai-sdk/provider" -import * as ModelsDev from "./models" -import { Auth } from "../auth" -import { Env } from "../env" -import { InstallationVersion } from "@opencode-ai/core/installation/version" -import { Flag } from "@opencode-ai/core/flag/flag" -import { NamedError } from "@opencode-ai/core/util/error" -import { iife } from "@/util/iife" -import { Global } from "@opencode-ai/core/global" -import path from "path" -import { pathToFileURL } from "url" -import { Effect, Layer, Context, Schema, Types } from "effect" -import { EffectBridge } from "@/effect/bridge" -import { InstanceState } from "@/effect/instance-state" -import { AppFileSystem } from "@opencode-ai/core/filesystem" -import { isRecord } from "@/util/record" -import { optionalOmitUndefined } from "@opencode-ai/core/schema" - -import * as ProviderTransform from "./transform" -import { ModelID, ProviderID } from "./schema" -import { ModelStatus } from "./model-status" - -const log = Log.create({ service: "provider" }) - -function shouldUseCopilotResponsesApi(modelID: string): boolean { - const match = 
/^gpt-(\d+)/.exec(modelID) - if (!match) return false - return Number(match[1]) >= 5 && !modelID.startsWith("gpt-5-mini") -} - -function wrapSSE(res: Response, ms: number, ctl: AbortController) { - if (typeof ms !== "number" || ms <= 0) return res - if (!res.body) return res - if (!res.headers.get("content-type")?.includes("text/event-stream")) return res - - const reader = res.body.getReader() - const body = new ReadableStream({ - async pull(ctrl) { - const part = await new Promise>>((resolve, reject) => { - const id = setTimeout(() => { - const err = new Error("SSE read timed out") - ctl.abort(err) - void reader.cancel(err) - reject(err) - }, ms) - - reader.read().then( - (part) => { - clearTimeout(id) - resolve(part) - }, - (err) => { - clearTimeout(id) - reject(err) - }, - ) - }) - - if (part.done) { - ctrl.close() - return - } - - ctrl.enqueue(part.value) - }, - async cancel(reason) { - ctl.abort(reason) - await reader.cancel(reason) - }, - }) - - return new Response(body, { - headers: new Headers(res.headers), - status: res.status, - statusText: res.statusText, - }) -} - -type BundledSDK = { - languageModel(modelId: string): LanguageModelV3 -} - -const BUNDLED_PROVIDERS: Record Promise<(opts: any) => BundledSDK>> = { - "@ai-sdk/amazon-bedrock": () => import("@ai-sdk/amazon-bedrock").then((m) => m.createAmazonBedrock), - "@ai-sdk/anthropic": () => import("@ai-sdk/anthropic").then((m) => m.createAnthropic), - "@ai-sdk/azure": () => import("@ai-sdk/azure").then((m) => m.createAzure), - "@ai-sdk/google": () => import("@ai-sdk/google").then((m) => m.createGoogleGenerativeAI), - "@ai-sdk/google-vertex": () => import("@ai-sdk/google-vertex").then((m) => m.createVertex), - "@ai-sdk/google-vertex/anthropic": () => - import("@ai-sdk/google-vertex/anthropic").then((m) => m.createVertexAnthropic), - "@ai-sdk/openai": () => import("@ai-sdk/openai").then((m) => m.createOpenAI), - "@ai-sdk/openai-compatible": () => import("@ai-sdk/openai-compatible").then((m) => 
m.createOpenAICompatible), - "@openrouter/ai-sdk-provider": () => import("@openrouter/ai-sdk-provider").then((m) => m.createOpenRouter), - "@ai-sdk/xai": () => import("@ai-sdk/xai").then((m) => m.createXai), - "@ai-sdk/mistral": () => import("@ai-sdk/mistral").then((m) => m.createMistral), - "@ai-sdk/groq": () => import("@ai-sdk/groq").then((m) => m.createGroq), - "@ai-sdk/deepinfra": () => import("@ai-sdk/deepinfra").then((m) => m.createDeepInfra), - "@ai-sdk/cerebras": () => import("@ai-sdk/cerebras").then((m) => m.createCerebras), - "@ai-sdk/cohere": () => import("@ai-sdk/cohere").then((m) => m.createCohere), - "@ai-sdk/gateway": () => import("@ai-sdk/gateway").then((m) => m.createGateway), - "@ai-sdk/togetherai": () => import("@ai-sdk/togetherai").then((m) => m.createTogetherAI), - "@ai-sdk/perplexity": () => import("@ai-sdk/perplexity").then((m) => m.createPerplexity), - "@ai-sdk/vercel": () => import("@ai-sdk/vercel").then((m) => m.createVercel), - "@ai-sdk/alibaba": () => import("@ai-sdk/alibaba").then((m) => m.createAlibaba), - "gitlab-ai-provider": () => import("gitlab-ai-provider").then((m) => m.createGitLab), - "@ai-sdk/github-copilot": () => import("./sdk/copilot/copilot-provider").then((m) => m.createOpenaiCompatible), - "venice-ai-sdk-provider": () => import("venice-ai-sdk-provider").then((m) => m.createVenice), -} - -type CustomModelLoader = (sdk: any, modelID: string, options?: Record) => Promise -type CustomVarsLoader = (options: Record) => Record -type CustomDiscoverModels = () => Promise> -type CustomLoader = (provider: Info) => Effect.Effect<{ - autoload: boolean - getModel?: CustomModelLoader - vars?: CustomVarsLoader - options?: Record - discoverModels?: CustomDiscoverModels -}> - -type CustomDep = { - auth: (id: string) => Effect.Effect - config: () => Effect.Effect - env: () => Effect.Effect> - get: (key: string) => Effect.Effect -} - -function useLanguageModel(sdk: any) { - return sdk.responses === undefined && sdk.chat === undefined -} - 
-function selectAzureLanguageModel(sdk: any, modelID: string, useChat: boolean) { - if (useChat && sdk.chat) return sdk.chat(modelID) - if (sdk.responses) return sdk.responses(modelID) - if (sdk.messages) return sdk.messages(modelID) - if (sdk.chat) return sdk.chat(modelID) - return sdk.languageModel(modelID) -} - -function custom(dep: CustomDep): Record { - return { - anthropic: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "anthropic-beta": "interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14", - }, - }, - }), - opencode: Effect.fnUntraced(function* (input: Info) { - const env = yield* dep.env() - const hasKey = iife(() => { - if (input.env.some((item) => env[item])) return true - return false - }) - const ok = - hasKey || - Boolean(yield* dep.auth(input.id)) || - Boolean((yield* dep.config()).provider?.["opencode"]?.options?.apiKey) - - if (!ok) { - for (const [key, value] of Object.entries(input.models)) { - if (value.cost.input === 0) continue - delete input.models[key] - } - } - - return { - autoload: Object.keys(input.models).length > 0, - options: ok ? {} : { apiKey: "public" }, - } - }), - openai: () => - Effect.succeed({ - autoload: false, - async getModel(sdk: any, modelID: string, _options?: Record) { - return sdk.responses(modelID) - }, - options: {}, - }), - xai: () => - Effect.succeed({ - autoload: false, - async getModel(sdk: any, modelID: string, _options?: Record) { - return sdk.responses(modelID) - }, - options: {}, - }), - "github-copilot": () => - Effect.succeed({ - autoload: false, - async getModel(sdk: any, modelID: string, _options?: Record) { - if (useLanguageModel(sdk)) return sdk.languageModel(modelID) - return shouldUseCopilotResponsesApi(modelID) ? 
sdk.responses(modelID) : sdk.chat(modelID) - }, - options: {}, - }), - azure: Effect.fnUntraced(function* (provider: Info) { - const env = yield* dep.env() - const auth = yield* dep.auth(provider.id) - const resource = iife(() => { - return [ - provider.options?.resourceName, - auth?.type === "api" ? auth.metadata?.resourceName : undefined, - env["AZURE_RESOURCE_NAME"], - ].find((name) => typeof name === "string" && name.trim() !== "") - }) - - if (!resource && !provider.options?.baseURL) { - return { - autoload: false, - async getModel() { - throw new Error( - "AZURE_RESOURCE_NAME is missing, set it using env var or reconnecting the azure provider and setting it", - ) - }, - } - } - - return { - autoload: false, - async getModel(sdk: any, modelID: string, options?: Record) { - return selectAzureLanguageModel(sdk, modelID, Boolean(options?.["useCompletionUrls"])) - }, - options: { - resourceName: resource, - }, - vars(_options): Record { - if (resource) { - return { - AZURE_RESOURCE_NAME: resource, - } - } - return {} - }, - } - }), - "azure-cognitive-services": Effect.fnUntraced(function* () { - const resourceName = yield* dep.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME") - return { - autoload: false, - async getModel(sdk: any, modelID: string, options?: Record) { - return selectAzureLanguageModel(sdk, modelID, Boolean(options?.["useCompletionUrls"])) - }, - options: { - baseURL: resourceName ? `https://${resourceName}.cognitiveservices.azure.com/openai` : undefined, - }, - } - }), - "amazon-bedrock": Effect.fnUntraced(function* () { - const providerConfig = (yield* dep.config()).provider?.["amazon-bedrock"] - const auth = yield* dep.auth("amazon-bedrock") - const env = yield* dep.env() - - // Region precedence: 1) config file, 2) env var, 3) default - const configRegion = providerConfig?.options?.region - const envRegion = env["AWS_REGION"] - const defaultRegion = configRegion ?? envRegion ?? 
"us-east-1" - - // Profile: config file takes precedence over env var - const configProfile = providerConfig?.options?.profile - const envProfile = env["AWS_PROFILE"] - const profile = configProfile ?? envProfile - - const awsAccessKeyId = env["AWS_ACCESS_KEY_ID"] - - // TODO: Using process.env directly because Env.set only updates a process.env shallow copy, - // until the scope of the Env API is clarified (test only or runtime?) - const awsBearerToken = iife(() => { - const envToken = process.env.AWS_BEARER_TOKEN_BEDROCK - if (envToken) return envToken - if (auth?.type === "api") { - process.env.AWS_BEARER_TOKEN_BEDROCK = auth.key - return auth.key - } - return undefined - }) - - const awsWebIdentityTokenFile = env["AWS_WEB_IDENTITY_TOKEN_FILE"] - - const containerCreds = Boolean( - process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI || process.env.AWS_CONTAINER_CREDENTIALS_FULL_URI, - ) - - if (!profile && !awsAccessKeyId && !awsBearerToken && !awsWebIdentityTokenFile && !containerCreds) - return { autoload: false } - - const { fromNodeProviderChain } = yield* Effect.promise(() => import("@aws-sdk/credential-providers")) - - const providerOptions: Record = { - region: defaultRegion, - } - - // Only use credential chain if no bearer token exists - // Bearer token takes precedence over credential chain (profiles, access keys, IAM roles, web identity tokens) - if (!awsBearerToken) { - // Build credential provider options (only pass profile if specified) - const credentialProviderOptions = profile ? { profile } : {} - - providerOptions.credentialProvider = fromNodeProviderChain(credentialProviderOptions) - } - - // Add custom endpoint if specified (endpoint takes precedence over baseURL) - const endpoint = providerConfig?.options?.endpoint ?? 
providerConfig?.options?.baseURL - if (endpoint) { - providerOptions.baseURL = endpoint - } - - return { - autoload: true, - options: providerOptions, - async getModel(sdk: any, modelID: string, options?: Record) { - // Skip region prefixing if model already has a cross-region inference profile prefix - // Models from models.dev may already include prefixes like us., eu., global., etc. - const crossRegionPrefixes = ["global.", "us.", "eu.", "jp.", "apac.", "au."] - if (crossRegionPrefixes.some((prefix) => modelID.startsWith(prefix))) { - return sdk.languageModel(modelID) - } - - // Region resolution precedence (highest to lowest): - // 1. options.region from opencode.json provider config - // 2. defaultRegion from AWS_REGION environment variable - // 3. Default "us-east-1" (baked into defaultRegion) - const region = options?.region ?? defaultRegion - - let regionPrefix = region.split("-")[0] - - switch (regionPrefix) { - case "us": { - const modelRequiresPrefix = [ - "nova-micro", - "nova-lite", - "nova-pro", - "nova-premier", - "nova-2", - "claude", - "deepseek", - ].some((m) => modelID.includes(m)) - const isGovCloud = region.startsWith("us-gov") - if (modelRequiresPrefix && !isGovCloud) { - modelID = `${regionPrefix}.${modelID}` - } - break - } - case "eu": { - const regionRequiresPrefix = [ - "eu-west-1", - "eu-west-2", - "eu-west-3", - "eu-north-1", - "eu-central-1", - "eu-south-1", - "eu-south-2", - ].some((r) => region.includes(r)) - const modelRequiresPrefix = ["claude", "nova-lite", "nova-micro", "llama3", "pixtral"].some((m) => - modelID.includes(m), - ) - if (regionRequiresPrefix && modelRequiresPrefix) { - modelID = `${regionPrefix}.${modelID}` - } - break - } - case "ap": { - const isAustraliaRegion = ["ap-southeast-2", "ap-southeast-4"].includes(region) - const isTokyoRegion = region === "ap-northeast-1" - if ( - isAustraliaRegion && - ["anthropic.claude-sonnet-4-5", "anthropic.claude-haiku"].some((m) => modelID.includes(m)) - ) { - regionPrefix = 
"au" - modelID = `${regionPrefix}.${modelID}` - } else if (isTokyoRegion) { - // Tokyo region uses jp. prefix for cross-region inference - const modelRequiresPrefix = ["claude", "nova-lite", "nova-micro", "nova-pro"].some((m) => - modelID.includes(m), - ) - if (modelRequiresPrefix) { - regionPrefix = "jp" - modelID = `${regionPrefix}.${modelID}` - } - } else { - // Other APAC regions use apac. prefix - const modelRequiresPrefix = ["claude", "nova-lite", "nova-micro", "nova-pro"].some((m) => - modelID.includes(m), - ) - if (modelRequiresPrefix) { - regionPrefix = "apac" - modelID = `${regionPrefix}.${modelID}` - } - } - break - } - } - - return sdk.languageModel(modelID) - }, - } - }), - llmgateway: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "HTTP-Referer": "https://opencode.ai/", - "X-Title": "opencode", - "X-Source": "opencode", - }, - }, - }), - openrouter: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "HTTP-Referer": "https://opencode.ai/", - "X-Title": "opencode", - }, - }, - }), - nvidia: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "HTTP-Referer": "https://opencode.ai/", - "X-Title": "opencode", - }, - }, - }), - vercel: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "http-referer": "https://opencode.ai/", - "x-title": "opencode", - }, - }, - }), - "google-vertex": Effect.fnUntraced(function* (provider: Info) { - const env = yield* dep.env() - const project = - provider.options?.project ?? env["GOOGLE_CLOUD_PROJECT"] ?? env["GCP_PROJECT"] ?? env["GCLOUD_PROJECT"] - - const location = String( - provider.options?.location ?? - env["GOOGLE_VERTEX_LOCATION"] ?? - env["GOOGLE_CLOUD_LOCATION"] ?? - env["VERTEX_LOCATION"] ?? - "us-central1", - ) - - const autoload = Boolean(project) - if (!autoload) return { autoload: false } - return { - autoload: true, - vars(_options: Record) { - const endpoint = location === "global" ? 
"aiplatform.googleapis.com" : `${location}-aiplatform.googleapis.com` - return { - ...(project && { GOOGLE_VERTEX_PROJECT: project }), - GOOGLE_VERTEX_LOCATION: location, - GOOGLE_VERTEX_ENDPOINT: endpoint, - } - }, - options: { - project, - location, - fetch: async (input: RequestInfo | URL, init?: RequestInit) => { - const { GoogleAuth } = await import("google-auth-library") - const auth = new GoogleAuth() - const client = await auth.getApplicationDefault() - const token = await client.credential.getAccessToken() - - const headers = new Headers(init?.headers) - headers.set("Authorization", `Bearer ${token.token}`) - - return fetch(input, { ...init, headers }) - }, - }, - async getModel(sdk: any, modelID: string) { - const id = String(modelID).trim() - return sdk.languageModel(id) - }, - } - }), - "google-vertex-anthropic": Effect.fnUntraced(function* () { - const env = yield* dep.env() - const project = env["GOOGLE_CLOUD_PROJECT"] ?? env["GCP_PROJECT"] ?? env["GCLOUD_PROJECT"] - const location = env["GOOGLE_CLOUD_LOCATION"] ?? env["VERTEX_LOCATION"] ?? "global" - const autoload = Boolean(project) - if (!autoload) return { autoload: false } - return { - autoload: true, - options: { - project, - location, - }, - async getModel(sdk: any, modelID) { - const id = String(modelID).trim() - return sdk.languageModel(id) - }, - } - }), - "sap-ai-core": Effect.fnUntraced(function* () { - const auth = yield* dep.auth("sap-ai-core") - // TODO: Using process.env directly because Env.set only updates a shallow copy (not process.env), - // until the scope of the Env API is clarified (test only or runtime?) 
- const envServiceKey = iife(() => { - const envAICoreServiceKey = process.env.AICORE_SERVICE_KEY - if (envAICoreServiceKey) return envAICoreServiceKey - if (auth?.type === "api") { - process.env.AICORE_SERVICE_KEY = auth.key - return auth.key - } - return undefined - }) - const deploymentId = process.env.AICORE_DEPLOYMENT_ID - const resourceGroup = process.env.AICORE_RESOURCE_GROUP - - return { - autoload: !!envServiceKey, - options: envServiceKey ? { deploymentId, resourceGroup } : {}, - async getModel(sdk: any, modelID: string) { - return sdk(modelID) - }, - } - }), - zenmux: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "HTTP-Referer": "https://opencode.ai/", - "X-Title": "opencode", - }, - }, - }), - gitlab: Effect.fnUntraced(function* (input: Info) { - const { - VERSION: GITLAB_PROVIDER_VERSION, - isWorkflowModel, - discoverWorkflowModels, - } = yield* Effect.promise(() => import("gitlab-ai-provider")) - - const instanceUrl = (yield* dep.get("GITLAB_INSTANCE_URL")) || "https://gitlab.com" - - const auth = yield* dep.auth(input.id) - const apiKey = yield* Effect.sync(() => { - if (auth?.type === "oauth") return auth.access - if (auth?.type === "api") return auth.key - return undefined - }) - const token = apiKey ?? 
(yield* dep.get("GITLAB_TOKEN")) - - const providerConfig = (yield* dep.config()).provider?.["gitlab"] - const directory = yield* InstanceState.directory - - const aiGatewayHeaders = { - "User-Agent": `opencode/${InstallationVersion} gitlab-ai-provider/${GITLAB_PROVIDER_VERSION} (${os.platform()} ${os.release()}; ${os.arch()})`, - "anthropic-beta": "context-1m-2025-08-07", - ...providerConfig?.options?.aiGatewayHeaders, - } - - const featureFlags = { - duo_agent_platform_agentic_chat: true, - duo_agent_platform: true, - ...providerConfig?.options?.featureFlags, - } - - return { - autoload: !!token, - options: { - instanceUrl, - apiKey: token, - aiGatewayHeaders, - featureFlags, - }, - async getModel(sdk: any, modelID: string, options?: Record) { - if (modelID.startsWith("duo-workflow-")) { - const workflowRef = typeof options?.workflowRef === "string" ? options.workflowRef : undefined - // Use the static mapping if it exists, otherwise use duo-workflow with selectedModelRef - const sdkModelID = isWorkflowModel(modelID) ? modelID : "duo-workflow" - const workflowDefinition = - typeof options?.workflowDefinition === "string" ? options.workflowDefinition : undefined - const model = sdk.workflowChat(sdkModelID, { - featureFlags, - workflowDefinition, - }) - if (workflowRef) { - model.selectedModelRef = workflowRef - } - return model - } - return sdk.agenticChat(modelID, { - aiGatewayHeaders, - featureFlags, - }) - }, - async discoverModels(): Promise> { - if (!apiKey) { - log.info("gitlab model discovery skipped: no apiKey") - return {} - } - - try { - const token = apiKey - const getHeaders = (): Record => - auth?.type === "api" ? 
{ "PRIVATE-TOKEN": token } : { Authorization: `Bearer ${token}` } - - log.info("gitlab model discovery starting", { instanceUrl }) - const result = await discoverWorkflowModels({ instanceUrl, getHeaders }, { workingDirectory: directory }) - - if (!result.models.length) { - log.info("gitlab model discovery skipped: no models found", { - project: result.project - ? { - id: result.project.id, - path: result.project.pathWithNamespace, - } - : null, - }) - return {} - } - - const models: Record = {} - for (const m of result.models) { - if (!input.models[m.id]) { - models[m.id] = { - id: ModelID.make(m.id), - providerID: ProviderID.make("gitlab"), - name: `Agent Platform (${m.name})`, - family: "", - api: { - id: m.id, - url: instanceUrl, - npm: "gitlab-ai-provider", - }, - status: "active", - headers: {}, - options: { workflowRef: m.ref }, - cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, - limit: { context: m.context, output: m.output }, - capabilities: { - temperature: false, - reasoning: true, - attachment: true, - toolcall: true, - input: { - text: true, - audio: false, - image: true, - video: false, - pdf: true, - }, - output: { - text: true, - audio: false, - image: false, - video: false, - pdf: false, - }, - interleaved: false, - }, - release_date: "", - variants: {}, - } - } - } - - log.info("gitlab model discovery complete", { - count: Object.keys(models).length, - models: Object.keys(models), - }) - return models - } catch (e) { - log.warn("gitlab model discovery failed", { error: e }) - return {} - } - }, - } - }), - "cloudflare-workers-ai": Effect.fnUntraced(function* (input: Info) { - // When baseURL is already configured (e.g. corporate config routing through a proxy/gateway), - // skip the account ID check because the URL is already fully specified. 
- if (input.options?.baseURL) return { autoload: false } - - const auth = yield* dep.auth(input.id) - const env = yield* dep.env() - const accountId = env["CLOUDFLARE_ACCOUNT_ID"] || (auth?.type === "api" ? auth.metadata?.accountId : undefined) - if (!accountId) - return { - autoload: false, - async getModel() { - throw new Error( - "CLOUDFLARE_ACCOUNT_ID is missing. Set it with: export CLOUDFLARE_ACCOUNT_ID=", - ) - }, - } - - const apiKey = yield* Effect.gen(function* () { - const envToken = env["CLOUDFLARE_API_KEY"] - if (envToken) return envToken - if (auth?.type === "api") return auth.key - return undefined - }) - - return { - autoload: !!apiKey, - options: { - apiKey, - headers: { - "User-Agent": `opencode/${InstallationVersion} cloudflare-workers-ai (${os.platform()} ${os.release()}; ${os.arch()})`, - }, - }, - async getModel(sdk: any, modelID: string) { - return sdk.languageModel(modelID) - }, - vars(_options) { - return { - CLOUDFLARE_ACCOUNT_ID: accountId, - } - }, - } - }), - "cloudflare-ai-gateway": Effect.fnUntraced(function* (input: Info) { - // When baseURL is already configured (e.g. corporate config), skip the ID checks. - if (input.options?.baseURL) return { autoload: false } - - const auth = yield* dep.auth(input.id) - const env = yield* dep.env() - const accountId = env["CLOUDFLARE_ACCOUNT_ID"] || (auth?.type === "api" ? auth.metadata?.accountId : undefined) - const gateway = env["CLOUDFLARE_GATEWAY_ID"] || (auth?.type === "api" ? auth.metadata?.gatewayId : undefined) - - if (!accountId || !gateway) { - const missing = [ - !accountId ? "CLOUDFLARE_ACCOUNT_ID" : undefined, - !gateway ? "CLOUDFLARE_GATEWAY_ID" : undefined, - ].filter((x): x is string => Boolean(x)) - return { - autoload: false, - async getModel() { - throw new Error( - `${missing.join(" and ")} missing. 
Set with: ${missing.map((x) => `export ${x}=`).join(" && ")}`, - ) - }, - } - } - - // Get API token from env or auth - required for authenticated gateways - const apiToken = yield* Effect.gen(function* () { - const envToken = env["CLOUDFLARE_API_TOKEN"] || env["CF_AIG_TOKEN"] - if (envToken) return envToken - if (auth?.type === "api") return auth.key - return undefined - }) - - if (!apiToken) { - throw new Error( - "CLOUDFLARE_API_TOKEN (or CF_AIG_TOKEN) is required for Cloudflare AI Gateway. " + - "Set it via environment variable or run `opencode auth cloudflare-ai-gateway`.", - ) - } - - // Use official ai-gateway-provider package (v2.x for AI SDK v5 compatibility) - const { createAiGateway } = yield* Effect.promise(() => import("ai-gateway-provider")) - const { createUnified } = yield* Effect.promise(() => import("ai-gateway-provider/providers/unified")) - - const metadata = iife(() => { - if (input.options?.metadata) return input.options.metadata - try { - return JSON.parse(input.options?.headers?.["cf-aig-metadata"]) - } catch { - return undefined - } - }) - const opts = { - metadata, - cacheTtl: input.options?.cacheTtl, - cacheKey: input.options?.cacheKey, - skipCache: input.options?.skipCache, - collectLog: input.options?.collectLog, - headers: { - "User-Agent": `opencode/${InstallationVersion} cloudflare-ai-gateway (${os.platform()} ${os.release()}; ${os.arch()})`, - }, - } - - const aigateway = createAiGateway({ - accountId, - gateway, - apiKey: apiToken, - ...(Object.values(opts).some((v) => v !== undefined) ? 
{ options: opts } : {}), - }) - const unified = createUnified() - - return { - autoload: true, - async getModel(_sdk: any, modelID: string, _options?: Record) { - // Model IDs use Unified API format: provider/model (e.g., "anthropic/claude-sonnet-4-5") - return aigateway(unified(modelID)) - }, - options: {}, - } - }), - cerebras: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "X-Cerebras-3rd-Party-Integration": "opencode", - }, - }, - }), - kilo: () => - Effect.succeed({ - autoload: false, - options: { - headers: { - "HTTP-Referer": "https://opencode.ai/", - "X-Title": "opencode", - }, - }, - }), - } -} - -const ProviderApiInfo = Schema.Struct({ - id: Schema.String, - url: Schema.String, - npm: Schema.String, -}) - -const ProviderModalities = Schema.Struct({ - text: Schema.Boolean, - audio: Schema.Boolean, - image: Schema.Boolean, - video: Schema.Boolean, - pdf: Schema.Boolean, -}) - -const ProviderInterleaved = Schema.Union([ - Schema.Boolean, - Schema.Struct({ - field: Schema.Literals(["reasoning_content", "reasoning_details"]), - }), -]) - -const ProviderCapabilities = Schema.Struct({ - temperature: Schema.Boolean, - reasoning: Schema.Boolean, - attachment: Schema.Boolean, - toolcall: Schema.Boolean, - input: ProviderModalities, - output: ProviderModalities, - interleaved: ProviderInterleaved, -}) - -const ProviderCacheCost = Schema.Struct({ - read: Schema.Finite, - write: Schema.Finite, -}) - -const ProviderCostTier = Schema.Struct({ - input: Schema.Finite, - output: Schema.Finite, - cache: ProviderCacheCost, - tier: Schema.Struct({ - type: Schema.Literal("context"), - size: Schema.Finite, - }), -}) - -const ProviderCost = Schema.Struct({ - input: Schema.Finite, - output: Schema.Finite, - cache: ProviderCacheCost, - tiers: optionalOmitUndefined(Schema.Array(ProviderCostTier)), - experimentalOver200K: optionalOmitUndefined( - Schema.Struct({ - input: Schema.Finite, - output: Schema.Finite, - cache: ProviderCacheCost, - }), - ), -}) - 
-const ProviderLimit = Schema.Struct({ - context: Schema.Finite, - input: optionalOmitUndefined(Schema.Finite), - output: Schema.Finite, -}) - -export const Model = Schema.Struct({ - id: ModelID, - providerID: ProviderID, - api: ProviderApiInfo, - name: Schema.String, - family: optionalOmitUndefined(Schema.String), - capabilities: ProviderCapabilities, - cost: ProviderCost, - limit: ProviderLimit, - status: ModelStatus, - options: Schema.Record(Schema.String, Schema.Any), - headers: Schema.Record(Schema.String, Schema.String), - release_date: Schema.String, - variants: optionalOmitUndefined(Schema.Record(Schema.String, Schema.Record(Schema.String, Schema.Any))), -}).annotate({ identifier: "Model" }) -export type Model = Types.DeepMutable> - -export const Info = Schema.Struct({ - id: ProviderID, - name: Schema.String, - source: Schema.Literals(["env", "config", "custom", "api"]), - env: Schema.Array(Schema.String), - key: optionalOmitUndefined(Schema.String), - options: Schema.Record(Schema.String, Schema.Any), - models: Schema.Record(Schema.String, Model), -}).annotate({ identifier: "Provider" }) -export type Info = Types.DeepMutable> - -const DefaultModelIDs = Schema.Record(Schema.String, Schema.String) - -export const ListResult = Schema.Struct({ - all: Schema.Array(Info), - default: DefaultModelIDs, - connected: Schema.Array(Schema.String), -}) -export type ListResult = Types.DeepMutable> - -export const ConfigProvidersResult = Schema.Struct({ - providers: Schema.Array(Info), - default: DefaultModelIDs, -}) -export type ConfigProvidersResult = Types.DeepMutable> - -export function toPublicInfo(provider: Info): Info { - return JSON.parse( - JSON.stringify(provider, (_, value) => { - if (typeof value === "function" || typeof value === "symbol" || value === undefined) return undefined - if (typeof value === "bigint") return value.toString() - return value - }), - ) -} - -export function defaultModelIDs }>(providers: Record) { - return mapValues(providers, (item) => 
sort(Object.values(item.models))[0].id) -} - -export interface Interface { - readonly list: () => Effect.Effect> - readonly getProvider: (providerID: ProviderID) => Effect.Effect - readonly getModel: (providerID: ProviderID, modelID: ModelID) => Effect.Effect - readonly getLanguage: (model: Model) => Effect.Effect - readonly closest: ( - providerID: ProviderID, - query: string[], - ) => Effect.Effect<{ providerID: ProviderID; modelID: string } | undefined> - readonly getSmallModel: (providerID: ProviderID) => Effect.Effect - readonly defaultModel: () => Effect.Effect<{ providerID: ProviderID; modelID: ModelID }> -} - -interface State { - models: Map - providers: Record - sdk: Map - modelLoaders: Record - varsLoaders: Record -} - -export class Service extends Context.Service()("@opencode/Provider") {} - -function cost(c: ModelsDev.Model["cost"]): Model["cost"] { - const result: Model["cost"] = { - input: c?.input ?? 0, - output: c?.output ?? 0, - cache: { - read: c?.cache_read ?? 0, - write: c?.cache_write ?? 0, - }, - } - if (c?.tiers) { - result.tiers = c.tiers.map((item) => ({ - input: item.input, - output: item.output, - cache: { - read: item.cache_read ?? 0, - write: item.cache_write ?? 0, - }, - tier: item.tier, - })) - } - if (c?.context_over_200k) { - result.experimentalOver200K = { - cache: { - read: c.context_over_200k.cache_read ?? 0, - write: c.context_over_200k.cache_write ?? 0, - }, - input: c.context_over_200k.input, - output: c.context_over_200k.output, - } - } - return result -} - -function fromModelsDevModel(provider: ModelsDev.Provider, model: ModelsDev.Model): Model { - const base: Model = { - id: ModelID.make(model.id), - providerID: ProviderID.make(provider.id), - name: model.name, - family: model.family, - api: { - id: model.id, - url: model.provider?.api ?? provider.api ?? "", - npm: model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible", - }, - status: model.status ?? 
"active", - headers: {}, - options: {}, - cost: cost(model.cost), - limit: { - context: model.limit.context, - input: model.limit.input, - output: model.limit.output, - }, - capabilities: { - temperature: model.temperature ?? false, - reasoning: model.reasoning ?? false, - attachment: model.attachment ?? false, - toolcall: model.tool_call ?? true, - input: { - text: model.modalities?.input?.includes("text") ?? false, - audio: model.modalities?.input?.includes("audio") ?? false, - image: model.modalities?.input?.includes("image") ?? false, - video: model.modalities?.input?.includes("video") ?? false, - pdf: model.modalities?.input?.includes("pdf") ?? false, - }, - output: { - text: model.modalities?.output?.includes("text") ?? false, - audio: model.modalities?.output?.includes("audio") ?? false, - image: model.modalities?.output?.includes("image") ?? false, - video: model.modalities?.output?.includes("video") ?? false, - pdf: model.modalities?.output?.includes("pdf") ?? false, - }, - interleaved: model.interleaved ?? false, - }, - release_date: model.release_date ?? "", - variants: {}, - } - - return { - ...base, - variants: mapValues(ProviderTransform.variants(base), (v) => v), - } -} - -export function fromModelsDevProvider(provider: ModelsDev.Provider): Info { - const models: Record = {} - for (const [key, model] of Object.entries(provider.models)) { - models[key] = fromModelsDevModel(provider, model) - for (const [mode, opts] of Object.entries(model.experimental?.modes ?? {})) { - const id = `${model.id}-${mode}` - const base = fromModelsDevModel(provider, model) - models[id] = { - ...base, - id: ModelID.make(id), - name: `${model.name} ${mode[0].toUpperCase()}${mode.slice(1)}`, - cost: opts.cost ? mergeDeep(base.cost, cost(opts.cost)) : base.cost, - options: opts.provider?.body - ? 
Object.fromEntries( - Object.entries(opts.provider.body).map(([k, v]) => [ - k.replace(/_([a-z])/g, (_, c) => c.toUpperCase()), - v, - ]), - ) - : base.options, - headers: opts.provider?.headers ?? base.headers, - } - } - } - return { - id: ProviderID.make(provider.id), - source: "custom", - name: provider.name, - env: [...(provider.env ?? [])], - options: {}, - models, - } -} - -const layer: Layer.Layer< - Service, - never, - Config.Service | Auth.Service | Plugin.Service | AppFileSystem.Service | Env.Service | ModelsDev.Service -> = Layer.effect( - Service, - Effect.gen(function* () { - const fs = yield* AppFileSystem.Service - const config = yield* Config.Service - const auth = yield* Auth.Service - const env = yield* Env.Service - const plugin = yield* Plugin.Service - const modelsDevSvc = yield* ModelsDev.Service - - const state = yield* InstanceState.make(() => - Effect.gen(function* () { - using _ = log.time("state") - const bridge = yield* EffectBridge.make() - const cfg = yield* config.get() - const modelsDev = yield* modelsDevSvc.get() - const database = mapValues(modelsDev, fromModelsDevProvider) - - const providers: Record = {} as Record - const languages = new Map() - const modelLoaders: { - [providerID: string]: CustomModelLoader - } = {} - const varsLoaders: { - [providerID: string]: CustomVarsLoader - } = {} - const sdk = new Map() - const discoveryLoaders: { - [providerID: string]: CustomDiscoverModels - } = {} - const dep = { - auth: (id: string) => auth.get(id).pipe(Effect.orDie), - config: () => config.get(), - env: () => env.all(), - get: (key: string) => env.get(key), - } - - log.info("init") - - function mergeProvider(providerID: ProviderID, provider: Partial) { - const existing = providers[providerID] - if (existing) { - // @ts-expect-error - providers[providerID] = mergeDeep(existing, provider) - return - } - const match = database[providerID] - if (!match) return - // @ts-expect-error - providers[providerID] = mergeDeep(match, provider) - 
} - - // load plugins first so config() hook runs before reading cfg.provider - const plugins = yield* plugin.list() - - // now read config providers - includes any modifications from plugin config() hook - const configProviders = Object.entries(cfg.provider ?? {}) - const disabled = new Set(cfg.disabled_providers ?? []) - const enabled = cfg.enabled_providers ? new Set(cfg.enabled_providers) : null - - function isProviderAllowed(providerID: ProviderID): boolean { - if (enabled && !enabled.has(providerID)) return false - if (disabled.has(providerID)) return false - return true - } - - for (const hook of plugins) { - const p = hook.provider - const models = p?.models - if (!p || !models) continue - - const providerID = ProviderID.make(p.id) - if (disabled.has(providerID)) continue - - const provider = database[providerID] - if (!provider) continue - const pluginAuth = yield* auth.get(providerID).pipe(Effect.orDie) - - provider.models = yield* Effect.promise(async () => { - const next = await models(toPublicInfo(provider), { auth: pluginAuth }) - return Object.fromEntries( - Object.entries(next).map(([id, model]) => [ - id, - { - ...model, - id: ModelID.make(id), - providerID, - }, - ]), - ) - }) - } - - // extend database from config - for (const [providerID, provider] of configProviders) { - const existing = database[providerID] - const parsed: Info = { - id: ProviderID.make(providerID), - name: provider.name ?? existing?.name ?? providerID, - env: provider.env ?? existing?.env ?? [], - options: mergeDeep(existing?.options ?? {}, provider.options ?? {}), - source: "config", - models: existing?.models ?? {}, - } - - for (const [modelID, model] of Object.entries(provider.models ?? {})) { - const existingModel = parsed.models[model.id ?? modelID] - const apiID = model.id ?? existingModel?.api.id ?? modelID - const apiNpm = - model.provider?.npm ?? - provider.npm ?? - existingModel?.api.npm ?? - modelsDev[providerID]?.npm ?? 
- "@ai-sdk/openai-compatible" - const name = iife(() => { - if (model.name) return model.name - if (model.id && model.id !== modelID) return modelID - return existingModel?.name ?? modelID - }) - const parsedModel: Model = { - id: ModelID.make(modelID), - api: { - id: apiID, - npm: apiNpm, - url: model.provider?.api ?? provider?.api ?? existingModel?.api.url ?? modelsDev[providerID]?.api ?? "", - }, - status: model.status ?? existingModel?.status ?? "active", - name, - providerID: ProviderID.make(providerID), - capabilities: { - temperature: model.temperature ?? existingModel?.capabilities.temperature ?? false, - reasoning: model.reasoning ?? existingModel?.capabilities.reasoning ?? false, - attachment: model.attachment ?? existingModel?.capabilities.attachment ?? false, - toolcall: model.tool_call ?? existingModel?.capabilities.toolcall ?? true, - input: { - text: model.modalities?.input?.includes("text") ?? existingModel?.capabilities.input.text ?? true, - audio: model.modalities?.input?.includes("audio") ?? existingModel?.capabilities.input.audio ?? false, - image: model.modalities?.input?.includes("image") ?? existingModel?.capabilities.input.image ?? false, - video: model.modalities?.input?.includes("video") ?? existingModel?.capabilities.input.video ?? false, - pdf: model.modalities?.input?.includes("pdf") ?? existingModel?.capabilities.input.pdf ?? false, - }, - output: { - text: model.modalities?.output?.includes("text") ?? existingModel?.capabilities.output.text ?? true, - audio: - model.modalities?.output?.includes("audio") ?? existingModel?.capabilities.output.audio ?? false, - image: - model.modalities?.output?.includes("image") ?? existingModel?.capabilities.output.image ?? false, - video: - model.modalities?.output?.includes("video") ?? existingModel?.capabilities.output.video ?? false, - pdf: model.modalities?.output?.includes("pdf") ?? existingModel?.capabilities.output.pdf ?? false, - }, - interleaved: - model.interleaved ?? 
- existingModel?.capabilities.interleaved ?? - (!existingModel && apiNpm === "@ai-sdk/openai-compatible" && apiID.includes("deepseek") - ? { field: "reasoning_content" } - : false), - }, - cost: { - input: model?.cost?.input ?? existingModel?.cost?.input ?? 0, - output: model?.cost?.output ?? existingModel?.cost?.output ?? 0, - cache: { - read: model?.cost?.cache_read ?? existingModel?.cost?.cache.read ?? 0, - write: model?.cost?.cache_write ?? existingModel?.cost?.cache.write ?? 0, - }, - }, - options: mergeDeep(existingModel?.options ?? {}, model.options ?? {}), - limit: { - context: model.limit?.context ?? existingModel?.limit?.context ?? 0, - input: model.limit?.input ?? existingModel?.limit?.input, - output: model.limit?.output ?? existingModel?.limit?.output ?? 0, - }, - headers: mergeDeep(existingModel?.headers ?? {}, model.headers ?? {}), - family: model.family ?? existingModel?.family ?? "", - release_date: model.release_date ?? existingModel?.release_date ?? "", - variants: {}, - } - const merged = mergeDeep(ProviderTransform.variants(parsedModel), model.variants ?? {}) - parsedModel.variants = mapValues( - pickBy(merged, (v) => !v.disabled), - (v) => omit(v, ["disabled"]), - ) - parsed.models[modelID] = parsedModel - } - database[providerID] = parsed - } - - // load env - const envs = yield* env.all() - for (const [id, provider] of Object.entries(database)) { - const providerID = ProviderID.make(id) - if (disabled.has(providerID)) continue - const apiKey = provider.env.map((item) => envs[item]).find(Boolean) - if (!apiKey) continue - mergeProvider(providerID, { - source: "env", - key: provider.env.length === 1 ? 
apiKey : undefined, - }) - } - - // load apikeys - const auths = yield* auth.all().pipe(Effect.orDie) - for (const [id, provider] of Object.entries(auths)) { - const providerID = ProviderID.make(id) - if (disabled.has(providerID)) continue - if (provider.type === "api") { - mergeProvider(providerID, { - source: "api", - key: provider.key, - }) - } - } - - // plugin auth loader - database now has entries for config providers - for (const plugin of plugins) { - if (!plugin.auth) continue - const providerID = ProviderID.make(plugin.auth.provider) - if (disabled.has(providerID)) continue - - const stored = yield* auth.get(providerID).pipe(Effect.orDie) - if (!stored) continue - if (!plugin.auth.loader) continue - - const options = yield* Effect.promise(() => - plugin.auth!.loader!( - () => bridge.promise(auth.get(providerID).pipe(Effect.orDie)) as any, - toPublicInfo(database[plugin.auth!.provider]), - ), - ) - const opts = options ?? {} - const patch: Partial = providers[providerID] ? { options: opts } : { source: "custom", options: opts } - mergeProvider(providerID, patch) - } - - for (const [id, fn] of Object.entries(custom(dep))) { - const providerID = ProviderID.make(id) - if (disabled.has(providerID)) continue - const data = database[providerID] - if (!data) { - log.error("Provider does not exist in model list " + providerID) - continue - } - const result = yield* fn(data) - if (result && (result.autoload || providers[providerID])) { - if (result.getModel) modelLoaders[providerID] = result.getModel - if (result.vars) varsLoaders[providerID] = result.vars - if (result.discoverModels) discoveryLoaders[providerID] = result.discoverModels - const opts = result.options ?? {} - const patch: Partial = providers[providerID] ? 
{ options: opts } : { source: "custom", options: opts } - mergeProvider(providerID, patch) - } - } - - // load config - re-apply with updated data - for (const [id, provider] of configProviders) { - const providerID = ProviderID.make(id) - const partial: Partial = { source: "config" } - if (provider.env) partial.env = provider.env - if (provider.name) partial.name = provider.name - if (provider.options) partial.options = provider.options - mergeProvider(providerID, partial) - } - - const gitlab = ProviderID.make("gitlab") - if (discoveryLoaders[gitlab] && providers[gitlab] && isProviderAllowed(gitlab)) { - yield* Effect.promise(async () => { - try { - const discovered = await discoveryLoaders[gitlab]() - for (const [modelID, model] of Object.entries(discovered)) { - if (!providers[gitlab].models[modelID]) { - providers[gitlab].models[modelID] = model - } - } - } catch (e) { - log.warn("state discovery error", { id: "gitlab", error: e }) - } - }) - } - - for (const [id, provider] of Object.entries(providers)) { - const providerID = ProviderID.make(id) - if (!isProviderAllowed(providerID)) { - delete providers[providerID] - continue - } - - const configProvider = cfg.provider?.[providerID] - - for (const [modelID, model] of Object.entries(provider.models)) { - model.api.id = model.api.id ?? model.id ?? 
modelID - if ( - modelID === "gpt-5-chat-latest" || - (providerID === ProviderID.openrouter && modelID === "openai/gpt-5-chat") - ) - delete provider.models[modelID] - if (model.status === "alpha" && !Flag.OPENCODE_ENABLE_EXPERIMENTAL_MODELS) delete provider.models[modelID] - if (model.status === "deprecated") delete provider.models[modelID] - if ( - (configProvider?.blacklist && configProvider.blacklist.includes(modelID)) || - (configProvider?.whitelist && !configProvider.whitelist.includes(modelID)) - ) - delete provider.models[modelID] - - if (!model.variants || Object.keys(model.variants).length === 0) { - model.variants = mapValues(ProviderTransform.variants(model), (v) => v) - } - - const configVariants = configProvider?.models?.[modelID]?.variants - if (configVariants && model.variants) { - const merged = mergeDeep(model.variants, configVariants) - model.variants = mapValues( - pickBy(merged, (v) => !v.disabled), - (v) => omit(v, ["disabled"]), - ) - } - } - - if (Object.keys(provider.models).length === 0) { - delete providers[providerID] - continue - } - - log.info("found", { providerID }) - } - - return { - models: languages, - providers, - sdk, - modelLoaders, - varsLoaders, - } - }), - ) - - const list = Effect.fn("Provider.list")(() => InstanceState.use(state, (s) => s.providers)) - - async function resolveSDK(model: Model, s: State, envs: Record) { - try { - using _ = log.time("getSDK", { - providerID: model.providerID, - }) - const provider = s.providers[model.providerID] - const options = { ...provider.options } - - if (model.providerID === "google-vertex" && !model.api.npm.includes("@ai-sdk/openai-compatible")) { - delete options.fetch - } - - if (model.api.npm.includes("@ai-sdk/openai-compatible") && options["includeUsage"] !== false) { - options["includeUsage"] = true - } - - const baseURL = iife(() => { - let url = - typeof options["baseURL"] === "string" && options["baseURL"] !== "" ? 
options["baseURL"] : model.api.url - if (!url) return - - const loader = s.varsLoaders[model.providerID] - if (loader) { - const vars = loader(options) - for (const [key, value] of Object.entries(vars)) { - const field = "${" + key + "}" - url = url.replaceAll(field, value) - } - } - - url = url.replace(/\$\{([^}]+)\}/g, (item, key) => { - const val = envs[String(key)] - return val ?? item - }) - return url - }) - - if (baseURL !== undefined) options["baseURL"] = baseURL - if (options["apiKey"] === undefined && provider.key) options["apiKey"] = provider.key - if (model.headers) - options["headers"] = { - ...options["headers"], - ...model.headers, - } - - const key = Hash.fast( - JSON.stringify({ - providerID: model.providerID, - npm: model.api.npm, - options, - }), - ) - const existing = s.sdk.get(key) - if (existing) return existing - - const customFetch = options["fetch"] - const chunkTimeout = options["chunkTimeout"] - delete options["chunkTimeout"] - - options["fetch"] = async (input: any, init?: BunFetchRequestInit) => { - const fetchFn = customFetch ?? fetch - const opts = init ?? {} - const chunkAbortCtl = typeof chunkTimeout === "number" && chunkTimeout > 0 ? new AbortController() : undefined - const signals: AbortSignal[] = [] - - if (opts.signal) signals.push(opts.signal) - if (chunkAbortCtl) signals.push(chunkAbortCtl.signal) - if (options["timeout"] !== undefined && options["timeout"] !== null && options["timeout"] !== false) - signals.push(AbortSignal.timeout(options["timeout"])) - - const combined = signals.length === 0 ? null : signals.length === 1 ? 
signals[0] : AbortSignal.any(signals) - if (combined) opts.signal = combined - - // Strip openai itemId metadata following what codex does - if ( - (model.api.npm === "@ai-sdk/openai" || model.api.npm === "@ai-sdk/azure") && - opts.body && - opts.method === "POST" - ) { - const body = JSON.parse(opts.body as string) - const keepIds = body.store === true - if (!keepIds && Array.isArray(body.input)) { - for (const item of body.input) { - if ("id" in item) { - delete item.id - } - } - opts.body = JSON.stringify(body) - } - } - - const res = await fetchFn(input, { - ...opts, - // @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682 - timeout: false, - }) - - if (!chunkAbortCtl) return res - return wrapSSE(res, chunkTimeout, chunkAbortCtl) - } - - const bundledLoader = BUNDLED_PROVIDERS[model.api.npm] - if (bundledLoader) { - log.info("using bundled provider", { - providerID: model.providerID, - pkg: model.api.npm, - }) - const factory = await bundledLoader() - const loaded = factory({ - name: model.providerID, - ...options, - }) - s.sdk.set(key, loaded) - return loaded as SDK - } - - let installedPath: string - if (!model.api.npm.startsWith("file://")) { - const item = await Npm.add(model.api.npm) - if (!item.entrypoint) throw new Error(`Package ${model.api.npm} has no import entrypoint`) - installedPath = item.entrypoint - } else { - log.info("loading local provider", { pkg: model.api.npm }) - installedPath = model.api.npm - } - - // `installedPath` is a local entry path or an existing `file://` URL. Normalize - // only path inputs so Node on Windows accepts the dynamic import. - const importSpec = installedPath.startsWith("file://") ? installedPath : pathToFileURL(installedPath).href - const mod = await import(importSpec) - - const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!] 
- const loaded = fn({ - name: model.providerID, - ...options, - }) - s.sdk.set(key, loaded) - return loaded as SDK - } catch (e) { - throw new InitError({ providerID: model.providerID }, { cause: e }) - } - } - - const getProvider = Effect.fn("Provider.getProvider")((providerID: ProviderID) => - InstanceState.use(state, (s) => s.providers[providerID]), - ) - - const getModel = Effect.fn("Provider.getModel")(function* (providerID: ProviderID, modelID: ModelID) { - const s = yield* InstanceState.get(state) - const provider = s.providers[providerID] - if (!provider) { - const available = Object.keys(s.providers) - const matches = fuzzysort.go(providerID, available, { limit: 3, threshold: -10000 }) - throw new ModelNotFoundError({ providerID, modelID, suggestions: matches.map((m) => m.target) }) - } - - const info = provider.models[modelID] - if (!info) { - const available = Object.keys(provider.models) - const matches = fuzzysort.go(modelID, available, { limit: 3, threshold: -10000 }) - throw new ModelNotFoundError({ providerID, modelID, suggestions: matches.map((m) => m.target) }) - } - return info - }) - - const getLanguage = Effect.fn("Provider.getLanguage")(function* (model: Model) { - const s = yield* InstanceState.get(state) - const envs = yield* env.all() - const key = `${model.providerID}/${model.id}` - if (s.models.has(key)) return s.models.get(key)! - - return yield* Effect.promise(async () => { - const provider = s.providers[model.providerID] - const sdk = await resolveSDK(model, s, envs) - - try { - const language = s.modelLoaders[model.providerID] - ? 
await s.modelLoaders[model.providerID](sdk, model.api.id, { - ...provider.options, - ...model.options, - }) - : sdk.languageModel(model.api.id) - s.models.set(key, language) - return language - } catch (e) { - if (e instanceof NoSuchModelError) - throw new ModelNotFoundError( - { - modelID: model.id, - providerID: model.providerID, - }, - { cause: e }, - ) - throw e - } - }) - }) - - const closest = Effect.fn("Provider.closest")(function* (providerID: ProviderID, query: string[]) { - const s = yield* InstanceState.get(state) - const provider = s.providers[providerID] - if (!provider) return undefined - for (const item of query) { - for (const modelID of Object.keys(provider.models)) { - if (modelID.includes(item)) return { providerID, modelID } - } - } - return undefined - }) - - const getSmallModel = Effect.fn("Provider.getSmallModel")(function* (providerID: ProviderID) { - const cfg = yield* config.get() - - if (cfg.small_model) { - const parsed = parseModel(cfg.small_model) - return yield* getModel(parsed.providerID, parsed.modelID) - } - - const s = yield* InstanceState.get(state) - const provider = s.providers[providerID] - if (!provider) return undefined - - let priority = [ - "claude-haiku-4-5", - "claude-haiku-4.5", - "3-5-haiku", - "3.5-haiku", - "gemini-3-flash", - "gemini-2.5-flash", - "gpt-5-nano", - ] - if (providerID.startsWith("opencode")) { - priority = ["gpt-5-nano"] - } - if (providerID.startsWith("github-copilot")) { - priority = ["gpt-5-mini", "claude-haiku-4.5", ...priority] - } - for (const item of priority) { - if (providerID === ProviderID.amazonBedrock) { - const crossRegionPrefixes = ["global.", "us.", "eu."] - const candidates = Object.keys(provider.models).filter((m) => m.includes(item)) - - const globalMatch = candidates.find((m) => m.startsWith("global.")) - if (globalMatch) return yield* getModel(providerID, ModelID.make(globalMatch)) - - const region = provider.options?.region - if (region) { - const regionPrefix = 
region.split("-")[0] - if (regionPrefix === "us" || regionPrefix === "eu") { - const regionalMatch = candidates.find((m) => m.startsWith(`${regionPrefix}.`)) - if (regionalMatch) return yield* getModel(providerID, ModelID.make(regionalMatch)) - } - } - - const unprefixed = candidates.find((m) => !crossRegionPrefixes.some((p) => m.startsWith(p))) - if (unprefixed) return yield* getModel(providerID, ModelID.make(unprefixed)) - } else { - for (const model of Object.keys(provider.models)) { - if (model.includes(item)) return yield* getModel(providerID, ModelID.make(model)) - } - } - } - - return undefined - }) - - const defaultModel = Effect.fn("Provider.defaultModel")(function* () { - const cfg = yield* config.get() - if (cfg.model) return parseModel(cfg.model) - - const s = yield* InstanceState.get(state) - const recent = yield* fs.readJson(path.join(Global.Path.state, "model.json")).pipe( - Effect.map((x): { providerID: ProviderID; modelID: ModelID }[] => { - if (!isRecord(x) || !Array.isArray(x.recent)) return [] - return x.recent.flatMap((item) => { - if (!isRecord(item)) return [] - if (typeof item.providerID !== "string") return [] - if (typeof item.modelID !== "string") return [] - return [{ providerID: ProviderID.make(item.providerID), modelID: ModelID.make(item.modelID) }] - }) - }), - Effect.catch(() => Effect.succeed([] as { providerID: ProviderID; modelID: ModelID }[])), - ) - for (const entry of recent) { - const provider = s.providers[entry.providerID] - if (!provider) continue - if (!provider.models[entry.modelID]) continue - return { providerID: entry.providerID, modelID: entry.modelID } - } - - const provider = Object.values(s.providers).find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id)) - if (!provider) throw new Error("no providers found") - const [model] = sort(Object.values(provider.models)) - if (!model) throw new Error("no models found") - return { - providerID: provider.id, - modelID: model.id, - } - }) - - return 
Service.of({ list, getProvider, getModel, getLanguage, closest, getSmallModel, defaultModel }) - }), -) - -export const defaultLayer = Layer.suspend(() => - layer.pipe( - Layer.provide(AppFileSystem.defaultLayer), - Layer.provide(Env.defaultLayer), - Layer.provide(Config.defaultLayer), - Layer.provide(Auth.defaultLayer), - Layer.provide(Plugin.defaultLayer), - Layer.provide(ModelsDev.defaultLayer), - ), -) - -const priority = ["gpt-5", "claude-sonnet-4", "big-pickle", "gemini-3-pro"] -export function sort(models: T[]) { - return sortBy( - models, - [(model) => priority.findIndex((filter) => model.id.includes(filter)), "desc"], - [(model) => (model.id.includes("latest") ? 0 : 1), "asc"], - [(model) => model.id, "desc"], - ) -} - -export function parseModel(model: string) { - const [providerID, ...rest] = model.split("/") - return { - providerID: ProviderID.make(providerID), - modelID: ModelID.make(rest.join("/")), - } -} - -export const ModelNotFoundError = NamedError.create("ProviderModelNotFoundError", { - providerID: ProviderID, - modelID: ModelID, - suggestions: Schema.optional(Schema.Array(Schema.String)), -}) - -export const InitError = NamedError.create("ProviderInitError", { - providerID: ProviderID, -}) - -export * as Provider from "./provider" diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts deleted file mode 100644 index f3c160fe73c2..000000000000 --- a/packages/opencode/src/session/compaction.ts +++ /dev/null @@ -1,652 +0,0 @@ -import { BusEvent } from "@/bus/bus-event" -import { Bus } from "@/bus" -import * as Session from "./session" -import { SessionID, MessageID, PartID } from "./schema" -import { Provider } from "@/provider/provider" -import { MessageV2 } from "./message-v2" -import { Token } from "@/util/token" -import * as Log from "@opencode-ai/core/util/log" -import { SessionProcessor } from "./processor" -import { Agent } from "@/agent/agent" -import { Plugin } from "@/plugin" -import { 
Config } from "@/config/config" -import { NotFoundError } from "@/storage/storage" -import { ModelID, ProviderID } from "@/provider/schema" -import { Effect, Layer, Context, Schema } from "effect" -import * as DateTime from "effect/DateTime" -import { InstanceState } from "@/effect/instance-state" -import { isOverflow as overflow, usable } from "./overflow" -import { makeRuntime } from "@/effect/run-service" -import { serviceUse } from "@/effect/service-use" -import { SyncEvent } from "@/sync" -import { SessionEvent } from "@/v2/session-event" -import { Flag } from "@opencode-ai/core/flag/flag" - -const log = Log.create({ service: "session.compaction" }) - -export const Event = { - Compacted: BusEvent.define( - "session.compacted", - Schema.Struct({ - sessionID: SessionID, - }), - ), -} - -export const PRUNE_MINIMUM = 20_000 -export const PRUNE_PROTECT = 40_000 -const TOOL_OUTPUT_MAX_CHARS = 2_000 -const PRUNE_PROTECTED_TOOLS = ["skill"] -const DEFAULT_TAIL_TURNS = 2 -const MIN_PRESERVE_RECENT_TOKENS = 2_000 -const MAX_PRESERVE_RECENT_TOKENS = 8_000 -const SUMMARY_TEMPLATE = `Output exactly the Markdown structure shown inside