From 9cab54e94cee87036d231f5fa3346594f7155676 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sun, 15 Mar 2026 01:25:37 -0700 Subject: [PATCH 1/6] feat: add prompt enhancement feature Add AI-powered prompt enhancement that rewrites rough user prompts into clearer, more specific versions before sending to the main model. - Add `enhancePrompt()` utility using a small/cheap model to polish prompts - Register `prompt.enhance` TUI command with `i` keybind - Show "enhance" hint in the bottom bar alongside agents/commands - Add `prompt_enhance` keybind to config schema - Add unit tests for the `clean()` text sanitization function Inspired by KiloCode's prompt enhancement feature. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../opencode/src/altimate/enhance-prompt.ts | 95 +++++++++++++++++++ .../cli/cmd/tui/component/prompt/index.tsx | 52 ++++++++++ packages/opencode/src/config/config.ts | 3 + .../test/altimate/enhance-prompt.test.ts | 41 ++++++++ 4 files changed, 191 insertions(+) create mode 100644 packages/opencode/src/altimate/enhance-prompt.ts create mode 100644 packages/opencode/test/altimate/enhance-prompt.test.ts diff --git a/packages/opencode/src/altimate/enhance-prompt.ts b/packages/opencode/src/altimate/enhance-prompt.ts new file mode 100644 index 0000000000..24fd43d702 --- /dev/null +++ b/packages/opencode/src/altimate/enhance-prompt.ts @@ -0,0 +1,95 @@ +// altimate_change - new file +import { Provider } from "@/provider/provider" +import { LLM } from "@/session/llm" +import { Agent } from "@/agent/agent" +import { Log } from "@/util/log" +import { MessageV2 } from "@/session/message-v2" + +const log = Log.create({ service: "enhance-prompt" }) + +const ENHANCE_SYSTEM_PROMPT = `You are a prompt enhancement specialist for a data engineering coding agent. + +Your job is to take a user's rough prompt and rewrite it into a clearer, more specific version that will produce better results from the coding agent. 
+ +Rules: +- Reply with ONLY the enhanced prompt text — no conversation, explanations, lead-in, bullet points, placeholders, or surrounding quotes +- Preserve the user's intent exactly — do not add requirements they didn't ask for +- Make implicit requirements explicit (e.g. if they say "fix the bug", specify what kind of verification to do) +- Add structure when the prompt is vague (e.g. "look at X first, then modify Y") +- Keep the enhanced prompt concise — longer is not better +- If the original prompt is already clear and specific, return it unchanged +- Do not wrap your response in markdown code fences or quotes` + +export function clean(text: string) { + return text + .replace(/^```\w*\n?|```$/g, "") + .trim() + .replace(/^(['"])([\s\S]*)\1$/, "$2") + .trim() +} + +export async function enhancePrompt(text: string): Promise { + if (!text.trim()) return text + + log.info("enhancing", { length: text.length }) + + const defaultModel = await Provider.defaultModel() + const model = + (await Provider.getSmallModel(defaultModel.providerID)) ?? 
+ (await Provider.getModel(defaultModel.providerID, defaultModel.modelID)) + + const agent: Agent.Info = { + name: "enhance-prompt", + mode: "primary", + hidden: true, + options: {}, + permission: [], + prompt: ENHANCE_SYSTEM_PROMPT, + temperature: 0.7, + } + + const user: MessageV2.User = { + id: "enhance-prompt" as any, + sessionID: "enhance-prompt" as any, + role: "user", + time: { created: Date.now() }, + agent: "enhance-prompt", + model: { + providerID: model.providerID, + modelID: model.id, + }, + } + + const stream = await LLM.stream({ + agent, + user, + system: [], + small: true, + tools: {}, + model, + abort: new AbortController().signal, + sessionID: "enhance-prompt" as any, + retries: 2, + messages: [ + { + role: "user", + content: text, + }, + ], + }) + + const result = await stream.text.catch((err) => { + log.error("failed to enhance prompt", { error: err }) + return undefined + }) + + if (!result) return text + + const cleaned = clean( + result + .replace(/[\s\S]*?<\/think>\s*/g, "") + .trim(), + ) + + return cleaned || text +} diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx index c85426cc24..4c11538997 100644 --- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx @@ -34,6 +34,9 @@ import { useToast } from "../../ui/toast" import { useKV } from "../../context/kv" import { useTextareaKeybindings } from "../textarea-keybindings" import { DialogSkill } from "../dialog-skill" +// altimate_change start - import prompt enhancement +import { enhancePrompt } from "@/altimate/enhance-prompt" +// altimate_change end export type PromptProps = { sessionID?: string @@ -194,6 +197,50 @@ export function Prompt(props: PromptProps) { dialog.clear() }, }, + // altimate_change start - add prompt enhance command + { + title: "Enhance prompt", + value: "prompt.enhance", + keybind: "prompt_enhance", + 
category: "Prompt", + enabled: !!store.prompt.input, + onSelect: async (dialog) => { + if (!store.prompt.input.trim()) return + dialog.clear() + const original = store.prompt.input + toast.show({ + message: "Enhancing prompt...", + variant: "info", + duration: 2000, + }) + try { + const enhanced = await enhancePrompt(original) + if (enhanced !== original) { + input.setText(enhanced) + setStore("prompt", "input", enhanced) + input.gotoBufferEnd() + toast.show({ + message: "Prompt enhanced", + variant: "success", + duration: 2000, + }) + } else { + toast.show({ + message: "Prompt already looks good", + variant: "info", + duration: 2000, + }) + } + } catch { + toast.show({ + message: "Failed to enhance prompt", + variant: "error", + duration: 3000, + }) + } + }, + }, + // altimate_change end { title: "Paste", value: "prompt.paste", @@ -1155,6 +1202,11 @@ export function Prompt(props: PromptProps) { {keybind.print("command_list")} commands + {/* altimate_change start - show enhance hint */} + + {keybind.print("prompt_enhance")} enhance + + {/* altimate_change end */} diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 1ab39ad18a..d58592f2dd 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -866,6 +866,9 @@ export namespace Config { agent_cycle: z.string().optional().default("tab").describe("Next agent"), agent_cycle_reverse: z.string().optional().default("shift+tab").describe("Previous agent"), variant_cycle: z.string().optional().default("ctrl+t").describe("Cycle model variants"), + // altimate_change start - add prompt enhance keybind + prompt_enhance: z.string().optional().default("i").describe("Enhance prompt with AI before sending"), + // altimate_change end input_clear: z.string().optional().default("ctrl+c").describe("Clear input field"), input_paste: z.string().optional().default("ctrl+v").describe("Paste from clipboard"), input_submit: 
z.string().optional().default("return").describe("Submit input"), diff --git a/packages/opencode/test/altimate/enhance-prompt.test.ts b/packages/opencode/test/altimate/enhance-prompt.test.ts new file mode 100644 index 0000000000..53142adb24 --- /dev/null +++ b/packages/opencode/test/altimate/enhance-prompt.test.ts @@ -0,0 +1,41 @@ +import { describe, expect, test } from "bun:test" +import { clean } from "../../src/altimate/enhance-prompt" + +describe("enhance-prompt clean()", () => { + test("strips markdown code fences", () => { + expect(clean("```\nfixed prompt\n```")).toBe("fixed prompt") + }) + + test("strips code fences with language tag", () => { + expect(clean("```text\nenhanced prompt\n```")).toBe("enhanced prompt") + }) + + test("strips surrounding single quotes", () => { + expect(clean("'enhanced prompt'")).toBe("enhanced prompt") + }) + + test("strips surrounding double quotes", () => { + expect(clean('"enhanced prompt"')).toBe("enhanced prompt") + }) + + test("trims whitespace", () => { + expect(clean(" enhanced prompt ")).toBe("enhanced prompt") + }) + + test("handles combined wrapping", () => { + expect(clean('```\n"enhanced prompt"\n```')).toBe("enhanced prompt") + }) + + test("returns plain text unchanged", () => { + expect(clean("fix the auth bug")).toBe("fix the auth bug") + }) + + test("handles empty string", () => { + expect(clean("")).toBe("") + }) + + test("handles multiline content", () => { + const input = "```\nFirst do X.\nThen do Y.\n```" + expect(clean(input)).toBe("First do X.\nThen do Y.") + }) +}) From 1ddc4f5f82ab7a404774162c7992161c8e1f25c5 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sun, 15 Mar 2026 01:35:47 -0700 Subject: [PATCH 2/6] feat: improve enhancement prompt with research-backed approach and add auto-enhance config - Rewrite system prompt based on AutoPrompter research (5 missing info categories: specifics, action plan, scope, verification, intent) - Add few-shot examples for data engineering tasks (dbt, SQL, 
migrations) - Add `experimental.auto_enhance_prompt` config flag (default: false) - Auto-enhance normal prompts on submit when enabled (skips shell/slash) - Export `isAutoEnhanceEnabled()` for config-driven behavior Co-Authored-By: Claude Opus 4.6 (1M context) --- .../opencode/src/altimate/enhance-prompt.ts | 58 +++++++++++++++---- .../cli/cmd/tui/component/prompt/index.tsx | 16 ++++- packages/opencode/src/config/config.ts | 8 +++ 3 files changed, 71 insertions(+), 11 deletions(-) diff --git a/packages/opencode/src/altimate/enhance-prompt.ts b/packages/opencode/src/altimate/enhance-prompt.ts index 24fd43d702..03b4b8fb69 100644 --- a/packages/opencode/src/altimate/enhance-prompt.ts +++ b/packages/opencode/src/altimate/enhance-prompt.ts @@ -2,23 +2,52 @@ import { Provider } from "@/provider/provider" import { LLM } from "@/session/llm" import { Agent } from "@/agent/agent" +import { Config } from "@/config/config" import { Log } from "@/util/log" import { MessageV2 } from "@/session/message-v2" const log = Log.create({ service: "enhance-prompt" }) -const ENHANCE_SYSTEM_PROMPT = `You are a prompt enhancement specialist for a data engineering coding agent. +// Research-backed enhancement prompt based on: +// - AutoPrompter (arxiv 2504.20196): 5 missing info categories that cause 27% lower edit correctness +// - Meta-prompting best practices: clear role, structural scaffolding, few-shot examples +// - KiloCode's enhance-prompt implementation: lightweight model, preserve intent, no wrapping +const ENHANCE_SYSTEM_PROMPT = `You are a prompt rewriter for a data engineering coding agent. The agent can read/write files, run SQL, manage dbt models, inspect schemas, and execute shell commands. -Your job is to take a user's rough prompt and rewrite it into a clearer, more specific version that will produce better results from the coding agent. +Your task: rewrite the user's rough prompt into a clearer version that will produce better results. 
Reply with ONLY the enhanced prompt — no explanations, no wrapping in quotes or code fences. -Rules: -- Reply with ONLY the enhanced prompt text — no conversation, explanations, lead-in, bullet points, placeholders, or surrounding quotes -- Preserve the user's intent exactly — do not add requirements they didn't ask for -- Make implicit requirements explicit (e.g. if they say "fix the bug", specify what kind of verification to do) -- Add structure when the prompt is vague (e.g. "look at X first, then modify Y") -- Keep the enhanced prompt concise — longer is not better -- If the original prompt is already clear and specific, return it unchanged -- Do not wrap your response in markdown code fences or quotes` +## What to improve + +Research shows developer prompts commonly lack these five categories of information. Add them when missing: + +1. **Specifics** — Add concrete details the agent needs: table names, column names, file paths, SQL dialects, error messages. If the user references "the model" or "the table", keep the reference but clarify what the agent should look for. +2. **Action plan** — When the prompt is vague ("fix this"), add explicit steps: investigate first, then modify, then verify. Structure as a logical sequence. +3. **Scope** — Clarify what files, models, or queries are in scope. If ambiguous, instruct the agent to identify the scope first. +4. **Verification** — Add a verification step when the user implies correctness matters (fixes, migrations, refactors). E.g. "run the query to confirm results" or "run dbt test after changes". +5. **Intent clarification** — When the request could be interpreted multiple ways, pick the most likely interpretation and make it explicit. 
+ +## Rules + +- Preserve the user's intent exactly — never add requirements they didn't ask for +- Keep it concise — a good enhancement adds 1-3 sentences, not paragraphs +- If the prompt is already clear and specific, return it unchanged +- Write in the same tone/style as the user (casual stays casual, technical stays technical) +- Never add generic filler like "please ensure best practices" or "follow coding standards" +- Do not mention yourself or the enhancement process + +## Examples + +User: "fix the failing test" +Enhanced: "Investigate the failing test — run the test suite first to identify which test is failing and why, then examine the relevant source code, apply a fix, and re-run the test to confirm it passes." + +User: "add a created_at column to the users model" +Enhanced: "Add a created_at timestamp column to the users dbt model. Update the SQL definition and the schema.yml entry. Use the appropriate timestamp type for the target warehouse." + +User: "why is this query slow" +Enhanced: "Analyze why the query is slow. Run EXPLAIN/query profile to identify bottlenecks (full table scans, missing indexes, expensive joins). Suggest specific optimizations based on the findings." + +User: "migrate this from snowflake to bigquery" +Enhanced: "Migrate the SQL from Snowflake dialect to BigQuery dialect. Convert Snowflake-specific functions (e.g. DATEADD, IFF, QUALIFY) to BigQuery equivalents. Preserve the query logic and verify the translated query is syntactically valid."` export function clean(text: string) { return text @@ -28,6 +57,15 @@ export function clean(text: string) { .trim() } +/** + * Check if auto-enhance is enabled in config. + * Defaults to false — user must explicitly opt in. 
+ */ +export async function isAutoEnhanceEnabled(): Promise { + const cfg = await Config.get() + return cfg.experimental?.auto_enhance_prompt === true +} + export async function enhancePrompt(text: string): Promise { if (!text.trim()) return text diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx index 4c11538997..e5eea7055b 100644 --- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx @@ -35,7 +35,7 @@ import { useKV } from "../../context/kv" import { useTextareaKeybindings } from "../textarea-keybindings" import { DialogSkill } from "../dialog-skill" // altimate_change start - import prompt enhancement -import { enhancePrompt } from "@/altimate/enhance-prompt" +import { enhancePrompt, isAutoEnhanceEnabled } from "@/altimate/enhance-prompt" // altimate_change end export type PromptProps = { @@ -630,6 +630,20 @@ export function Prompt(props: PromptProps) { // Filter out text parts (pasted content) since they're now expanded inline const nonTextParts = store.prompt.parts.filter((part) => part.type !== "text") + // altimate_change start - auto-enhance prompt before sending (if enabled) + // Only enhance normal prompts, not shell commands or slash commands + if (store.mode === "normal" && !inputText.startsWith("/")) { + try { + const autoEnhance = await isAutoEnhanceEnabled() + if (autoEnhance) { + inputText = await enhancePrompt(inputText) + } + } catch { + // Enhancement failure should never block prompt submission + } + } + // altimate_change end + // Capture mode before it gets reset const currentMode = store.mode const variant = local.model.variant.current() diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index d58592f2dd..fca9982cc9 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -1229,6 +1229,14 @@ 
export namespace Config { .positive() .optional() .describe("Timeout in milliseconds for model context protocol (MCP) requests"), + // altimate_change start - auto-enhance prompt config + auto_enhance_prompt: z + .boolean() + .optional() + .describe( + "Automatically enhance prompts with AI before sending (default: false). Uses a small model to rewrite rough prompts into clearer versions.", + ), + // altimate_change end }) .optional(), }) From ea709584692f87153a7f7402515761906b499288 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sun, 15 Mar 2026 11:12:24 -0700 Subject: [PATCH 3/6] fix: address code review findings for prompt enhancement - Add 15s timeout via `AbortController` to prevent indefinite hangs - Extract `ENHANCE_ID` constant and document synthetic `as any` casts - Fix `clean()` regex to match full-string code fences only (avoids stripping inner code blocks) - Export `stripThinkTags()` as separate utility for testability - Move auto-enhance before extmark expansion (prevents sending expanded paste content to the small model) - Add toast feedback and error logging for auto-enhance path - Update `store.prompt.input` after enhancement so history is accurate - Add outer try/catch with logging to `enhancePrompt()` - Expand tests from 9 to 30: `stripThinkTags()`, `clean()` edge cases, combined pipeline tests Co-Authored-By: Claude Opus 4.6 (1M context) --- .../opencode/src/altimate/enhance-prompt.ts | 141 ++++++++++-------- .../cli/cmd/tui/component/prompt/index.tsx | 34 +++-- .../test/altimate/enhance-prompt.test.ts | 104 ++++++++++++- 3 files changed, 201 insertions(+), 78 deletions(-) diff --git a/packages/opencode/src/altimate/enhance-prompt.ts b/packages/opencode/src/altimate/enhance-prompt.ts index 03b4b8fb69..7806de16ce 100644 --- a/packages/opencode/src/altimate/enhance-prompt.ts +++ b/packages/opencode/src/altimate/enhance-prompt.ts @@ -8,6 +8,10 @@ import { MessageV2 } from "@/session/message-v2" const log = Log.create({ service: "enhance-prompt" 
}) +const ENHANCE_TIMEOUT_MS = 15_000 +// Synthetic ID for enhancement requests — not a real session/message +const ENHANCE_ID = "enhance-prompt" as any + // Research-backed enhancement prompt based on: // - AutoPrompter (arxiv 2504.20196): 5 missing info categories that cause 27% lower edit correctness // - Meta-prompting best practices: clear role, structural scaffolding, few-shot examples @@ -49,9 +53,14 @@ Enhanced: "Analyze why the query is slow. Run EXPLAIN/query profile to identify User: "migrate this from snowflake to bigquery" Enhanced: "Migrate the SQL from Snowflake dialect to BigQuery dialect. Convert Snowflake-specific functions (e.g. DATEADD, IFF, QUALIFY) to BigQuery equivalents. Preserve the query logic and verify the translated query is syntactically valid."` +export function stripThinkTags(text: string) { + return text.replace(/[\s\S]*?<\/think>\s*/g, "") +} + export function clean(text: string) { return text - .replace(/^```\w*\n?|```$/g, "") + .trim() + .replace(/^```\w*\n([\s\S]*?)\n```$/, "$1") .trim() .replace(/^(['"])([\s\S]*)\1$/, "$2") .trim() @@ -67,67 +76,73 @@ export async function isAutoEnhanceEnabled(): Promise { } export async function enhancePrompt(text: string): Promise { - if (!text.trim()) return text - - log.info("enhancing", { length: text.length }) - - const defaultModel = await Provider.defaultModel() - const model = - (await Provider.getSmallModel(defaultModel.providerID)) ?? 
- (await Provider.getModel(defaultModel.providerID, defaultModel.modelID)) - - const agent: Agent.Info = { - name: "enhance-prompt", - mode: "primary", - hidden: true, - options: {}, - permission: [], - prompt: ENHANCE_SYSTEM_PROMPT, - temperature: 0.7, - } - - const user: MessageV2.User = { - id: "enhance-prompt" as any, - sessionID: "enhance-prompt" as any, - role: "user", - time: { created: Date.now() }, - agent: "enhance-prompt", - model: { - providerID: model.providerID, - modelID: model.id, - }, - } - - const stream = await LLM.stream({ - agent, - user, - system: [], - small: true, - tools: {}, - model, - abort: new AbortController().signal, - sessionID: "enhance-prompt" as any, - retries: 2, - messages: [ - { - role: "user", - content: text, + const trimmed = text.trim() + if (!trimmed) return text + + log.info("enhancing", { length: trimmed.length }) + + const controller = new AbortController() + const timeout = setTimeout(() => controller.abort(), ENHANCE_TIMEOUT_MS) + + try { + const defaultModel = await Provider.defaultModel() + const model = + (await Provider.getSmallModel(defaultModel.providerID)) ?? 
+ (await Provider.getModel(defaultModel.providerID, defaultModel.modelID)) + + const agent: Agent.Info = { + name: "enhance-prompt", + mode: "primary", + hidden: true, + options: {}, + permission: [], + prompt: ENHANCE_SYSTEM_PROMPT, + temperature: 0.7, + } + + const user: MessageV2.User = { + id: ENHANCE_ID, + sessionID: ENHANCE_ID, + role: "user", + time: { created: Date.now() }, + agent: "enhance-prompt", + model: { + providerID: model.providerID, + modelID: model.id, }, - ], - }) - - const result = await stream.text.catch((err) => { - log.error("failed to enhance prompt", { error: err }) - return undefined - }) - - if (!result) return text - - const cleaned = clean( - result - .replace(/[\s\S]*?<\/think>\s*/g, "") - .trim(), - ) - - return cleaned || text + } + + const stream = await LLM.stream({ + agent, + user, + system: [], + small: true, + tools: {}, + model, + abort: controller.signal, + sessionID: ENHANCE_ID, + retries: 2, + messages: [ + { + role: "user", + content: trimmed, + }, + ], + }) + + const result = await stream.text.catch((err) => { + log.error("failed to enhance prompt", { error: err }) + return undefined + }) + + if (!result) return text + + const cleaned = clean(stripThinkTags(result).trim()) + return cleaned || text + } catch (err) { + log.error("enhance prompt failed", { error: err }) + return text + } finally { + clearTimeout(timeout) + } } diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx index e5eea7055b..d9224deee1 100644 --- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx @@ -611,6 +611,26 @@ export function Prompt(props: PromptProps) { const messageID = MessageID.ascending() let inputText = store.prompt.input + // altimate_change start - auto-enhance prompt before expanding paste text + // Only enhance the raw user text, not shell commands or slash commands + if 
(store.mode === "normal" && !inputText.startsWith("/")) { + try { + const autoEnhance = await isAutoEnhanceEnabled() + if (autoEnhance) { + toast.show({ message: "Enhancing prompt...", variant: "info", duration: 2000 }) + const enhanced = await enhancePrompt(inputText) + if (enhanced !== inputText) { + inputText = enhanced + setStore("prompt", "input", enhanced) + } + } + } catch (err) { + // Enhancement failure should never block prompt submission + console.error("auto-enhance failed, using original prompt", err) + } + } + // altimate_change end + // Expand pasted text inline before submitting const allExtmarks = input.extmarks.getAllForTypeId(promptPartTypeId) const sortedExtmarks = allExtmarks.sort((a: { start: number }, b: { start: number }) => b.start - a.start) @@ -630,20 +650,6 @@ export function Prompt(props: PromptProps) { // Filter out text parts (pasted content) since they're now expanded inline const nonTextParts = store.prompt.parts.filter((part) => part.type !== "text") - // altimate_change start - auto-enhance prompt before sending (if enabled) - // Only enhance normal prompts, not shell commands or slash commands - if (store.mode === "normal" && !inputText.startsWith("/")) { - try { - const autoEnhance = await isAutoEnhanceEnabled() - if (autoEnhance) { - inputText = await enhancePrompt(inputText) - } - } catch { - // Enhancement failure should never block prompt submission - } - } - // altimate_change end - // Capture mode before it gets reset const currentMode = store.mode const variant = local.model.variant.current() diff --git a/packages/opencode/test/altimate/enhance-prompt.test.ts b/packages/opencode/test/altimate/enhance-prompt.test.ts index 53142adb24..816d145985 100644 --- a/packages/opencode/test/altimate/enhance-prompt.test.ts +++ b/packages/opencode/test/altimate/enhance-prompt.test.ts @@ -1,5 +1,5 @@ import { describe, expect, test } from "bun:test" -import { clean } from "../../src/altimate/enhance-prompt" +import { clean, 
stripThinkTags } from "../../src/altimate/enhance-prompt" describe("enhance-prompt clean()", () => { test("strips markdown code fences", () => { @@ -38,4 +38,106 @@ describe("enhance-prompt clean()", () => { const input = "```\nFirst do X.\nThen do Y.\n```" expect(clean(input)).toBe("First do X.\nThen do Y.") }) + + test("handles code fences with trailing whitespace", () => { + expect(clean(" ```\nenhanced prompt\n``` ")).toBe("enhanced prompt") + }) + + test("preserves inner code blocks", () => { + const input = "Run this:\n```sql\nSELECT 1\n```\nThen verify." + expect(clean(input)).toBe("Run this:\n```sql\nSELECT 1\n```\nThen verify.") + }) + + test("handles whitespace-only string", () => { + expect(clean(" ")).toBe("") + }) + + test("handles code fence with no newline before content", () => { + expect(clean("```enhanced prompt```")).toBe("```enhanced prompt```") + }) + + test("handles single backtick quotes (not code fences)", () => { + expect(clean("`enhanced prompt`")).toBe("`enhanced prompt`") + }) + + test("strips quotes from multiline content", () => { + expect(clean('"First line.\nSecond line."')).toBe("First line.\nSecond line.") + }) + + test("does not strip mismatched quotes", () => { + expect(clean("'enhanced prompt\"")).toBe("'enhanced prompt\"") + }) + + test("handles nested quotes inside code fences", () => { + // After fence stripping, quote stripping also triggers on surrounding quotes + expect(clean('```\n\'inner quoted\'\n```')).toBe("inner quoted") + }) +}) + +describe("enhance-prompt stripThinkTags()", () => { + test("removes single think block", () => { + expect(stripThinkTags("reasoning hereactual prompt")).toBe("actual prompt") + }) + + test("removes think block with trailing whitespace", () => { + expect(stripThinkTags("reasoning\n\nactual prompt")).toBe("actual prompt") + }) + + test("removes multiple think blocks", () => { + const input = "firstpart one secondpart two" + expect(stripThinkTags(input)).toBe("part one part two") + }) + + 
test("handles multiline think content", () => { + const input = "\nStep 1: analyze\nStep 2: rewrite\n\nEnhanced prompt here" + expect(stripThinkTags(input)).toBe("Enhanced prompt here") + }) + + test("returns text unchanged when no think tags", () => { + expect(stripThinkTags("fix the auth bug")).toBe("fix the auth bug") + }) + + test("handles empty string", () => { + expect(stripThinkTags("")).toBe("") + }) + + test("handles think tags with no content after", () => { + expect(stripThinkTags("reasoning only")).toBe("") + }) + + test("handles nested angle brackets inside think tags", () => { + expect(stripThinkTags("check if x < 5 and y > 3result")).toBe("result") + }) +}) + +describe("enhance-prompt combined pipeline", () => { + test("strips think tags then code fences then quotes", () => { + const input = 'reasoning```\n"enhanced prompt"\n```' + const result = clean(stripThinkTags(input).trim()) + expect(result).toBe("enhanced prompt") + }) + + test("strips think tags and preserves plain text", () => { + const input = "let me think about thisFix the failing dbt test by checking the schema." + const result = clean(stripThinkTags(input).trim()) + expect(result).toBe("Fix the failing dbt test by checking the schema.") + }) + + test("handles think tags with code-fenced response", () => { + const input = "The user wants to fix a test\n```text\nInvestigate the failing test.\n```" + const result = clean(stripThinkTags(input).trim()) + expect(result).toBe("Investigate the failing test.") + }) + + test("handles clean output that is empty after stripping", () => { + const input = 'everything is reasoning```\n\n```' + const result = clean(stripThinkTags(input).trim()) + expect(result).toBe("") + }) + + test("preserves content when no wrapping detected", () => { + const input = "Add a created_at timestamp column to the users dbt model." 
+ const result = clean(stripThinkTags(input).trim()) + expect(result).toBe("Add a created_at timestamp column to the users dbt model.") + }) }) From 276519675afe04832cc8d3891ee3c1ad9ee19094 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sun, 15 Mar 2026 11:14:42 -0700 Subject: [PATCH 4/6] fix: handle unclosed `` tags from truncated model output When the small model hits its token limit mid-generation, `` tags may not have a closing ``. The previous regex required a closing tag, which would leak the entire reasoning block into the enhanced prompt. Now `stripThinkTags()` matches both closed and unclosed think blocks. Co-Authored-By: Claude Opus 4.6 (1M context) --- packages/opencode/src/altimate/enhance-prompt.ts | 4 +++- packages/opencode/test/altimate/enhance-prompt.test.ts | 8 ++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/packages/opencode/src/altimate/enhance-prompt.ts b/packages/opencode/src/altimate/enhance-prompt.ts index 7806de16ce..3ef43e4e16 100644 --- a/packages/opencode/src/altimate/enhance-prompt.ts +++ b/packages/opencode/src/altimate/enhance-prompt.ts @@ -54,7 +54,9 @@ User: "migrate this from snowflake to bigquery" Enhanced: "Migrate the SQL from Snowflake dialect to BigQuery dialect. Convert Snowflake-specific functions (e.g. DATEADD, IFF, QUALIFY) to BigQuery equivalents. Preserve the query logic and verify the translated query is syntactically valid."` export function stripThinkTags(text: string) { - return text.replace(/[\s\S]*?<\/think>\s*/g, "") + // Match closed ... blocks, and also unclosed ... 
to end of string + // (unclosed tags happen when the model hits token limit mid-generation) + return text.replace(/[\s\S]*?(?:<\/think>\s*|$)/g, "") } export function clean(text: string) { diff --git a/packages/opencode/test/altimate/enhance-prompt.test.ts b/packages/opencode/test/altimate/enhance-prompt.test.ts index 816d145985..26fee42d12 100644 --- a/packages/opencode/test/altimate/enhance-prompt.test.ts +++ b/packages/opencode/test/altimate/enhance-prompt.test.ts @@ -108,6 +108,14 @@ describe("enhance-prompt stripThinkTags()", () => { test("handles nested angle brackets inside think tags", () => { expect(stripThinkTags("check if x < 5 and y > 3result")).toBe("result") }) + + test("strips unclosed think tag (model hit token limit)", () => { + expect(stripThinkTags("reasoning that got cut off")).toBe("") + }) + + test("strips unclosed think tag with content before it", () => { + expect(stripThinkTags("good content trailing reasoning")).toBe("good content ") + }) }) describe("enhance-prompt combined pipeline", () => { From ef215861645c74974475de840a4387c5a7c9499b Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sun, 15 Mar 2026 11:33:26 -0700 Subject: [PATCH 5/6] =?UTF-8?q?fix:=20address=20remaining=20review=20findi?= =?UTF-8?q?ngs=20=E2=80=94=20history,=20debounce,=20tests?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix history storing original text instead of enhanced text by passing `inputText` explicitly to `history.append()` instead of spreading `store.prompt` which may contain stale state - Add concurrency guard (`enhancingInProgress` flag) to prevent multiple concurrent auto-enhance LLM calls from rapid submissions - Consolidate magic string into `ENHANCE_NAME` constant used across agent name, user agent, log service, and ID derivation - Add justifying comment for `as any` cast on synthetic IDs explaining why branded types are safely bypassed - Add `isAutoEnhanceEnabled()` tests (5 cases): config absent, 
present but missing flag, false, true, undefined - Add `enhancePrompt()` tests (10 cases): empty input, whitespace, successful enhancement, think tag stripping, code fence stripping, stream.text failure, stream init failure, empty LLM response, think tags with no content, combined pipeline Test count: 32 -> 48 Co-Authored-By: Claude Opus 4.6 (1M context) --- .../opencode/src/altimate/enhance-prompt.ts | 14 +- .../cli/cmd/tui/component/prompt/index.tsx | 8 +- .../test/altimate/enhance-prompt.test.ts | 158 +++++++++++++++++- 3 files changed, 172 insertions(+), 8 deletions(-) diff --git a/packages/opencode/src/altimate/enhance-prompt.ts b/packages/opencode/src/altimate/enhance-prompt.ts index 3ef43e4e16..13e8377ee2 100644 --- a/packages/opencode/src/altimate/enhance-prompt.ts +++ b/packages/opencode/src/altimate/enhance-prompt.ts @@ -6,11 +6,13 @@ import { Config } from "@/config/config" import { Log } from "@/util/log" import { MessageV2 } from "@/session/message-v2" -const log = Log.create({ service: "enhance-prompt" }) - +const ENHANCE_NAME = "enhance-prompt" const ENHANCE_TIMEOUT_MS = 15_000 -// Synthetic ID for enhancement requests — not a real session/message -const ENHANCE_ID = "enhance-prompt" as any +// MessageV2.User requires branded MessageID/SessionID types, but this is a +// synthetic message that never enters the session store — cast is safe here. 
+const ENHANCE_ID = ENHANCE_NAME as any + +const log = Log.create({ service: ENHANCE_NAME }) // Research-backed enhancement prompt based on: // - AutoPrompter (arxiv 2504.20196): 5 missing info categories that cause 27% lower edit correctness @@ -93,7 +95,7 @@ export async function enhancePrompt(text: string): Promise { (await Provider.getModel(defaultModel.providerID, defaultModel.modelID)) const agent: Agent.Info = { - name: "enhance-prompt", + name: ENHANCE_NAME, mode: "primary", hidden: true, options: {}, @@ -107,7 +109,7 @@ export async function enhancePrompt(text: string): Promise { sessionID: ENHANCE_ID, role: "user", time: { created: Date.now() }, - agent: "enhance-prompt", + agent: ENHANCE_NAME, model: { providerID: model.providerID, modelID: model.id, diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx index d9224deee1..921cdf444b 100644 --- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx @@ -36,6 +36,7 @@ import { useTextareaKeybindings } from "../textarea-keybindings" import { DialogSkill } from "../dialog-skill" // altimate_change start - import prompt enhancement import { enhancePrompt, isAutoEnhanceEnabled } from "@/altimate/enhance-prompt" +let enhancingInProgress = false // altimate_change end export type PromptProps = { @@ -613,10 +614,12 @@ export function Prompt(props: PromptProps) { // altimate_change start - auto-enhance prompt before expanding paste text // Only enhance the raw user text, not shell commands or slash commands - if (store.mode === "normal" && !inputText.startsWith("/")) { + // Guard prevents concurrent enhancement calls from rapid submissions + if (store.mode === "normal" && !inputText.startsWith("/") && !enhancingInProgress) { try { const autoEnhance = await isAutoEnhanceEnabled() if (autoEnhance) { + enhancingInProgress = true toast.show({ message: "Enhancing 
prompt...", variant: "info", duration: 2000 }) const enhanced = await enhancePrompt(inputText) if (enhanced !== inputText) { @@ -627,6 +630,8 @@ export function Prompt(props: PromptProps) { } catch (err) { // Enhancement failure should never block prompt submission console.error("auto-enhance failed, using original prompt", err) + } finally { + enhancingInProgress = false } } // altimate_change end @@ -720,6 +725,7 @@ export function Prompt(props: PromptProps) { } history.append({ ...store.prompt, + input: inputText, mode: currentMode, }) input.extmarks.clear() diff --git a/packages/opencode/test/altimate/enhance-prompt.test.ts b/packages/opencode/test/altimate/enhance-prompt.test.ts index 26fee42d12..774035dd24 100644 --- a/packages/opencode/test/altimate/enhance-prompt.test.ts +++ b/packages/opencode/test/altimate/enhance-prompt.test.ts @@ -1,6 +1,62 @@ -import { describe, expect, test } from "bun:test" +import { describe, expect, test, mock, beforeEach } from "bun:test" import { clean, stripThinkTags } from "../../src/altimate/enhance-prompt" +// Mock Config for isAutoEnhanceEnabled tests +let mockConfig: any = {} +mock.module("@/config/config", () => ({ + Config: { + get: () => Promise.resolve(mockConfig), + }, +})) + +// Mock Provider and LLM for enhancePrompt tests +let mockStreamResult: string | undefined = "enhanced result" +let mockStreamShouldThrow = false +mock.module("@/provider/provider", () => ({ + Provider: { + defaultModel: () => + Promise.resolve({ providerID: "test-provider", modelID: "test-model" }), + getSmallModel: () => + Promise.resolve({ providerID: "test-provider", id: "test-small", modelID: "test-small" }), + getModel: () => + Promise.resolve({ providerID: "test-provider", id: "test-model", modelID: "test-model" }), + }, +})) + +mock.module("@/session/llm", () => ({ + LLM: { + stream: () => { + if (mockStreamShouldThrow) return Promise.reject(new Error("stream init failed")) + return Promise.resolve({ + text: mockStreamResult !== undefined 
+ ? Promise.resolve(mockStreamResult) + : Promise.reject(new Error("stream text failed")), + }) + }, + }, +})) + +mock.module("@/util/log", () => ({ + Log: { + create: () => ({ + info: () => {}, + error: () => {}, + debug: () => {}, + }), + }, +})) + +mock.module("@/agent/agent", () => ({ + Agent: {}, +})) + +mock.module("@/session/message-v2", () => ({ + MessageV2: {}, +})) + +// Import after mocking +const { enhancePrompt, isAutoEnhanceEnabled } = await import("../../src/altimate/enhance-prompt") + describe("enhance-prompt clean()", () => { test("strips markdown code fences", () => { expect(clean("```\nfixed prompt\n```")).toBe("fixed prompt") @@ -149,3 +205,103 @@ describe("enhance-prompt combined pipeline", () => { expect(result).toBe("Add a created_at timestamp column to the users dbt model.") }) }) + +describe("isAutoEnhanceEnabled()", () => { + beforeEach(() => { + mockConfig = {} + }) + + test("returns false when experimental config is absent", async () => { + mockConfig = {} + expect(await isAutoEnhanceEnabled()).toBe(false) + }) + + test("returns false when experimental exists but auto_enhance_prompt is missing", async () => { + mockConfig = { experimental: {} } + expect(await isAutoEnhanceEnabled()).toBe(false) + }) + + test("returns false when auto_enhance_prompt is false", async () => { + mockConfig = { experimental: { auto_enhance_prompt: false } } + expect(await isAutoEnhanceEnabled()).toBe(false) + }) + + test("returns true when auto_enhance_prompt is true", async () => { + mockConfig = { experimental: { auto_enhance_prompt: true } } + expect(await isAutoEnhanceEnabled()).toBe(true) + }) + + test("returns false when auto_enhance_prompt is undefined", async () => { + mockConfig = { experimental: { auto_enhance_prompt: undefined } } + expect(await isAutoEnhanceEnabled()).toBe(false) + }) +}) + +describe("enhancePrompt()", () => { + beforeEach(() => { + mockStreamResult = "enhanced result" + mockStreamShouldThrow = false + }) + + test("returns original 
text for empty input", async () => {
+    expect(await enhancePrompt("")).toBe("")
+  })
+
+  test("returns original text for whitespace-only input", async () => {
+    expect(await enhancePrompt("   ")).toBe("   ")
+  })
+
+  test("returns enhanced text from LLM", async () => {
+    mockStreamResult = "Investigate the failing test and fix it."
+    const result = await enhancePrompt("fix the test")
+    expect(result).toBe("Investigate the failing test and fix it.")
+  })
+
+  test("strips think tags from LLM response", async () => {
+    mockStreamResult = "<think>let me reason</think>Enhanced prompt here"
+    const result = await enhancePrompt("do something")
+    expect(result).toBe("Enhanced prompt here")
+  })
+
+  test("strips code fences from LLM response", async () => {
+    mockStreamResult = '```\nEnhanced prompt here\n```'
+    const result = await enhancePrompt("do something")
+    expect(result).toBe("Enhanced prompt here")
+  })
+
+  test("returns original text when LLM stream.text fails", async () => {
+    mockStreamResult = undefined // causes stream.text to reject
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("returns original text when LLM stream init fails", async () => {
+    mockStreamShouldThrow = true
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("returns original text when LLM returns empty string", async () => {
+    mockStreamResult = ""
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles LLM response with only think tags (no content)", async () => {
+    mockStreamResult = "<think>I should enhance this</think>"
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles unclosed think tag in LLM response", async () => {
+    mockStreamResult = "<think>reasoning cut off by token limit"
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles combined think tags + code fences + quotes", async () => {
+    mockStreamResult = '<think>reasoning</think>```\n"Investigate the failing test."\n```'
+    const result = await enhancePrompt("fix test")
+    expect(result).toBe("Investigate the failing test.")
+  })
+})

From a44bd49587583b83d04deb47757677f7822d5c74 Mon Sep 17 00:00:00 2001
From: anandgupta42
Date: Sun, 15 Mar 2026 11:37:56 -0700
Subject: =?UTF-8?q?fix:=20address=20Sentry=20findings=20?=
 =?UTF-8?q?=E2=80=94=20stream=20consumption=20and=20race=20condition?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Explicitly consume `stream.fullStream` before awaiting `stream.text`
  to prevent potential hangs from Vercel AI SDK stream not being drained
- Add race condition guard to manual enhance command: if user edits the
  prompt while enhancement is in-flight, discard the stale result
- Add same guard to auto-enhance path in `submit()` for consistency
- Update LLM mock to include `fullStream` async iterable

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 packages/opencode/src/altimate/enhance-prompt.ts        | 5 +++++
 .../opencode/src/cli/cmd/tui/component/prompt/index.tsx | 5 +++++
 packages/opencode/test/altimate/enhance-prompt.test.ts  | 6 ++++++
 3 files changed, 16 insertions(+)

diff --git a/packages/opencode/src/altimate/enhance-prompt.ts b/packages/opencode/src/altimate/enhance-prompt.ts
index 13e8377ee2..44e3fd24ae 100644
--- a/packages/opencode/src/altimate/enhance-prompt.ts
+++ b/packages/opencode/src/altimate/enhance-prompt.ts
@@ -134,6 +134,11 @@ export async function enhancePrompt(text: string): Promise<string> {
     ],
   })
 
+  // Consume the stream explicitly to avoid potential SDK hangs where
+  // .text never resolves if the stream isn't drained (Vercel AI SDK caveat)
+  for await (const _ of stream.fullStream) {
+    // drain
+  }
   const result = await stream.text.catch((err) => {
     log.error("failed to enhance prompt", { error: err })
     return undefined
diff --git 
a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx index 921cdf444b..c3fc796f5e 100644 --- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx @@ -216,6 +216,9 @@ export function Prompt(props: PromptProps) { }) try { const enhanced = await enhancePrompt(original) + // Guard against race condition: if user edited the prompt while + // enhancement was in-flight, discard the stale enhanced result + if (store.prompt.input !== original) return if (enhanced !== original) { input.setText(enhanced) setStore("prompt", "input", enhanced) @@ -622,6 +625,8 @@ export function Prompt(props: PromptProps) { enhancingInProgress = true toast.show({ message: "Enhancing prompt...", variant: "info", duration: 2000 }) const enhanced = await enhancePrompt(inputText) + // Discard if user changed the prompt during enhancement + if (store.prompt.input !== inputText) return if (enhanced !== inputText) { inputText = enhanced setStore("prompt", "input", enhanced) diff --git a/packages/opencode/test/altimate/enhance-prompt.test.ts b/packages/opencode/test/altimate/enhance-prompt.test.ts index 774035dd24..1548bd9046 100644 --- a/packages/opencode/test/altimate/enhance-prompt.test.ts +++ b/packages/opencode/test/altimate/enhance-prompt.test.ts @@ -28,6 +28,12 @@ mock.module("@/session/llm", () => ({ stream: () => { if (mockStreamShouldThrow) return Promise.reject(new Error("stream init failed")) return Promise.resolve({ + // fullStream must be an async iterable (consumed by for-await in enhancePrompt) + fullStream: { + [Symbol.asyncIterator]: () => ({ + next: () => Promise.resolve({ done: true, value: undefined }), + }), + }, text: mockStreamResult !== undefined ? Promise.resolve(mockStreamResult) : Promise.reject(new Error("stream text failed")),