diff --git a/packages/opencode/src/altimate/enhance-prompt.ts b/packages/opencode/src/altimate/enhance-prompt.ts
new file mode 100644
index 0000000000..44e3fd24ae
--- /dev/null
+++ b/packages/opencode/src/altimate/enhance-prompt.ts
@@ -0,0 +1,157 @@
+// altimate_change - new file
+import { Provider } from "@/provider/provider"
+import { LLM } from "@/session/llm"
+import { Agent } from "@/agent/agent"
+import { Config } from "@/config/config"
+import { Log } from "@/util/log"
+import { MessageV2 } from "@/session/message-v2"
+
+const ENHANCE_NAME = "enhance-prompt"
+// Hard cap on a single enhancement call; on expiry the AbortController below
+// cancels the LLM stream and the caller falls back to the original text.
+const ENHANCE_TIMEOUT_MS = 15_000
+// MessageV2.User requires branded MessageID/SessionID types, but this is a
+// synthetic message that never enters the session store — cast is safe here.
+const ENHANCE_ID = ENHANCE_NAME as any
+
+const log = Log.create({ service: ENHANCE_NAME })
+
+// Research-backed enhancement prompt based on:
+// - AutoPrompter (arxiv 2504.20196): 5 missing info categories that cause 27% lower edit correctness
+// - Meta-prompting best practices: clear role, structural scaffolding, few-shot examples
+// - KiloCode's enhance-prompt implementation: lightweight model, preserve intent, no wrapping
+// NOTE: everything inside this template literal is runtime data sent verbatim
+// as the system message — wording changes here change agent behavior.
+const ENHANCE_SYSTEM_PROMPT = `You are a prompt rewriter for a data engineering coding agent. The agent can read/write files, run SQL, manage dbt models, inspect schemas, and execute shell commands.
+
+Your task: rewrite the user's rough prompt into a clearer version that will produce better results. Reply with ONLY the enhanced prompt — no explanations, no wrapping in quotes or code fences.
+
+## What to improve
+
+Research shows developer prompts commonly lack these five categories of information. Add them when missing:
+
+1. **Specifics** — Add concrete details the agent needs: table names, column names, file paths, SQL dialects, error messages. If the user references "the model" or "the table", keep the reference but clarify what the agent should look for.
+2. **Action plan** — When the prompt is vague ("fix this"), add explicit steps: investigate first, then modify, then verify. Structure as a logical sequence.
+3. **Scope** — Clarify what files, models, or queries are in scope. If ambiguous, instruct the agent to identify the scope first.
+4. **Verification** — Add a verification step when the user implies correctness matters (fixes, migrations, refactors). E.g. "run the query to confirm results" or "run dbt test after changes".
+5. **Intent clarification** — When the request could be interpreted multiple ways, pick the most likely interpretation and make it explicit.
+
+## Rules
+
+- Preserve the user's intent exactly — never add requirements they didn't ask for
+- Keep it concise — a good enhancement adds 1-3 sentences, not paragraphs
+- If the prompt is already clear and specific, return it unchanged
+- Write in the same tone/style as the user (casual stays casual, technical stays technical)
+- Never add generic filler like "please ensure best practices" or "follow coding standards"
+- Do not mention yourself or the enhancement process
+
+## Examples
+
+User: "fix the failing test"
+Enhanced: "Investigate the failing test — run the test suite first to identify which test is failing and why, then examine the relevant source code, apply a fix, and re-run the test to confirm it passes."
+
+User: "add a created_at column to the users model"
+Enhanced: "Add a created_at timestamp column to the users dbt model. Update the SQL definition and the schema.yml entry. Use the appropriate timestamp type for the target warehouse."
+
+User: "why is this query slow"
+Enhanced: "Analyze why the query is slow. Run EXPLAIN/query profile to identify bottlenecks (full table scans, missing indexes, expensive joins). Suggest specific optimizations based on the findings."
+
+User: "migrate this from snowflake to bigquery"
+Enhanced: "Migrate the SQL from Snowflake dialect to BigQuery dialect. Convert Snowflake-specific functions (e.g. DATEADD, IFF, QUALIFY) to BigQuery equivalents. Preserve the query logic and verify the translated query is syntactically valid."`
+
+/**
+ * Remove model reasoning blocks from LLM output.
+ *
+ * Matches closed <think>...</think> blocks (plus trailing whitespace) and also
+ * an unclosed <think>... running to end of string — unclosed tags happen when
+ * the model hits its token limit mid-generation. Without the <think> opener in
+ * the pattern the lazy [\s\S]*? would anchor at position 0 and delete the
+ * whole string.
+ */
+export function stripThinkTags(text: string) {
+  return text.replace(/<think>[\s\S]*?(?:<\/think>\s*|$)/g, "")
+}
+
+/**
+ * Normalize an LLM reply into bare prompt text: unwrap one outer markdown
+ * code fence and one pair of matching surrounding quotes, trimming whitespace
+ * between each step. Inner fences/quotes are left untouched.
+ */
+export function clean(text: string) {
+  const unfenced = text.trim().replace(/^```\w*\n([\s\S]*?)\n```$/, "$1")
+  const unquoted = unfenced.trim().replace(/^(['"])([\s\S]*)\1$/, "$2")
+  return unquoted.trim()
+}
+
+/**
+ * Check if auto-enhance is enabled in config.
+ * Defaults to false — user must explicitly opt in.
+ */
+export async function isAutoEnhanceEnabled(): Promise<boolean> {
+  const cfg = await Config.get()
+  return cfg.experimental?.auto_enhance_prompt === true
+}
+
+/**
+ * Rewrite a rough user prompt into a clearer one via a small/fast model.
+ *
+ * Never throws and never returns empty output: on timeout, stream failure, or
+ * an empty/fully-stripped LLM response the original `text` is returned, so
+ * enhancement can only improve a prompt, never lose it.
+ */
+export async function enhancePrompt(text: string): Promise<string> {
+  const trimmed = text.trim()
+  if (!trimmed) return text
+
+  log.info("enhancing", { length: trimmed.length })
+
+  // Abort the LLM call if it exceeds the hard timeout; the abort surfaces as a
+  // rejection caught below, and the timer is always cleared in `finally`.
+  const controller = new AbortController()
+  const timeout = setTimeout(() => controller.abort(), ENHANCE_TIMEOUT_MS)
+
+  try {
+    // Prefer the provider's designated small model; fall back to the default.
+    const defaultModel = await Provider.defaultModel()
+    const model =
+      (await Provider.getSmallModel(defaultModel.providerID)) ??
+      (await Provider.getModel(defaultModel.providerID, defaultModel.modelID))
+
+    // Synthetic one-shot agent: hidden, no tools, carries the rewrite prompt.
+    const agent: Agent.Info = {
+      name: ENHANCE_NAME,
+      mode: "primary",
+      hidden: true,
+      options: {},
+      permission: [],
+      prompt: ENHANCE_SYSTEM_PROMPT,
+      temperature: 0.7,
+    }
+
+    // Synthetic user message — never persisted (see ENHANCE_ID cast above).
+    const user: MessageV2.User = {
+      id: ENHANCE_ID,
+      sessionID: ENHANCE_ID,
+      role: "user",
+      time: { created: Date.now() },
+      agent: ENHANCE_NAME,
+      model: {
+        providerID: model.providerID,
+        modelID: model.id,
+      },
+    }
+
+    const stream = await LLM.stream({
+      agent,
+      user,
+      system: [],
+      small: true,
+      tools: {},
+      model,
+      abort: controller.signal,
+      sessionID: ENHANCE_ID,
+      retries: 2,
+      messages: [
+        {
+          role: "user",
+          content: trimmed,
+        },
+      ],
+    })
+
+    // Consume the stream explicitly to avoid potential SDK hangs where
+    // .text never resolves if the stream isn't drained (Vercel AI SDK caveat)
+    for await (const _ of stream.fullStream) {
+      // drain
+    }
+    const result = await stream.text.catch((err) => {
+      log.error("failed to enhance prompt", { error: err })
+      return undefined
+    })
+
+    if (!result) return text
+
+    // Strip reasoning tags, unwrap fences/quotes; if nothing substantive
+    // remains, fall back to the original text.
+    const cleaned = clean(stripThinkTags(result).trim())
+    return cleaned || text
+  } catch (err) {
+    log.error("enhance prompt failed", { error: err })
+    return text
+  } finally {
+    clearTimeout(timeout)
+  }
+}
diff --git a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
index c85426cc24..c3fc796f5e 100644
--- a/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
@@ -34,6 +34,10 @@ import { useToast } from "../../ui/toast"
import { useKV } from "../../context/kv"
import { useTextareaKeybindings } from "../textarea-keybindings"
import { DialogSkill } from "../dialog-skill"
+// altimate_change start - import prompt enhancement
+import { enhancePrompt, isAutoEnhanceEnabled } from "@/altimate/enhance-prompt"
+// Module-scope (not component-scope) so the guard is shared by every Prompt
+// instance; prevents overlapping enhancement LLM calls from rapid submissions.
+let enhancingInProgress = false
+// altimate_change end
export type PromptProps = {
sessionID?: string
@@ -194,6 +198,53 @@ export function Prompt(props: PromptProps) {
dialog.clear()
},
},
+      // altimate_change start - add prompt enhance command
+      {
+        title: "Enhance prompt",
+        value: "prompt.enhance",
+        keybind: "prompt_enhance",
+        category: "Prompt",
+        enabled: !!store.prompt.input,
+        onSelect: async (dialog) => {
+          // Skip when the prompt is empty or another enhancement (manual or
+          // auto-submit path) is already in flight — the auto path checks
+          // `enhancingInProgress`, so this path must set and honor it too,
+          // otherwise two concurrent LLM calls can race on the prompt buffer.
+          if (!store.prompt.input.trim() || enhancingInProgress) return
+          dialog.clear()
+          const original = store.prompt.input
+          enhancingInProgress = true
+          toast.show({
+            message: "Enhancing prompt...",
+            variant: "info",
+            duration: 2000,
+          })
+          try {
+            const enhanced = await enhancePrompt(original)
+            // Guard against race condition: if user edited the prompt while
+            // enhancement was in-flight, discard the stale enhanced result
+            if (store.prompt.input !== original) return
+            if (enhanced !== original) {
+              input.setText(enhanced)
+              setStore("prompt", "input", enhanced)
+              input.gotoBufferEnd()
+              toast.show({
+                message: "Prompt enhanced",
+                variant: "success",
+                duration: 2000,
+              })
+            } else {
+              toast.show({
+                message: "Prompt already looks good",
+                variant: "info",
+                duration: 2000,
+              })
+            }
+          } catch {
+            toast.show({
+              message: "Failed to enhance prompt",
+              variant: "error",
+              duration: 3000,
+            })
+          } finally {
+            // Always release the guard, including on the early `return` above.
+            enhancingInProgress = false
+          }
+        },
+      },
+      // altimate_change end
{
title: "Paste",
value: "prompt.paste",
@@ -564,6 +615,32 @@ export function Prompt(props: PromptProps) {
const messageID = MessageID.ascending()
let inputText = store.prompt.input
+    // altimate_change start - auto-enhance prompt before expanding paste text
+    // Only enhance the raw user text, not shell commands or slash commands
+    // Guard prevents concurrent enhancement calls from rapid submissions
+    if (store.mode === "normal" && !inputText.startsWith("/") && !enhancingInProgress) {
+      try {
+        const autoEnhance = await isAutoEnhanceEnabled()
+        if (autoEnhance) {
+          enhancingInProgress = true
+          toast.show({ message: "Enhancing prompt...", variant: "info", duration: 2000 })
+          const enhanced = await enhancePrompt(inputText)
+          // Discard if user changed the prompt during enhancement
+          // NOTE(review): this bare `return` aborts the entire submission, not
+          // just the enhancement — confirm that cancelling the send is the
+          // intended UX when the buffer changed mid-flight.
+          if (store.prompt.input !== inputText) return
+          if (enhanced !== inputText) {
+            inputText = enhanced
+            setStore("prompt", "input", enhanced)
+          }
+        }
+      } catch (err) {
+        // Enhancement failure should never block prompt submission
+        // NOTE(review): consider routing through the TUI logger/toast instead
+        // of console.error, which may be invisible in the TUI.
+        console.error("auto-enhance failed, using original prompt", err)
+      } finally {
+        // Runs on success, failure, and the early `return` above.
+        enhancingInProgress = false
+      }
+    }
+    // altimate_change end
+
// Expand pasted text inline before submitting
const allExtmarks = input.extmarks.getAllForTypeId(promptPartTypeId)
const sortedExtmarks = allExtmarks.sort((a: { start: number }, b: { start: number }) => b.start - a.start)
@@ -653,6 +730,7 @@ export function Prompt(props: PromptProps) {
}
history.append({
...store.prompt,
+ input: inputText,
mode: currentMode,
})
input.extmarks.clear()
@@ -1155,6 +1233,11 @@ export function Prompt(props: PromptProps) {
{keybind.print("command_list")} commands
+ {/* altimate_change start - show enhance hint */}
+
+ {keybind.print("prompt_enhance")} enhance
+
+ {/* altimate_change end */}
diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts
index 1ab39ad18a..fca9982cc9 100644
--- a/packages/opencode/src/config/config.ts
+++ b/packages/opencode/src/config/config.ts
@@ -866,6 +866,9 @@ export namespace Config {
agent_cycle: z.string().optional().default("tab").describe("Next agent"),
agent_cycle_reverse: z.string().optional().default("shift+tab").describe("Previous agent"),
variant_cycle: z.string().optional().default("ctrl+t").describe("Cycle model variants"),
+ // altimate_change start - add prompt enhance keybind
+ prompt_enhance: z.string().optional().default("i").describe("Enhance prompt with AI before sending"),
+ // altimate_change end
input_clear: z.string().optional().default("ctrl+c").describe("Clear input field"),
input_paste: z.string().optional().default("ctrl+v").describe("Paste from clipboard"),
input_submit: z.string().optional().default("return").describe("Submit input"),
@@ -1226,6 +1229,14 @@ export namespace Config {
.positive()
.optional()
.describe("Timeout in milliseconds for model context protocol (MCP) requests"),
+ // altimate_change start - auto-enhance prompt config
+ auto_enhance_prompt: z
+ .boolean()
+ .optional()
+ .describe(
+ "Automatically enhance prompts with AI before sending (default: false). Uses a small model to rewrite rough prompts into clearer versions.",
+ ),
+ // altimate_change end
})
.optional(),
})
diff --git a/packages/opencode/test/altimate/enhance-prompt.test.ts b/packages/opencode/test/altimate/enhance-prompt.test.ts
new file mode 100644
index 0000000000..1548bd9046
--- /dev/null
+++ b/packages/opencode/test/altimate/enhance-prompt.test.ts
@@ -0,0 +1,313 @@
+import { describe, expect, test, mock, beforeEach } from "bun:test"
+import { clean, stripThinkTags } from "../../src/altimate/enhance-prompt"
+
+// Mock Config for isAutoEnhanceEnabled tests; reassigned per-test in beforeEach.
+let mockConfig: any = {}
+mock.module("@/config/config", () => ({
+  Config: {
+    get: () => Promise.resolve(mockConfig),
+  },
+}))
+
+// Mock Provider and LLM for enhancePrompt tests.
+// mockStreamResult === undefined makes stream.text reject;
+// mockStreamShouldThrow makes LLM.stream itself reject.
+let mockStreamResult: string | undefined = "enhanced result"
+let mockStreamShouldThrow = false
+// Provider mocks return both `id` and `modelID` because enhancePrompt reads
+// `model.id` when building the synthetic user message.
+mock.module("@/provider/provider", () => ({
+  Provider: {
+    defaultModel: () =>
+      Promise.resolve({ providerID: "test-provider", modelID: "test-model" }),
+    getSmallModel: () =>
+      Promise.resolve({ providerID: "test-provider", id: "test-small", modelID: "test-small" }),
+    getModel: () =>
+      Promise.resolve({ providerID: "test-provider", id: "test-model", modelID: "test-model" }),
+  },
+}))
+
+mock.module("@/session/llm", () => ({
+  LLM: {
+    stream: () => {
+      if (mockStreamShouldThrow) return Promise.reject(new Error("stream init failed"))
+      return Promise.resolve({
+        // fullStream must be an async iterable (consumed by for-await in enhancePrompt)
+        fullStream: {
+          [Symbol.asyncIterator]: () => ({
+            next: () => Promise.resolve({ done: true, value: undefined }),
+          }),
+        },
+        text: mockStreamResult !== undefined
+          ? Promise.resolve(mockStreamResult)
+          : Promise.reject(new Error("stream text failed")),
+      })
+    },
+  },
+}))
+
+mock.module("@/util/log", () => ({
+  Log: {
+    create: () => ({
+      info: () => {},
+      error: () => {},
+      debug: () => {},
+    }),
+  },
+}))
+
+mock.module("@/agent/agent", () => ({
+  Agent: {},
+}))
+
+mock.module("@/session/message-v2", () => ({
+  MessageV2: {},
+}))
+
+// Import after mocking — bun resolves mock.module registrations at load time,
+// so the module under test must only be imported once all mocks are in place.
+const { enhancePrompt, isAutoEnhanceEnabled } = await import("../../src/altimate/enhance-prompt")
+
+// clean() contract: unwrap exactly one outer ```fence``` and one pair of
+// matching surrounding quotes, trimming between steps; inner fences/quotes
+// and mismatched wrappers must be preserved verbatim.
+describe("enhance-prompt clean()", () => {
+  test("strips markdown code fences", () => {
+    expect(clean("```\nfixed prompt\n```")).toBe("fixed prompt")
+  })
+
+  test("strips code fences with language tag", () => {
+    expect(clean("```text\nenhanced prompt\n```")).toBe("enhanced prompt")
+  })
+
+  test("strips surrounding single quotes", () => {
+    expect(clean("'enhanced prompt'")).toBe("enhanced prompt")
+  })
+
+  test("strips surrounding double quotes", () => {
+    expect(clean('"enhanced prompt"')).toBe("enhanced prompt")
+  })
+
+  test("trims whitespace", () => {
+    expect(clean(" enhanced prompt ")).toBe("enhanced prompt")
+  })
+
+  test("handles combined wrapping", () => {
+    expect(clean('```\n"enhanced prompt"\n```')).toBe("enhanced prompt")
+  })
+
+  test("returns plain text unchanged", () => {
+    expect(clean("fix the auth bug")).toBe("fix the auth bug")
+  })
+
+  test("handles empty string", () => {
+    expect(clean("")).toBe("")
+  })
+
+  test("handles multiline content", () => {
+    const input = "```\nFirst do X.\nThen do Y.\n```"
+    expect(clean(input)).toBe("First do X.\nThen do Y.")
+  })
+
+  test("handles code fences with trailing whitespace", () => {
+    expect(clean(" ```\nenhanced prompt\n``` ")).toBe("enhanced prompt")
+  })
+
+  test("preserves inner code blocks", () => {
+    const input = "Run this:\n```sql\nSELECT 1\n```\nThen verify."
+    expect(clean(input)).toBe("Run this:\n```sql\nSELECT 1\n```\nThen verify.")
+  })
+
+  test("handles whitespace-only string", () => {
+    expect(clean(" ")).toBe("")
+  })
+
+  test("handles code fence with no newline before content", () => {
+    expect(clean("```enhanced prompt```")).toBe("```enhanced prompt```")
+  })
+
+  test("handles single backtick quotes (not code fences)", () => {
+    expect(clean("`enhanced prompt`")).toBe("`enhanced prompt`")
+  })
+
+  test("strips quotes from multiline content", () => {
+    expect(clean('"First line.\nSecond line."')).toBe("First line.\nSecond line.")
+  })
+
+  test("does not strip mismatched quotes", () => {
+    expect(clean("'enhanced prompt\"")).toBe("'enhanced prompt\"")
+  })
+
+  test("handles nested quotes inside code fences", () => {
+    // After fence stripping, quote stripping also triggers on surrounding quotes
+    expect(clean('```\n\'inner quoted\'\n```')).toBe("inner quoted")
+  })
+})
+
+// Inputs restored: the <think>/</think> literals were stripped from these
+// strings by markup sanitization — the expected values only hold with the
+// tags present (e.g. "good content " keeps its trailing space because the
+// regex consumes from the unclosed <think> to end of string).
+describe("enhance-prompt stripThinkTags()", () => {
+  test("removes single think block", () => {
+    expect(stripThinkTags("<think>reasoning here</think>actual prompt")).toBe("actual prompt")
+  })
+
+  test("removes think block with trailing whitespace", () => {
+    expect(stripThinkTags("<think>reasoning</think>\n\nactual prompt")).toBe("actual prompt")
+  })
+
+  test("removes multiple think blocks", () => {
+    const input = "<think>first</think>part one <think>second</think>part two"
+    expect(stripThinkTags(input)).toBe("part one part two")
+  })
+
+  test("handles multiline think content", () => {
+    const input = "<think>\nStep 1: analyze\nStep 2: rewrite\n</think>\nEnhanced prompt here"
+    expect(stripThinkTags(input)).toBe("Enhanced prompt here")
+  })
+
+  test("returns text unchanged when no think tags", () => {
+    expect(stripThinkTags("fix the auth bug")).toBe("fix the auth bug")
+  })
+
+  test("handles empty string", () => {
+    expect(stripThinkTags("")).toBe("")
+  })
+
+  test("handles think tags with no content after", () => {
+    expect(stripThinkTags("<think>reasoning only</think>")).toBe("")
+  })
+
+  test("handles nested angle brackets inside think tags", () => {
+    expect(stripThinkTags("<think>check if x < 5 and y > 3</think>result")).toBe("result")
+  })
+
+  test("strips unclosed think tag (model hit token limit)", () => {
+    expect(stripThinkTags("<think>reasoning that got cut off")).toBe("")
+  })
+
+  test("strips unclosed think tag with content before it", () => {
+    expect(stripThinkTags("good content <think>trailing reasoning")).toBe("good content ")
+  })
+})
+
+// Inputs restored: <think>/</think> literals were stripped by markup
+// sanitization; the expected values require them.
+describe("enhance-prompt combined pipeline", () => {
+  test("strips think tags then code fences then quotes", () => {
+    const input = '<think>reasoning</think>```\n"enhanced prompt"\n```'
+    const result = clean(stripThinkTags(input).trim())
+    expect(result).toBe("enhanced prompt")
+  })
+
+  test("strips think tags and preserves plain text", () => {
+    const input = "<think>let me think about this</think>Fix the failing dbt test by checking the schema."
+    const result = clean(stripThinkTags(input).trim())
+    expect(result).toBe("Fix the failing dbt test by checking the schema.")
+  })
+
+  test("handles think tags with code-fenced response", () => {
+    const input = "<think>The user wants to fix a test</think>\n```text\nInvestigate the failing test.\n```"
+    const result = clean(stripThinkTags(input).trim())
+    expect(result).toBe("Investigate the failing test.")
+  })
+
+  test("handles clean output that is empty after stripping", () => {
+    const input = '<think>everything is reasoning</think>```\n\n```'
+    const result = clean(stripThinkTags(input).trim())
+    expect(result).toBe("")
+  })
+
+  test("preserves content when no wrapping detected", () => {
+    const input = "Add a created_at timestamp column to the users dbt model."
+    const result = clean(stripThinkTags(input).trim())
+    expect(result).toBe("Add a created_at timestamp column to the users dbt model.")
+  })
+})
+
+// Contract: strictly opt-in — only an explicit `true` enables auto-enhance;
+// absent/false/undefined all resolve to false.
+describe("isAutoEnhanceEnabled()", () => {
+  beforeEach(() => {
+    mockConfig = {}
+  })
+
+  test("returns false when experimental config is absent", async () => {
+    mockConfig = {}
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+
+  test("returns false when experimental exists but auto_enhance_prompt is missing", async () => {
+    mockConfig = { experimental: {} }
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+
+  test("returns false when auto_enhance_prompt is false", async () => {
+    mockConfig = { experimental: { auto_enhance_prompt: false } }
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+
+  test("returns true when auto_enhance_prompt is true", async () => {
+    mockConfig = { experimental: { auto_enhance_prompt: true } }
+    expect(await isAutoEnhanceEnabled()).toBe(true)
+  })
+
+  test("returns false when auto_enhance_prompt is undefined", async () => {
+    mockConfig = { experimental: { auto_enhance_prompt: undefined } }
+    expect(await isAutoEnhanceEnabled()).toBe(false)
+  })
+})
+
+// Inputs restored: <think>/</think> literals were stripped by markup
+// sanitization in the think-tag cases below.
+describe("enhancePrompt()", () => {
+  beforeEach(() => {
+    mockStreamResult = "enhanced result"
+    mockStreamShouldThrow = false
+  })
+
+  test("returns original text for empty input", async () => {
+    expect(await enhancePrompt("")).toBe("")
+  })
+
+  test("returns original text for whitespace-only input", async () => {
+    expect(await enhancePrompt(" ")).toBe(" ")
+  })
+
+  test("returns enhanced text from LLM", async () => {
+    mockStreamResult = "Investigate the failing test and fix it."
+    const result = await enhancePrompt("fix the test")
+    expect(result).toBe("Investigate the failing test and fix it.")
+  })
+
+  test("strips think tags from LLM response", async () => {
+    mockStreamResult = "<think>let me reason</think>Enhanced prompt here"
+    const result = await enhancePrompt("do something")
+    expect(result).toBe("Enhanced prompt here")
+  })
+
+  test("strips code fences from LLM response", async () => {
+    mockStreamResult = '```\nEnhanced prompt here\n```'
+    const result = await enhancePrompt("do something")
+    expect(result).toBe("Enhanced prompt here")
+  })
+
+  test("returns original text when LLM stream.text fails", async () => {
+    mockStreamResult = undefined // causes stream.text to reject
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("returns original text when LLM stream init fails", async () => {
+    mockStreamShouldThrow = true
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("returns original text when LLM returns empty string", async () => {
+    mockStreamResult = ""
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles LLM response with only think tags (no content)", async () => {
+    mockStreamResult = "<think>I should enhance this</think>"
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles unclosed think tag in LLM response", async () => {
+    mockStreamResult = "<think>reasoning cut off by token limit"
+    const result = await enhancePrompt("fix the bug")
+    expect(result).toBe("fix the bug")
+  })
+
+  test("handles combined think tags + code fences + quotes", async () => {
+    mockStreamResult = '<think>reasoning</think>```\n"Investigate the failing test."\n```'
+    const result = await enhancePrompt("fix test")
+    expect(result).toBe("Investigate the failing test.")
+  })
+})