Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
157 changes: 157 additions & 0 deletions packages/opencode/src/altimate/enhance-prompt.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,157 @@
// altimate_change - new file
import { Provider } from "@/provider/provider"
import { LLM } from "@/session/llm"
import { Agent } from "@/agent/agent"
import { Config } from "@/config/config"
import { Log } from "@/util/log"
import { MessageV2 } from "@/session/message-v2"

// Service identifier: used as the log service name, the synthetic agent name,
// and (cast) as the placeholder message/session ID below.
const ENHANCE_NAME = "enhance-prompt"
// Wall-clock cap on a single enhancement call; the in-flight LLM stream is
// aborted once this elapses so submission is never blocked indefinitely.
const ENHANCE_TIMEOUT_MS = 15_000
// MessageV2.User requires branded MessageID/SessionID types, but this is a
// synthetic message that never enters the session store — cast is safe here.
const ENHANCE_ID = ENHANCE_NAME as any

const log = Log.create({ service: ENHANCE_NAME })

// System prompt for the rewriter model. Research-backed, based on:
// - AutoPrompter (arxiv 2504.20196): 5 missing info categories that cause 27% lower edit correctness
// - Meta-prompting best practices: clear role, structural scaffolding, few-shot examples
// - KiloCode's enhance-prompt implementation: lightweight model, preserve intent, no wrapping
// NOTE: the model is instructed to reply with the enhanced prompt ONLY; any
// wrapping it adds anyway (fences/quotes/<think> blocks) is stripped by
// stripThinkTags/clean before the result is used.
const ENHANCE_SYSTEM_PROMPT = `You are a prompt rewriter for a data engineering coding agent. The agent can read/write files, run SQL, manage dbt models, inspect schemas, and execute shell commands.

Your task: rewrite the user's rough prompt into a clearer version that will produce better results. Reply with ONLY the enhanced prompt — no explanations, no wrapping in quotes or code fences.

## What to improve

Research shows developer prompts commonly lack these five categories of information. Add them when missing:

1. **Specifics** — Add concrete details the agent needs: table names, column names, file paths, SQL dialects, error messages. If the user references "the model" or "the table", keep the reference but clarify what the agent should look for.
2. **Action plan** — When the prompt is vague ("fix this"), add explicit steps: investigate first, then modify, then verify. Structure as a logical sequence.
3. **Scope** — Clarify what files, models, or queries are in scope. If ambiguous, instruct the agent to identify the scope first.
4. **Verification** — Add a verification step when the user implies correctness matters (fixes, migrations, refactors). E.g. "run the query to confirm results" or "run dbt test after changes".
5. **Intent clarification** — When the request could be interpreted multiple ways, pick the most likely interpretation and make it explicit.

## Rules

- Preserve the user's intent exactly — never add requirements they didn't ask for
- Keep it concise — a good enhancement adds 1-3 sentences, not paragraphs
- If the prompt is already clear and specific, return it unchanged
- Write in the same tone/style as the user (casual stays casual, technical stays technical)
- Never add generic filler like "please ensure best practices" or "follow coding standards"
- Do not mention yourself or the enhancement process

## Examples

User: "fix the failing test"
Enhanced: "Investigate the failing test — run the test suite first to identify which test is failing and why, then examine the relevant source code, apply a fix, and re-run the test to confirm it passes."

User: "add a created_at column to the users model"
Enhanced: "Add a created_at timestamp column to the users dbt model. Update the SQL definition and the schema.yml entry. Use the appropriate timestamp type for the target warehouse."

User: "why is this query slow"
Enhanced: "Analyze why the query is slow. Run EXPLAIN/query profile to identify bottlenecks (full table scans, missing indexes, expensive joins). Suggest specific optimizations based on the findings."

User: "migrate this from snowflake to bigquery"
Enhanced: "Migrate the SQL from Snowflake dialect to BigQuery dialect. Convert Snowflake-specific functions (e.g. DATEADD, IFF, QUALIFY) to BigQuery equivalents. Preserve the query logic and verify the translated query is syntactically valid."`

/**
 * Remove `<think>...</think>` reasoning blocks from model output.
 *
 * Handles both well-formed blocks and an unclosed trailing `<think>` — the
 * latter happens when the model hits its token limit mid-generation, in which
 * case everything from the opening tag to the end of the string is dropped.
 * Trailing whitespace after a closing tag is consumed as part of the block.
 */
export function stripThinkTags(text: string) {
  // A block either ends at a closing tag (plus any whitespace after it) or
  // runs to the end of the string when the tag was never closed.
  const thinkBlock = /<think>[\s\S]*?(?:<\/think>\s*|$)/g
  return text.replace(thinkBlock, "")
}

/**
 * Normalize raw model output into a bare prompt string.
 *
 * Strips, in order: surrounding whitespace, a single wrapping code fence
 * (with optional language tag), then a single pair of wrapping quotes —
 * both things models commonly add despite being told not to.
 */
export function clean(text: string) {
  // Fence must span the whole string: ```lang\n ... \n```
  const wrappingFence = /^```\w*\n([\s\S]*?)\n```$/
  // Matching single or double quotes around the entire string.
  const wrappingQuotes = /^(['"])([\s\S]*)\1$/
  const unfenced = text.trim().replace(wrappingFence, "$1").trim()
  return unfenced.replace(wrappingQuotes, "$2").trim()
}

/**
 * Whether the user has opted into automatic prompt enhancement.
 *
 * Reads `experimental.auto_enhance_prompt` from config. Only an explicit
 * `true` enables the feature — absent or `false` both mean disabled, so the
 * default is opt-out.
 */
export async function isAutoEnhanceEnabled(): Promise<boolean> {
  const { experimental } = await Config.get()
  return experimental?.auto_enhance_prompt === true
}

/**
 * Rewrite a rough user prompt into a clearer one using a small model.
 *
 * Never throws and always resolves to usable text: on any failure (model
 * lookup, stream error, timeout, empty output) the original `text` is
 * returned unchanged, so callers need no fallback of their own.
 */
export async function enhancePrompt(text: string): Promise<string> {
  const trimmed = text.trim()
  if (!trimmed) return text

  log.info("enhancing", { length: trimmed.length })

  // Cap wall-clock time: aborting the signal is the only way to interrupt
  // an in-flight LLM stream.
  const aborter = new AbortController()
  const timer = setTimeout(() => aborter.abort(), ENHANCE_TIMEOUT_MS)

  try {
    // Prefer the provider's designated small model; fall back to the
    // provider's default model when no small model is configured.
    const preferred = await Provider.defaultModel()
    const model =
      (await Provider.getSmallModel(preferred.providerID)) ??
      (await Provider.getModel(preferred.providerID, preferred.modelID))

    // Ephemeral agent definition carrying the rewriter system prompt.
    const agent: Agent.Info = {
      name: ENHANCE_NAME,
      mode: "primary",
      hidden: true,
      options: {},
      permission: [],
      prompt: ENHANCE_SYSTEM_PROMPT,
      temperature: 0.7,
    }

    // Synthetic user message — placeholder IDs are fine since this never
    // enters the session store (see ENHANCE_ID note above its definition).
    const user: MessageV2.User = {
      id: ENHANCE_ID,
      sessionID: ENHANCE_ID,
      role: "user",
      time: { created: Date.now() },
      agent: ENHANCE_NAME,
      model: { providerID: model.providerID, modelID: model.id },
    }

    const stream = await LLM.stream({
      agent,
      user,
      system: [],
      small: true,
      tools: {},
      model,
      abort: aborter.signal,
      sessionID: ENHANCE_ID,
      retries: 2,
      messages: [{ role: "user", content: trimmed }],
    })

    // Drain the stream before awaiting .text — the Vercel AI SDK can leave
    // .text unresolved if the underlying stream is never consumed.
    for await (const _ of stream.fullStream) {
      // drain
    }
    const result = await stream.text.catch((err) => {
      log.error("failed to enhance prompt", { error: err })
      return undefined
    })
    if (!result) return text

    // Strip reasoning blocks and any wrapping the model added anyway; if
    // nothing survives, keep the user's original prompt.
    const cleaned = clean(stripThinkTags(result).trim())
    return cleaned || text
  } catch (err) {
    log.error("enhance prompt failed", { error: err })
    return text
  } finally {
    clearTimeout(timer)
  }
}
83 changes: 83 additions & 0 deletions packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@ import { useToast } from "../../ui/toast"
import { useKV } from "../../context/kv"
import { useTextareaKeybindings } from "../textarea-keybindings"
import { DialogSkill } from "../dialog-skill"
// altimate_change start - import prompt enhancement
import { enhancePrompt, isAutoEnhanceEnabled } from "@/altimate/enhance-prompt"
let enhancingInProgress = false
// altimate_change end

export type PromptProps = {
sessionID?: string
Expand Down Expand Up @@ -194,6 +198,53 @@ export function Prompt(props: PromptProps) {
dialog.clear()
},
},
// altimate_change start - add prompt enhance command
{
title: "Enhance prompt",
value: "prompt.enhance",
keybind: "prompt_enhance",
category: "Prompt",
enabled: !!store.prompt.input,
onSelect: async (dialog) => {
if (!store.prompt.input.trim()) return
dialog.clear()
const original = store.prompt.input
toast.show({
message: "Enhancing prompt...",
variant: "info",
duration: 2000,
})
try {
Comment on lines +208 to +217

This comment was marked as outdated.

const enhanced = await enhancePrompt(original)
// Guard against race condition: if user edited the prompt while
// enhancement was in-flight, discard the stale enhanced result
if (store.prompt.input !== original) return
if (enhanced !== original) {
input.setText(enhanced)
setStore("prompt", "input", enhanced)
input.gotoBufferEnd()
toast.show({
message: "Prompt enhanced",
variant: "success",
duration: 2000,
})
} else {
toast.show({
message: "Prompt already looks good",
variant: "info",
duration: 2000,
})
}
} catch {
toast.show({
message: "Failed to enhance prompt",
variant: "error",
duration: 3000,
})
}
},
},
// altimate_change end
{
title: "Paste",
value: "prompt.paste",
Expand Down Expand Up @@ -564,6 +615,32 @@ export function Prompt(props: PromptProps) {
const messageID = MessageID.ascending()
let inputText = store.prompt.input

// altimate_change start - auto-enhance prompt before expanding paste text
// Only enhance the raw user text, not shell commands or slash commands
// Guard prevents concurrent enhancement calls from rapid submissions
if (store.mode === "normal" && !inputText.startsWith("/") && !enhancingInProgress) {
try {
const autoEnhance = await isAutoEnhanceEnabled()
if (autoEnhance) {
enhancingInProgress = true
toast.show({ message: "Enhancing prompt...", variant: "info", duration: 2000 })
const enhanced = await enhancePrompt(inputText)
// Discard if user changed the prompt during enhancement
if (store.prompt.input !== inputText) return
if (enhanced !== inputText) {
Comment on lines +628 to +630
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Bug: An early return in the auto-enhance logic silently abandons prompt submission if the user edits the input while enhancement is running, orphaning the session.
Severity: HIGH

Suggested Fix

Remove the early return statement at line 629 within the submit() function. This will allow the submission process to continue with the original inputText that initiated the enhancement, preventing the silent failure and ensuring the user's prompt is processed as intended, even if they have typed additional text since.

Prompt for AI Agent
Review the code at the location below. A potential bug has been identified by an AI
agent.
Verify if this is a real issue. If it is, propose a fix; if not, explain why it's not
valid.

Location: packages/opencode/src/cli/cmd/tui/component/prompt/index.tsx#L628-L630

Potential issue: When the `auto_enhance_prompt` feature is enabled, if a user modifies
their prompt text while the enhancement is in progress, the `submit()` function will
exit prematurely. This occurs because a check `if (store.prompt.input !== inputText)` at
line 629 triggers an early `return`. This happens after a new session has been created
but before the prompt is sent to the server, resulting in an orphaned session with no
message. The user receives no feedback that the submission failed, and the input text
remains in the prompt field, leading to a silent failure of a core user action.

inputText = enhanced
setStore("prompt", "input", enhanced)
}
}
} catch (err) {
// Enhancement failure should never block prompt submission
console.error("auto-enhance failed, using original prompt", err)
} finally {
enhancingInProgress = false
}
}
// altimate_change end
Comment on lines +630 to +642

This comment was marked as outdated.


// Expand pasted text inline before submitting
const allExtmarks = input.extmarks.getAllForTypeId(promptPartTypeId)
const sortedExtmarks = allExtmarks.sort((a: { start: number }, b: { start: number }) => b.start - a.start)
Expand Down Expand Up @@ -653,6 +730,7 @@ export function Prompt(props: PromptProps) {
}
history.append({
...store.prompt,
input: inputText,
mode: currentMode,
})
input.extmarks.clear()
Expand Down Expand Up @@ -1155,6 +1233,11 @@ export function Prompt(props: PromptProps) {
<text fg={theme.text}>
{keybind.print("command_list")} <span style={{ fg: theme.textMuted }}>commands</span>
</text>
{/* altimate_change start - show enhance hint */}
<text fg={theme.text}>
{keybind.print("prompt_enhance")} <span style={{ fg: theme.textMuted }}>enhance</span>
</text>
{/* altimate_change end */}
</Match>
<Match when={store.mode === "shell"}>
<text fg={theme.text}>
Expand Down
11 changes: 11 additions & 0 deletions packages/opencode/src/config/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -866,6 +866,9 @@ export namespace Config {
agent_cycle: z.string().optional().default("tab").describe("Next agent"),
agent_cycle_reverse: z.string().optional().default("shift+tab").describe("Previous agent"),
variant_cycle: z.string().optional().default("ctrl+t").describe("Cycle model variants"),
// altimate_change start - add prompt enhance keybind
prompt_enhance: z.string().optional().default("<leader>i").describe("Enhance prompt with AI before sending"),
// altimate_change end
input_clear: z.string().optional().default("ctrl+c").describe("Clear input field"),
input_paste: z.string().optional().default("ctrl+v").describe("Paste from clipboard"),
input_submit: z.string().optional().default("return").describe("Submit input"),
Expand Down Expand Up @@ -1226,6 +1229,14 @@ export namespace Config {
.positive()
.optional()
.describe("Timeout in milliseconds for model context protocol (MCP) requests"),
// altimate_change start - auto-enhance prompt config
auto_enhance_prompt: z
.boolean()
.optional()
.describe(
"Automatically enhance prompts with AI before sending (default: false). Uses a small model to rewrite rough prompts into clearer versions.",
),
// altimate_change end
})
.optional(),
})
Expand Down
Loading
Loading