diff --git a/.github/meta/commit.txt b/.github/meta/commit.txt index f754bd0d45..f376b9108d 100644 --- a/.github/meta/commit.txt +++ b/.github/meta/commit.txt @@ -1,6 +1,15 @@ -fix: add missing `altimate_change` markers for `experimental` block in `opencode.jsonc` - -The `experimental` config added in #311 was missing upstream markers, -causing the Marker Guard CI check to fail on main. +fix: add try/catch and input sanitization to TUI install/create (#341) + +Root cause of silent failures: `onConfirm` async callbacks had no +try/catch, so any thrown error was swallowed and no result toast shown. + +Fixes: +- Wrap all install/create logic in try/catch with error toast +- Strip trailing dots from input (textarea was appending `.`) +- Strip `.git` suffix from URLs (users paste from browser) +- Trim whitespace and validate before proceeding +- "Installing..." toast now shows 60s duration with helpful text + ("This may take a moment while the repo is cloned") +- Empty input shows immediate error instead of proceeding Co-Authored-By: Claude Opus 4.6 (1M context) diff --git a/bun.lock b/bun.lock index 6507410dbe..11c1e238de 100644 --- a/bun.lock +++ b/bun.lock @@ -15,6 +15,7 @@ "@types/pg": "8.18.0", "@typescript/native-preview": "catalog:", "husky": "9.1.7", + "playwright-core": "1.58.2", "prettier": "3.6.2", "semver": "^7.6.0", "turbo": "2.8.13", diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md index 38c9ee6be6..4c8c7950fc 100644 --- a/docs/docs/quickstart.md +++ b/docs/docs/quickstart.md @@ -69,21 +69,43 @@ Auto-detects your dbt projects, warehouse credentials, and installed tools. 
See --- -## Step 4: Build Your First Artifact +## Step 4: Verify It Works -In the TUI, try these prompts or describe your own use case: +In the TUI, type a simple prompt to confirm everything is connected: +``` +What SQL anti-patterns does this query have: SELECT * FROM orders o JOIN customers c ON o.id = c.order_id WHERE UPPER(c.name) = 'ACME' ``` -Look at my snowflake account and do a comprehensive Analysis our Snowflake credit consumption over the last 30 days. After doing this generate a dashboard for my consumption. +If you connected a warehouse with `/discover`, try: ``` - +Show me the tables in my warehouse ``` -Build me a real time, interactive dashboard for my macbook system metrics and health. Use python, iceberg, dbt for various time slices. +If you have a dbt project, try: ``` +Scan my dbt project and summarize the models +``` + +--- + +## Step 5: Explore Data Engineering Features + +Once basics are working, explore these commands: + +| Command | What it does | +|---------|-------------| +| `/sql-review` | Review SQL for correctness, performance, and best practices | +| `/cost-report` | Analyze warehouse spending and find optimization opportunities | +| `/dbt-docs` | Generate or improve dbt model documentation | +| `/generate-tests` | Auto-generate dbt tests for your models | +| `/migrate-sql` | Translate SQL between warehouse dialects | +| `/ci-check` | Run pre-merge SQL quality validation on changed files | +| `/train @docs/style-guide.md` | Import team standards from documentation | + +**Pro tip:** Use `impact_analysis` before making breaking changes to understand which downstream dbt models will be affected. 
--- @@ -92,3 +114,4 @@ Build me a real time, interactive dashboard for my macbook system metrics and he - [Full Setup](getting-started.md): All warehouse configs, LLM providers, advanced setup - [Agent Modes](data-engineering/agent-modes.md): Choose the right agent for your task - [CI & Automation](data-engineering/guides/ci-headless.md): Run altimate in automated pipelines +- Train your AI teammate: Use `/teach` and `/train` to build team-specific knowledge that persists across sessions diff --git a/packages/opencode/src/altimate/tools/impact-analysis.ts b/packages/opencode/src/altimate/tools/impact-analysis.ts new file mode 100644 index 0000000000..6cc71d838f --- /dev/null +++ b/packages/opencode/src/altimate/tools/impact-analysis.ts @@ -0,0 +1,294 @@ +// altimate_change - Impact analysis tool for dbt DAG-aware change assessment +// +// Combines dbt manifest parsing with column-level lineage to show downstream +// impact of model/column changes across the entire DAG. +import z from "zod" +import { Tool } from "../../tool/tool" +import { Dispatcher } from "../native" + +export const ImpactAnalysisTool = Tool.define("impact_analysis", { + description: [ + "Analyze the downstream impact of a model or column change across the dbt DAG.", + "Combines dbt manifest parsing with column-level lineage to show all affected", + "models, tests, exposures, and sources. Use before making breaking changes to", + "understand blast radius.", + "", + "Examples:", + '- impact_analysis({ model: "stg_orders", change_type: "remove" })', + '- impact_analysis({ model: "stg_orders", column: "order_id", change_type: "rename" })', + '- impact_analysis({ manifest_path: "target/manifest.json", model: "dim_customers", change_type: "retype" })', + ].join("\n"), + parameters: z.object({ + model: z + .string() + .describe("dbt model name to analyze impact for (e.g., 'stg_orders', 'dim_customers')"), + column: z + .string() + .optional() + .describe("Specific column to trace impact for. 
If omitted, analyzes model-level impact."), + change_type: z + .enum(["remove", "rename", "retype", "add", "modify"]) + .describe("Type of change being considered"), + manifest_path: z + .string() + .optional() + .default("target/manifest.json") + .describe("Path to dbt manifest.json file"), + dialect: z + .string() + .optional() + .default("snowflake") + .describe("SQL dialect for lineage analysis"), + }), + // @ts-expect-error tsgo TS2719 false positive — identical pattern works in other tools + async execute(args, ctx) { + try { + // Step 1: Parse the dbt manifest to get the full DAG + const manifest = await Dispatcher.call("dbt.manifest", { path: args.manifest_path }) + + if (!manifest.models || manifest.models.length === 0) { + return { + title: "Impact: NO MANIFEST", + metadata: { success: false }, + output: `No models found in manifest at ${args.manifest_path}. Run \`dbt compile\` first to generate the manifest.`, + } + } + + // Step 2: Find the target model and its downstream dependents + const targetModel = manifest.models.find( + (m: { name: string }) => m.name === args.model || m.name.endsWith(`.${args.model}`), + ) + + if (!targetModel) { + const available = manifest.models + .slice(0, 10) + .map((m: { name: string }) => m.name) + .join(", ") + return { + title: "Impact: MODEL NOT FOUND", + metadata: { success: false }, + output: `Model "${args.model}" not found in manifest. Available models: ${available}${manifest.models.length > 10 ? 
` (+${manifest.models.length - 10} more)` : ""}`, + } + } + + // Step 3: Build the dependency graph and find all downstream models + const modelsByName = new Map() + for (const m of manifest.models) { + modelsByName.set(m.name, m) + } + + // Find all models that depend on the target (direct + transitive) + const downstream = findDownstream(args.model, manifest.models) + const direct = downstream.filter((d) => d.depth === 1) + const transitive = downstream.filter((d) => d.depth > 1) + + // Step 4: Report test count (manifest has test_count but not individual tests) + const affectedTestCount = manifest.test_count ?? 0 + + // Step 5: If column specified, attempt column-level lineage + let columnImpact: string[] = [] + if (args.column) { + try { + const lineageResult = await Dispatcher.call("lineage.check", { + sql: `SELECT * FROM ${args.model}`, // Use model reference for lineage tracing + dialect: args.dialect, + }) + if (lineageResult.data?.column_dict) { + // Find which downstream columns reference our target column + for (const [outCol, sources] of Object.entries(lineageResult.data.column_dict)) { + const srcArray = Array.isArray(sources) ? sources : [sources] + if (srcArray.some((s: any) => JSON.stringify(s).includes(args.column!))) { + columnImpact.push(outCol) + } + } + } + } catch { + // Column lineage is best-effort — continue without it + } + } + + // Step 6: Format the impact report + const output = formatImpactReport({ + model: args.model, + column: args.column, + changeType: args.change_type, + direct, + transitive, + affectedTestCount, + columnImpact, + totalModels: manifest.model_count, + }) + + const totalAffected = downstream.length + const severity = + totalAffected === 0 + ? "SAFE" + : totalAffected <= 3 + ? "LOW" + : totalAffected <= 10 + ? "MEDIUM" + : "HIGH" + + return { + title: `Impact: ${severity} — ${totalAffected} downstream model${totalAffected !== 1 ? 
"s" : ""} affected`, + metadata: { + success: true, + severity, + direct_count: direct.length, + transitive_count: transitive.length, + test_count: affectedTestCount, + column_impact: columnImpact.length, + }, + output, + } + } catch (e) { + const msg = e instanceof Error ? e.message : String(e) + return { + title: "Impact: ERROR", + metadata: { success: false }, + output: `Failed to analyze impact: ${msg}\n\nEnsure the dbt manifest exists (run \`dbt compile\`) and the dispatcher is running.`, + } + } + }, +}) + +interface DownstreamModel { + name: string + depth: number + materialized?: string + path: string[] +} + +function findDownstream( + targetName: string, + models: Array<{ name: string; depends_on: string[]; materialized?: string }>, +): DownstreamModel[] { + const results: DownstreamModel[] = [] + const visited = new Set() + + function walk(name: string, depth: number, path: string[]) { + for (const model of models) { + if (visited.has(model.name)) continue + const deps = model.depends_on.map((d) => d.split(".").pop()) + if (deps.includes(name)) { + visited.add(model.name) + const newPath = [...path, model.name] + results.push({ + name: model.name, + depth, + materialized: model.materialized, + path: newPath, + }) + walk(model.name, depth + 1, newPath) + } + } + } + + walk(targetName, 1, [targetName]) + return results +} + +function formatImpactReport(data: { + model: string + column?: string + changeType: string + direct: DownstreamModel[] + transitive: DownstreamModel[] + affectedTestCount: number + columnImpact: string[] + totalModels: number +}): string { + const lines: string[] = [] + + // Header + const target = data.column ? `${data.model}.${data.column}` : data.model + lines.push(`Impact Analysis: ${data.changeType.toUpperCase()} ${target}`) + lines.push("".padEnd(60, "=")) + + const totalAffected = data.direct.length + data.transitive.length + const pct = data.totalModels > 0 ? 
((totalAffected / data.totalModels) * 100).toFixed(1) : "0" + lines.push(`Blast radius: ${totalAffected}/${data.totalModels} models (${pct}%)`) + lines.push("") + + // Risk assessment + if (data.changeType === "remove" && totalAffected > 0) { + lines.push("WARNING: This is a BREAKING change. All downstream models will fail.") + lines.push("") + } else if (data.changeType === "rename" && totalAffected > 0) { + lines.push("WARNING: Rename requires updating all downstream references.") + lines.push("") + } else if (data.changeType === "retype" && totalAffected > 0) { + lines.push("CAUTION: Type change may cause implicit casts or failures in downstream models.") + lines.push("") + } + + // Direct dependents + if (data.direct.length > 0) { + lines.push(`Direct Dependents (${data.direct.length})`) + lines.push("".padEnd(40, "-")) + for (const d of data.direct) { + const mat = d.materialized ? ` [${d.materialized}]` : "" + lines.push(` ${d.name}${mat}`) + } + lines.push("") + } + + // Transitive dependents + if (data.transitive.length > 0) { + lines.push(`Transitive Dependents (${data.transitive.length})`) + lines.push("".padEnd(40, "-")) + for (const d of data.transitive) { + const mat = d.materialized ? ` [${d.materialized}]` : "" + const path = d.path.join(" → ") + lines.push(` ${d.name}${mat} (via: ${path})`) + } + lines.push("") + } + + // Column impact + if (data.column && data.columnImpact.length > 0) { + lines.push(`Affected Output Columns (${data.columnImpact.length})`) + lines.push("".padEnd(40, "-")) + for (const col of data.columnImpact) { + lines.push(` ${col}`) + } + lines.push("") + } + + // Affected tests + if (data.affectedTestCount > 0) { + lines.push(`Tests in project: ${data.affectedTestCount}`) + lines.push("".padEnd(40, "-")) + lines.push(` Run \`dbt test\` to verify all ${data.affectedTestCount} tests still pass after this change.`) + lines.push("") + } + + // No impact + if (totalAffected === 0) { + lines.push("No downstream models depend on this. 
Change is safe to make.") + } + + // Recommendations + if (totalAffected > 0) { + lines.push("Recommended Actions") + lines.push("".padEnd(40, "-")) + if (data.changeType === "remove") { + lines.push("1. Update all downstream models to remove references") + lines.push("2. Run `dbt test` to verify no broken references") + lines.push("3. Consider deprecation period before removal") + } else if (data.changeType === "rename") { + lines.push("1. Update all downstream SQL references to new name") + lines.push("2. Run `dbt compile` to verify all models compile") + lines.push("3. Run `dbt test` to verify correctness") + } else if (data.changeType === "retype") { + lines.push("1. Check downstream models for implicit type casts") + lines.push("2. Verify aggregations and joins still work correctly") + lines.push("3. Run `dbt test` with data validation") + } else { + lines.push("1. Review downstream models for compatibility") + lines.push("2. Run `dbt compile` and `dbt test`") + } + } + + return lines.join("\n") +} diff --git a/packages/opencode/src/altimate/tools/training-import.ts b/packages/opencode/src/altimate/tools/training-import.ts new file mode 100644 index 0000000000..194a81d141 --- /dev/null +++ b/packages/opencode/src/altimate/tools/training-import.ts @@ -0,0 +1,232 @@ +// altimate_change - Bulk training import from markdown documents +// +// Enables enterprise teams to import existing style guides, naming conventions, +// glossaries, and standards from markdown documents into the training system. 
+import z from "zod" +import { Tool } from "../../tool/tool" +import { Log } from "../../util/log" +import { TrainingStore, TrainingPrompt } from "../training" +import { TrainingKind, TRAINING_MAX_PATTERNS_PER_KIND } from "../training/types" + +const log = Log.create({ service: "tool.training_import" }) + +export const TrainingImportTool = Tool.define("training_import", { + description: [ + "Import training entries from a markdown document (style guide, naming conventions, glossary, playbook).", + "Parses markdown headings as entry names and content as training material.", + "", + "Use this to bulk-load team standards from existing documentation. Each H2 (##) section", + "becomes a separate training entry. H1 (#) sections are used as context prefixes.", + "", + "Examples:", + '- training_import({ file_path: "docs/sql-style-guide.md", kind: "standard" })', + '- training_import({ file_path: "docs/glossary.md", kind: "glossary" })', + '- training_import({ file_path: "docs/incident-playbook.md", kind: "playbook", dry_run: true })', + ].join("\n"), + parameters: z.object({ + file_path: z.string().describe("Path to markdown document to import"), + kind: TrainingKind.describe("What kind of training entries to extract"), + scope: z + .enum(["global", "project"]) + .default("project") + .describe("'project' to share with team via git, 'global' for personal preferences"), + dry_run: z + .boolean() + .default(true) + .describe("Preview what would be imported without saving. 
Set to false to actually import."), + max_entries: z + .number() + .optional() + .default(20) + .describe("Maximum number of entries to import from the document"), + }), + // @ts-expect-error tsgo TS2719 false positive — identical pattern works in other tools + async execute(args, ctx) { + try { + // Read the markdown file + const fs = await import("fs/promises") + const content = await fs.readFile(args.file_path, "utf-8") + + // Parse markdown sections + const sections = parseMarkdownSections(content) + + if (sections.length === 0) { + return { + title: "Import: NO SECTIONS FOUND", + metadata: { success: false, count: 0 }, + output: `No importable sections found in ${args.file_path}.\n\nExpected format: Use ## headings to define sections. Each ## heading becomes a training entry.`, + } + } + + // Check current capacity + const scopeForCount = args.scope === "global" ? "global" : "project" + const existing = await TrainingStore.count({ kind: args.kind, scope: scopeForCount }) + const currentCount = existing[args.kind] ?? 0 + const available = TRAINING_MAX_PATTERNS_PER_KIND - currentCount + const toImport = sections.slice(0, Math.min(args.max_entries, sections.length)) + + if (args.dry_run) { + // Preview mode + const lines: string[] = [ + `Dry run — preview of import from ${args.file_path}`, + `Kind: ${args.kind} | Scope: ${args.scope}`, + `Sections found: ${sections.length} | Will import: ${Math.min(toImport.length, available)}`, + `Current entries: ${currentCount}/${TRAINING_MAX_PATTERNS_PER_KIND}`, + "", + ] + + if (toImport.length > available) { + lines.push(`WARNING: Only ${available} slots available. ${toImport.length - available} entries will be skipped.`) + lines.push("") + } + + for (let i = 0; i < toImport.length; i++) { + const s = toImport[i] + const willImport = i < available + const prefix = willImport ? "+" : "SKIP" + const preview = s.content.length > 120 ? s.content.slice(0, 120) + "..." 
: s.content + lines.push(`[${prefix}] ${s.name} (${s.content.length} chars)`) + lines.push(` ${preview}`) + lines.push("") + } + + lines.push("Set dry_run=false to import these entries.") + + return { + title: `Import preview: ${Math.min(toImport.length, available)} entries from ${args.file_path}`, + metadata: { success: true, count: Math.min(toImport.length, available), dry_run: true }, + output: lines.join("\n"), + } + } + + // Actual import + let imported = 0 + let skipped = 0 + const results: string[] = [] + + for (const section of toImport) { + if (imported >= available) { + skipped++ + continue + } + + try { + await TrainingStore.save({ + kind: args.kind, + name: section.name, + scope: args.scope, + content: section.content.slice(0, 1800), // Enforce max content length + source: args.file_path, + }) + imported++ + results.push(` + ${section.name}`) + } catch (e) { + const msg = e instanceof Error ? e.message : String(e) + results.push(` FAIL ${section.name}: ${msg}`) + skipped++ + } + } + + // Budget usage + const budgetUsed = await TrainingPrompt.budgetUsage() + + const output = [ + `Imported ${imported} ${args.kind} entries from ${args.file_path}`, + skipped > 0 ? `Skipped: ${skipped} (limit reached or errors)` : "", + "", + ...results, + "", + `Training usage: ${budgetUsed.used}/${budgetUsed.budget} chars (${budgetUsed.percent}% full).`, + args.scope === "project" ? "These entries will be shared with your team when committed to git." : "", + ] + .filter(Boolean) + .join("\n") + + return { + title: `Import: ${imported} ${args.kind} entries saved`, + metadata: { success: true, count: imported, skipped }, + output, + } + } catch (e) { + const msg = e instanceof Error ? 
e.message : String(e) + log.error("failed to import training", { file: args.file_path, kind: args.kind, error: msg }) + return { + title: "Import: ERROR", + metadata: { success: false, count: 0 }, + output: `Failed to import training from ${args.file_path}: ${msg}`, + } + } + }, +}) + +interface MarkdownSection { + name: string + content: string +} + +function parseMarkdownSections(markdown: string): MarkdownSection[] { + const sections: MarkdownSection[] = [] + const lines = markdown.split("\n") + let currentH1 = "" + let currentName = "" + let currentContent: string[] = [] + + for (const line of lines) { + // H1 — used as context prefix + if (line.match(/^#\s+/)) { + // Save previous section if any + if (currentName && currentContent.length > 0) { + sections.push({ + name: slugify(currentName), + content: currentContent.join("\n").trim(), + }) + } + currentH1 = line.replace(/^#\s+/, "").trim() + currentName = "" + currentContent = [] + continue + } + + // H2 — each becomes a training entry + if (line.match(/^##\s+/)) { + // Save previous section + if (currentName && currentContent.length > 0) { + sections.push({ + name: slugify(currentName), + content: currentContent.join("\n").trim(), + }) + } + currentName = line.replace(/^##\s+/, "").trim() + if (currentH1) { + currentContent = [`Context: ${currentH1}`, ""] + } else { + currentContent = [] + } + continue + } + + // H3+ — include as content within current section + if (currentName) { + currentContent.push(line) + } + } + + // Save last section + if (currentName && currentContent.length > 0) { + sections.push({ + name: slugify(currentName), + content: currentContent.join("\n").trim(), + }) + } + + return sections +} + +function slugify(text: string): string { + return text + .toLowerCase() + .replace(/[^a-z0-9\s-]/g, "") + .replace(/\s+/g, "-") + .replace(/^-+|-+$/g, "") + .slice(0, 64) +} diff --git a/packages/opencode/src/altimate/training/types.ts b/packages/opencode/src/altimate/training/types.ts index 
1a813d6dc3..a5e90f1985 100644 --- a/packages/opencode/src/altimate/training/types.ts +++ b/packages/opencode/src/altimate/training/types.ts @@ -3,9 +3,15 @@ import z from "zod" export const TRAINING_TAG = "training" export const TRAINING_ID_PREFIX = "training" -export const TRAINING_MAX_PATTERNS_PER_KIND = 20 -// Budget scales with available context. Default is generous; users can override via config. -export const TRAINING_BUDGET = 16000 +// altimate_change start — increase training limits for enterprise teams +// 20 entries per kind is too restrictive for teams with 200+ dbt models spanning +// multiple domains. 50 entries accommodates real enterprise glossaries, style guides, +// and domain-specific conventions. +export const TRAINING_MAX_PATTERNS_PER_KIND = 50 +// Budget scales with available context. 48KB accommodates enterprise teams with +// rich glossaries, standards, and playbooks across multiple data domains. +export const TRAINING_BUDGET = 48000 +// altimate_change end export const TrainingKind = z.enum(["pattern", "rule", "glossary", "standard", "context", "playbook"]) export type TrainingKind = z.infer diff --git a/packages/opencode/src/cli/cmd/run.ts b/packages/opencode/src/cli/cmd/run.ts index cc8fd5dc0c..08a9bbb7a3 100644 --- a/packages/opencode/src/cli/cmd/run.ts +++ b/packages/opencode/src/cli/cmd/run.ts @@ -351,6 +351,12 @@ export const RunCommand = cmd({ describe: "enable session tracing (default: true, disable with --no-trace)", default: true, }) + // altimate_change start — budget limits for CI/enterprise governance + .option("max-turns", { + type: "number", + describe: "maximum number of assistant turns before aborting the session", + }) + // altimate_change end }, handler: async (args) => { let message = [...args.message, ...(args["--"] || [])] @@ -549,6 +555,10 @@ You are speaking to a non-technical business executive. 
Follow these rules stric async function loop() { const toggles = new Map() + // altimate_change start — max-turns budget enforcement + let turnCount = 0 + const maxTurns = args.maxTurns + // altimate_change end for await (const event of events.stream) { if ( @@ -603,6 +613,18 @@ You are speaking to a non-technical business executive. Follow these rules stric if (part.type === "step-start") { tracer?.logStepStart(part) + // altimate_change start — enforce max-turns budget + turnCount++ + if (maxTurns && turnCount > maxTurns) { + error = `Budget exceeded: reached ${maxTurns} assistant turn${maxTurns !== 1 ? "s" : ""} limit` + UI.println( + UI.Style.TEXT_DANGER_BOLD + "!", + UI.Style.TEXT_NORMAL + ` ${error}. Aborting session.`, + ) + await sdk.session.abort({ sessionID }) + break + } + // altimate_change end if (emit("step_start", { part })) continue } @@ -664,18 +686,40 @@ You are speaking to a non-technical business executive. Follow these rules stric if (event.type === "permission.asked") { const permission = event.properties if (permission.sessionID !== sessionID) continue - // altimate_change start - yolo mode: auto-approve instead of auto-reject + // altimate_change start - yolo mode: auto-approve but respect explicit deny rules const yolo = args.yolo || Flag.ALTIMATE_CLI_YOLO if (yolo) { - UI.println( - UI.Style.TEXT_WARNING_BOLD + "!", - UI.Style.TEXT_NORMAL + - `yolo mode: auto-approved ${permission.permission} (${permission.patterns.join(", ")})`, + // Check if any pattern matches an explicit deny rule from the session config + const isDenied = rules.some( + (r) => + r.action === "deny" && + r.permission === permission.permission && + permission.patterns.some((p) => { + if (r.pattern === "*") return true + return p.includes(r.pattern) || r.pattern.includes(p) + }), ) - await sdk.permission.reply({ - requestID: permission.id, - reply: "once", - }) + if (isDenied) { + UI.println( + UI.Style.TEXT_DANGER_BOLD + "!", + UI.Style.TEXT_NORMAL + + `yolo mode: 
BLOCKED by deny rule: ${permission.permission} (${permission.patterns.join(", ")})`, + ) + await sdk.permission.reply({ + requestID: permission.id, + reply: "reject", + }) + } else { + UI.println( + UI.Style.TEXT_WARNING_BOLD + "!", + UI.Style.TEXT_NORMAL + + `yolo mode: auto-approved ${permission.permission} (${permission.patterns.join(", ")})`, + ) + await sdk.permission.reply({ + requestID: permission.id, + reply: "once", + }) + } } else { UI.println( UI.Style.TEXT_WARNING_BOLD + "!", diff --git a/packages/opencode/src/cli/cmd/tui/component/tips.tsx b/packages/opencode/src/cli/cmd/tui/component/tips.tsx index a005d0b2a4..d3b9d9cc09 100644 --- a/packages/opencode/src/cli/cmd/tui/component/tips.tsx +++ b/packages/opencode/src/cli/cmd/tui/component/tips.tsx @@ -30,9 +30,29 @@ function parse(tip: string): TipPart[] { return parts } -export function Tips() { +// altimate_change start — prioritized beginner & data engineering tips +const BEGINNER_TIPS = [ + "Run {highlight}/connect{/highlight} to add your API key and get started", + "Run {highlight}/discover{/highlight} to auto-detect your dbt project and warehouse connections", + "Press {highlight}Ctrl+P{/highlight} to see all available commands", + "Press {highlight}Tab{/highlight} to cycle between Build and Plan agents", + "Use {highlight}/cost-report{/highlight} to analyze warehouse spending", + "Use {highlight}/dbt-docs{/highlight} to generate dbt model documentation", + "Use {highlight}/generate-tests{/highlight} to auto-generate dbt tests for your models", + "Use {highlight}/sql-review{/highlight} to review SQL for correctness and performance", + "Use {highlight}/migrate-sql{/highlight} to translate SQL between warehouse dialects", + "Use {highlight}/ci-check{/highlight} to run pre-merge SQL validation on changed files", + "Ask me to analyze a SQL query for anti-patterns — I'll detect 19+ issue types with zero false positives", + "Ask me to trace column-level lineage for any SQL query across dialects", +] +// 
altimate_change end + +// altimate_change start — first-time user beginner tips +export function Tips(props: { isFirstTime?: boolean }) { const theme = useTheme().theme - const parts = parse(TIPS[Math.floor(Math.random() * TIPS.length)]) + const pool = props.isFirstTime ? BEGINNER_TIPS : TIPS + const parts = parse(pool[Math.floor(Math.random() * pool.length)]) + // altimate_change end return ( diff --git a/packages/opencode/src/cli/cmd/tui/routes/home.tsx b/packages/opencode/src/cli/cmd/tui/routes/home.tsx index 865553899f..d16bc5d15a 100644 --- a/packages/opencode/src/cli/cmd/tui/routes/home.tsx +++ b/packages/opencode/src/cli/cmd/tui/routes/home.tsx @@ -41,8 +41,7 @@ export function Home() { const isFirstTimeUser = createMemo(() => sync.data.session.length === 0) const tipsHidden = createMemo(() => kv.get("tips_hidden", false)) const showTips = createMemo(() => { - // Don't show tips for first-time users - if (isFirstTimeUser()) return false + // Always show tips — first-time users need guidance the most return !tipsHidden() }) @@ -127,9 +126,28 @@ export function Home() { workspaceID={route.workspaceID} /> + {/* altimate_change start — first-time onboarding hint */} + + + + Get started: + /connect + to add your API key + · + /discover + to detect your data stack + · + Ctrl+P + for all commands + + + + {/* altimate_change end */} - + {/* altimate_change start — pass first-time flag for beginner tips */} + + {/* altimate_change end */} diff --git a/packages/opencode/src/command/template/ci-check.txt b/packages/opencode/src/command/template/ci-check.txt new file mode 100644 index 0000000000..9a4e5d263a --- /dev/null +++ b/packages/opencode/src/command/template/ci-check.txt @@ -0,0 +1,51 @@ +You are running altimate-code in CI/pre-merge validation mode. Analyze all changed SQL and dbt files for issues. 
+ +Step 1 — Identify changed files: +Run bash to find changed files: `git diff --name-only HEAD~1...HEAD -- '*.sql' '*.yml' '*.yaml'` +If a base branch is available (e.g., main), use: `git diff --name-only origin/main...HEAD -- '*.sql' '*.yml' '*.yaml'` + +If no files changed, report "No SQL or dbt files changed — all clear" and stop. + +Step 2 — Analyze each changed SQL file: +For each `.sql` file found: +- Read the file contents +- Run `sql_analyze` with the appropriate dialect (auto-detect from project config or default to snowflake) +- Run `lineage_check` to verify lineage is not broken +- If any WARNING or ERROR severity issues found, collect them + +Step 3 — Check dbt project integrity (if dbt detected): +- Run `project_scan` to detect dbt project +- If manifest exists, run `dbt_manifest` to parse it +- For each changed model, run `impact_analysis` to assess downstream risk +- Flag any changes with HIGH blast radius (>10 downstream models) + +Step 4 — Run schema validation: +For each changed SQL file: +- Run `altimate_core_validate` to check syntax correctness +- Run `altimate_core_check` for additional quality checks + +Step 5 — Generate summary report: +Present results in a CI-friendly format: + +### SQL Quality Report + +**Files analyzed:** N +**Issues found:** N (X errors, Y warnings) +**Downstream impact:** N models affected + +#### Issues by File +For each file with issues: +- File path +- Issue type, severity, message +- Recommendation + +#### Impact Assessment +For models with downstream dependents: +- Model name +- Number of affected downstream models +- Risk level (LOW/MEDIUM/HIGH) + +If all checks pass, report: "All SQL quality checks passed. Safe to merge." +If blocking issues found, clearly state: "BLOCKING: N issues must be resolved before merge." 
+ +$ARGUMENTS diff --git a/packages/opencode/src/command/template/discover.txt b/packages/opencode/src/command/template/discover.txt index 3b459c00cf..6fec8b9dbd 100644 --- a/packages/opencode/src/command/template/discover.txt +++ b/packages/opencode/src/command/template/discover.txt @@ -13,7 +13,20 @@ Summarize the scan results in a friendly way: - Installed data tools (dbt, sqlfluff, etc.) - Configuration files found -Step 3 — Set up new connections: +altimate_change start — detect additional cloud credentials +Step 3 — Check for additional cloud warehouse credentials: +Beyond what project_scan found, also check for: +- `~/.snowsql/config` for Snowflake connections (parse [connections] sections) +- `GOOGLE_APPLICATION_CREDENTIALS` env var for BigQuery service accounts +- `DATABASE_URL` env var for PostgreSQL/MySQL/Redshift connection strings +- `PGHOST`/`PGUSER`/`PGDATABASE` env vars for PostgreSQL connections +- `DATABRICKS_HOST`/`DATABRICKS_TOKEN` env vars for Databricks +- `~/.bigqueryrc` or `gcloud` config for BigQuery project ID +- `~/.aws/credentials` for Redshift connections (if combined with cluster endpoint env vars) + +Present any new credentials found and ask the user if they want to add them. + +Step 4 — Set up new connections: For each NEW warehouse connection discovered (not already configured): - Present the connection details and ask the user if they want to add it - If yes, call `warehouse_add` with the detected configuration @@ -23,7 +36,7 @@ For each NEW warehouse connection discovered (not already configured): Skip this step if there are no new connections to add. 
-Step 4 — Index schemas: +Step 5 — Index schemas: If any warehouses are connected but not yet indexed in the schema cache: - Ask the user if they want to index schemas now (explain this enables autocomplete, search, and context-aware analysis) - If yes, call `schema_index` for each selected warehouse @@ -31,7 +44,7 @@ If any warehouses are connected but not yet indexed in the schema cache: Skip this step if all connected warehouses are already indexed or if no warehouses are connected. -Step 5 — Show next steps: +Step 6 — Show next steps: Present a summary of what was set up, then suggest what the user can do next: **Available skills:** @@ -40,6 +53,8 @@ Present a summary of what was set up, then suggest what the user can do next: - `/generate-tests` — Auto-generate dbt tests for your models - `/sql-review` — Review SQL for correctness, performance, and best practices - `/migrate-sql` — Translate SQL between warehouse dialects +- `/ci-check` — Run pre-merge SQL quality validation on changed files +- `/train @docs/style-guide.md` — Import team standards from documentation **Agent modes to explore:** - `analyst` — Deep-dive into data quality, lineage, and schema questions @@ -53,3 +68,4 @@ Present a summary of what was set up, then suggest what the user can do next: - `sql_execute` — Run queries against any connected warehouse $ARGUMENTS +altimate_change end diff --git a/packages/opencode/src/tool/registry.ts b/packages/opencode/src/tool/registry.ts index b96192693a..075291248f 100644 --- a/packages/opencode/src/tool/registry.ts +++ b/packages/opencode/src/tool/registry.ts @@ -112,6 +112,10 @@ import { TrainingSaveTool } from "../altimate/tools/training-save" import { TrainingListTool } from "../altimate/tools/training-list" import { TrainingRemoveTool } from "../altimate/tools/training-remove" // altimate_change end +// altimate_change start - import impact analysis and training import tools +import { ImpactAnalysisTool } from "../altimate/tools/impact-analysis" 
+import { TrainingImportTool } from "../altimate/tools/training-import" +// altimate_change end export namespace ToolRegistry { const log = Log.create({ service: "tool.registry" }) @@ -278,7 +282,10 @@ export namespace ToolRegistry { ...(!Flag.ALTIMATE_DISABLE_MEMORY ? [MemoryReadTool, MemoryWriteTool, MemoryDeleteTool, MemoryAuditTool, ...(Flag.ALTIMATE_MEMORY_AUTO_EXTRACT ? [MemoryExtractTool] : [])] : []), // altimate_change end // altimate_change start - register training tools for AI teammate - ...(!Flag.ALTIMATE_DISABLE_TRAINING ? [TrainingSaveTool, TrainingListTool, TrainingRemoveTool] : []), + ...(!Flag.ALTIMATE_DISABLE_TRAINING ? [TrainingSaveTool, TrainingListTool, TrainingRemoveTool, TrainingImportTool] : []), + // altimate_change end + // altimate_change start - register impact analysis tool + ImpactAnalysisTool, // altimate_change end ...custom, ] diff --git a/packages/opencode/test/altimate/tracing-viewer-e2e.test.ts b/packages/opencode/test/altimate/tracing-viewer-e2e.test.ts index 6e9170ecaa..571e064ef5 100644 --- a/packages/opencode/test/altimate/tracing-viewer-e2e.test.ts +++ b/packages/opencode/test/altimate/tracing-viewer-e2e.test.ts @@ -173,7 +173,7 @@ async function activeView(page: Page) { /** Count JS errors on the page */ function collectErrors(page: Page): string[] { const errors: string[] = [] - page.on("pageerror", (err) => errors.push(err.message)) + page.on("pageerror", (err: Error) => errors.push(err.message)) return errors } @@ -322,7 +322,7 @@ describe.skipIf(!canRunBrowserTests)("Trace Viewer E2E", () => { // Check each entry points to the right span for (let i = 0; i < entries.length; i++) { const detail = await clickLogEntry(page, i) - expect(detail).toBe(entries[i].name) + expect(detail).toBe(entries[i]!.name as string | null) } await page.close() }) diff --git a/packages/opencode/test/training/tools.test.ts b/packages/opencode/test/training/tools.test.ts index 236a5020fc..25909e3d31 100644 --- 
a/packages/opencode/test/training/tools.test.ts +++ b/packages/opencode/test/training/tools.test.ts @@ -141,9 +141,11 @@ describe("training meta roundtrip through content", () => { describe("TRAINING_MAX_PATTERNS_PER_KIND", () => { test("is a reasonable limit", () => { - expect(TRAINING_MAX_PATTERNS_PER_KIND).toBe(20) + // altimate_change start — limit increased from 20 to 50 for enterprise teams + expect(TRAINING_MAX_PATTERNS_PER_KIND).toBe(50) expect(TRAINING_MAX_PATTERNS_PER_KIND).toBeGreaterThan(0) - expect(TRAINING_MAX_PATTERNS_PER_KIND).toBeLessThanOrEqual(50) + expect(TRAINING_MAX_PATTERNS_PER_KIND).toBeLessThanOrEqual(100) + // altimate_change end }) })