diff --git a/.github/workflows/no-deadlock-fitness-gates.yaml b/.github/workflows/no-deadlock-fitness-gates.yaml new file mode 100644 index 000000000..6c573d0a6 --- /dev/null +++ b/.github/workflows/no-deadlock-fitness-gates.yaml @@ -0,0 +1,71 @@ +name: No-Deadlock Fitness Gates + +on: + pull_request: + paths: + - "packages/bytebot-workflow-orchestrator/**" + - "packages/bytebot-temporal-worker/**" + push: + branches: + - main + paths: + - "packages/bytebot-workflow-orchestrator/**" + - "packages/bytebot-temporal-worker/**" + +permissions: + contents: read + +jobs: + orchestrator-internal-contracts: + name: Orchestrator internal API contracts + runs-on: ubuntu-22.04 + timeout-minutes: 20 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: "20" + cache: "npm" + cache-dependency-path: | + packages/bytebot-workflow-orchestrator/package-lock.json + + - name: Install orchestrator dependencies + run: | + cd packages/bytebot-workflow-orchestrator + npm ci + + - name: Run internal controller contract tests + run: | + cd packages/bytebot-workflow-orchestrator + npm test -- internal.controller.contracts.spec.ts + + temporal-execution-contracts: + name: Temporal worker execution contracts + runs-on: ubuntu-22.04 + timeout-minutes: 20 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: "20" + cache: "npm" + cache-dependency-path: | + packages/bytebot-temporal-worker/package-lock.json + + - name: Install temporal-worker dependencies + run: | + cd packages/bytebot-temporal-worker + npm ci + + - name: Run execution activity contract tests + run: | + cd packages/bytebot-temporal-worker + npm test -- execution.activities.spec.ts diff --git a/.github/workflows/temporal-update-contract.yaml b/.github/workflows/temporal-update-contract.yaml new file mode 100644 index 000000000..897c0029d --- /dev/null +++ b/.github/workflows/temporal-update-contract.yaml @@ -0,0 +1,45 @@ +name: Temporal Update Contract + +on: + pull_request: + paths: + - "packages/bytebot-temporal-worker/**" + - "packages/bytebot-workflow-orchestrator/**" + push: + branches: + - main + paths: + - "packages/bytebot-temporal-worker/**" + - "packages/bytebot-workflow-orchestrator/**" + +permissions: + contents: read + +jobs: + temporal-update-contract: + name: userPromptResolved Update contract + runs-on: ubuntu-22.04 + timeout-minutes: 20 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: "20" + cache: "npm" + cache-dependency-path: | + packages/bytebot-temporal-worker/package-lock.json + + - name: Install temporal-worker dependencies + run: | + cd packages/bytebot-temporal-worker + npm ci + + - name: Run Temporal workflow contract tests + run: | + cd packages/bytebot-temporal-worker + npm run test:workflow + diff --git a/packages/bytebot-agent/package-lock.json b/packages/bytebot-agent/package-lock.json index f54f0995b..2a3cdfd8d 100644 --- a/packages/bytebot-agent/package-lock.json +++ b/packages/bytebot-agent/package-lock.json @@ -1,12 +1,12 @@ { "name": "bytebot-agent", - "version": "0.0.1", + "version": "2.5.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "bytebot-agent", - "version": "0.0.1", + "version": "2.5.0", "license": "UNLICENSED", "dependencies": { "@anthropic-ai/sdk": "^0.39.0", @@ -21,10 +21,14 @@ "@nestjs/schedule": "^6.0.0", "@nestjs/websockets": "^11.1.1", "@prisma/client": "^6.16.1", + "@socket.io/redis-adapter": "^8.3.0", "@thallesp/nestjs-better-auth": "^1.0.0", + "async-mutex": "^0.5.0", "class-transformer": "^0.5.1", "class-validator": "^0.14.2", "openai": "^5.8.2", + "prom-client": "^15.1.3", + "redis": "^4.7.0", "reflect-metadata": "^0.2.2", "rxjs": "^7.8.1", "socket.io": "^4.8.1", @@ -2907,6 +2911,15 @@ "npm": ">=5.10.0" } }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "license": "Apache-2.0", + "engines": { + "node": ">=8.0.0" + } + }, "node_modules/@paralleldrive/cuid2": { "version": "2.2.2", "resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.2.2.tgz", @@ -3073,6 +3086,64 @@ "@prisma/debug": "6.16.1" } }, + "node_modules/@redis/bloom": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@redis/bloom/-/bloom-1.2.0.tgz", + "integrity": "sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/client": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@redis/client/-/client-1.6.1.tgz", + "integrity": "sha512-/KCsg3xSlR+nCK8/8ZYSknYxvXHwubJrU82F3Lm1Fp6789VQ0/3RJKfsmRXjqfaTA++23CvC3hqmqe/2GEt6Kw==", + "dependencies": { + "cluster-key-slot": "1.1.2", + "generic-pool": "3.9.0", + "yallist": "4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@redis/client/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/@redis/graph": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@redis/graph/-/graph-1.1.1.tgz", + "integrity": "sha512-FEMTcTHZozZciLRl6GiiIB4zGm5z5F3F6a6FZCyrfxdKOhFlGkiAqlexWMBzCi4DcRoyiOsuLfW+cjlGWyExOw==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/json": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@redis/json/-/json-1.0.7.tgz", + "integrity": "sha512-6UyXfjVaTBTJtKNG4/9Z8PSpKE6XgSyEb8iwaqDcy+uKrd/DGYHTWkUdnQDyzm727V7p21WUMhsqz5oy65kPcQ==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/search": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@redis/search/-/search-1.2.0.tgz", + "integrity": "sha512-tYoDBbtqOVigEDMAcTGsRlMycIIjwMCgD8eR2t0NANeQmgK/lvxNAvYyb6bZDD4frHRhIHkJu2TBRvB0ERkOmw==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, + "node_modules/@redis/time-series": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@redis/time-series/-/time-series-1.1.0.tgz", + "integrity": "sha512-c1Q99M5ljsIuc4YdaCwfUEXsofakb9c8+Zse2qxTadu8TalLXuAESzLvFAvNVbkmSlvlzIQOLpBCmWI9wTOt+g==", + "peerDependencies": { + "@redis/client": "^1.0.0" + } + }, "node_modules/@sec-ant/readable-stream": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz", @@ -3149,6 +3220,38 @@ "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==" }, + "node_modules/@socket.io/redis-adapter": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/@socket.io/redis-adapter/-/redis-adapter-8.3.0.tgz", + "integrity": "sha512-ly0cra+48hDmChxmIpnESKrc94LjRL80TEmZVscuQ/WWkRP81nNj8W8cCGMqbI4L6NCuAaPRSzZF1a9GlAxxnA==", + "dependencies": { + "debug": "~4.3.1", + "notepack.io": "~3.0.1", + "uid2": "1.0.0" + }, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "socket.io-adapter": "^2.5.4" + } + }, + "node_modules/@socket.io/redis-adapter/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, "node_modules/@standard-schema/spec": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz", @@ -5209,6 +5312,14 @@ "dev": true, "license": "MIT" }, + "node_modules/async-mutex": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.5.0.tgz", + "integrity": "sha512-1A94B18jkJ3DYq284ohPxoXbfTA5HsQ7/Mf4DEhcyLx3Bz27Rh59iScbB6EPiP+B+joue6YCxcMXSbFC1tZKwA==", + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", @@ -5490,6 +5601,12 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/bintrees": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bintrees/-/bintrees-1.0.2.tgz", + "integrity": "sha512-VOMgTMwjAaUG580SXn3LacVgjurrbMme7ZZNYGSSV7mmtY6QQRh0Eg3pwIcntQ77DErK1L0NxkbetjcoXzVwKw==", + "license": "MIT" + }, "node_modules/bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", @@ -6038,6 +6155,14 @@ "node": ">=0.8" } }, + "node_modules/cluster-key-slot": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz", + "integrity": "sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/co": { "version": "4.6.0", "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", @@ -7836,6 +7961,14 @@ "node": ">=14" } }, + "node_modules/generic-pool": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/generic-pool/-/generic-pool-3.9.0.tgz", + "integrity": "sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==", + "engines": { + "node": ">= 4" + } + }, "node_modules/gensync": { "version": "1.0.0-beta.2", "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", @@ -10050,6 +10183,11 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/notepack.io": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/notepack.io/-/notepack.io-3.0.1.tgz", + "integrity": "sha512-TKC/8zH5pXIAMVQio2TvVDTtPRX+DJPHDqjRbxogtFiByHyzKmy96RA0JtCQJ+WouyyL4A10xomQzgbUT+1jCg==" + }, "node_modules/npm-run-path": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", @@ -10684,6 +10822,19 @@ } } }, + "node_modules/prom-client": { + "version": "15.1.3", + "resolved": "https://registry.npmjs.org/prom-client/-/prom-client-15.1.3.tgz", + "integrity": "sha512-6ZiOBfCywsD4k1BN9IX0uZhF+tJkV8q8llP64G5Hajs4JOeVLPCwpPVcpXy3BwYiUGgyJzsJJQeOIv7+hDSq8g==", + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api": "^1.4.0", + "tdigest": "^0.1.1" + }, + "engines": { + "node": "^16 || ^18 || >=20" + } + }, "node_modules/prompts": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", @@ -10885,6 +11036,19 @@ "url": "https://paulmillr.com/funding/" } }, + "node_modules/redis": { + "version": "4.7.1", + "resolved": "https://registry.npmjs.org/redis/-/redis-4.7.1.tgz", + "integrity": "sha512-S1bJDnqLftzHXHP8JsT5II/CtHWQrASX5K96REjWjlmWKrviSOLWmM7QnRLstAWsu1VBBV1ffV6DzCvxNP0UJQ==", + "dependencies": { + "@redis/bloom": "1.2.0", + "@redis/client": "1.6.1", + "@redis/graph": "1.1.1", + "@redis/json": "1.0.7", + "@redis/search": "1.2.0", + "@redis/time-series": "1.1.0" + } + }, "node_modules/reflect-metadata": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", @@ -12029,6 +12193,15 @@ "streamx": "^2.15.0" } }, + "node_modules/tdigest": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/tdigest/-/tdigest-0.1.2.tgz", + "integrity": "sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==", + "license": "MIT", + "dependencies": { + "bintrees": "1.0.2" + } + }, "node_modules/terser": { "version": "5.39.0", "resolved": "https://registry.npmjs.org/terser/-/terser-5.39.0.tgz", @@ -12611,6 +12784,14 @@ "node": ">=8" } }, + "node_modules/uid2": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/uid2/-/uid2-1.0.0.tgz", + "integrity": "sha512-+I6aJUv63YAcY9n4mQreLUt0d4lvwkkopDNmpomkAUz0fAkEMV9pRWxN0EjhW1YfRhcuyHg2v3mwddCDW1+LFQ==", + "engines": { + "node": ">= 4.0.0" + } + }, "node_modules/uint8array-extras": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/uint8array-extras/-/uint8array-extras-1.4.0.tgz", diff --git a/packages/bytebot-agent/package.json b/packages/bytebot-agent/package.json index a229eccd8..15264d5ad 100644 --- a/packages/bytebot-agent/package.json +++ b/packages/bytebot-agent/package.json @@ -1,6 +1,6 @@ { "name": "bytebot-agent", - "version": "0.0.1", + "version": "2.5.0", "description": "", "author": "", "private": true, @@ -8,12 +8,14 @@ "scripts": { "prisma:dev": "npx prisma migrate dev && npx prisma generate", "prisma:prod": "npx prisma migrate deploy && npx prisma generate", + "migrate:deploy": "npx prisma migrate deploy", "build": "npm run build --prefix ../shared && npx prisma generate && nest build", "format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"", "start": "npm run build --prefix ../shared && nest start", "start:dev": "npm run build --prefix ../shared && nest start --watch", "start:debug": "npm run build --prefix ../shared && nest start --debug --watch", - "start:prod": "npm run build --prefix ../shared && npx prisma migrate deploy && npx prisma generate && node dist/main", + "start:prod": "node dist/main", + "start:prod:migrate": "npx prisma migrate deploy && node dist/main", "lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix", "test": "jest", "test:watch": "jest --watch", @@ -24,6 +26,7 @@ "dependencies": { "@anthropic-ai/sdk": "^0.39.0", "@bytebot/shared": "../shared", + "async-mutex": "^0.5.0", "@google/genai": "^1.8.0", "@nestjs/common": "^11.0.1", "@nestjs/config": "^4.0.2", @@ -38,10 +41,13 @@ "class-transformer": "^0.5.1", "class-validator": "^0.14.2", "openai": "^5.8.2", + "prom-client": "^15.1.3", "reflect-metadata": "^0.2.2", "rxjs": "^7.8.1", "socket.io": "^4.8.1", "socket.io-client": "^4.8.1", + "@socket.io/redis-adapter": "^8.3.0", + "redis": "^4.7.0", "zod": "^4.0.5" }, "devDependencies": { diff --git a/packages/bytebot-agent/prisma/migrations/20251204200000_add_task_version_field/migration.sql b/packages/bytebot-agent/prisma/migrations/20251204200000_add_task_version_field/migration.sql new file mode 100644 index 000000000..6eac66078 --- /dev/null +++ b/packages/bytebot-agent/prisma/migrations/20251204200000_add_task_version_field/migration.sql @@ -0,0 +1,5 @@ +-- v2.0.28: Add version field for optimistic locking +-- This migration adds a version column to the Task table for detecting concurrent modifications +-- Safe for PostgreSQL 11+ as adding a column with a constant default is a metadata-only operation + +ALTER TABLE "Task" ADD COLUMN "version" INTEGER NOT NULL DEFAULT 0; diff --git a/packages/bytebot-agent/prisma/migrations/20251209150000_add_lease_columns/migration.sql b/packages/bytebot-agent/prisma/migrations/20251209150000_add_lease_columns/migration.sql new file mode 100644 index 000000000..fc447b658 --- /dev/null +++ b/packages/bytebot-agent/prisma/migrations/20251209150000_add_lease_columns/migration.sql @@ -0,0 +1,9 @@ +-- v2.2.5: Add lease columns for orphaned task recovery +-- claimedBy - which pod claimed the task (null if not claimed) +-- leaseExpiresAt - when the lease expires (task can be reclaimed after this time) + +ALTER TABLE "Task" ADD COLUMN "claimedBy" VARCHAR(255); +ALTER TABLE "Task" ADD COLUMN "leaseExpiresAt" TIMESTAMP(3); + +-- Create index for efficient orphaned task queries +CREATE INDEX "Task_leaseExpiresAt_status_idx" ON "Task"("leaseExpiresAt", "status"); diff --git a/packages/bytebot-agent/prisma/migrations/20251212160000_add_task_title/migration.sql b/packages/bytebot-agent/prisma/migrations/20251212160000_add_task_title/migration.sql new file mode 100644 index 000000000..a66310f14 --- /dev/null +++ b/packages/bytebot-agent/prisma/migrations/20251212160000_add_task_title/migration.sql @@ -0,0 +1,5 @@ +-- v2.2.16: Add title column for AI-generated task summaries +-- title - short (max 100 chars) AI-generated summary of the task description +-- Used for task list display instead of full description + +ALTER TABLE "Task" ADD COLUMN "title" VARCHAR(100); diff --git a/packages/bytebot-agent/prisma/migrations/20251218010000_add_workspace_id/migration.sql b/packages/bytebot-agent/prisma/migrations/20251218010000_add_workspace_id/migration.sql new file mode 100644 index 000000000..308545356 --- /dev/null +++ b/packages/bytebot-agent/prisma/migrations/20251218010000_add_workspace_id/migration.sql @@ -0,0 +1,5 @@ +-- AlterTable +ALTER TABLE "Task" ADD COLUMN "workspaceId" VARCHAR(255); + +-- CreateIndex +CREATE INDEX "Task_workspaceId_idx" ON "Task"("workspaceId"); diff --git a/packages/bytebot-agent/prisma/migrations/20260108180000_add_execution_surface/migration.sql b/packages/bytebot-agent/prisma/migrations/20260108180000_add_execution_surface/migration.sql new file mode 100644 index 000000000..b619ee744 --- /dev/null +++ b/packages/bytebot-agent/prisma/migrations/20260108180000_add_execution_surface/migration.sql @@ -0,0 +1,8 @@ +-- PR5: ExecutionSurface propagation (agent DB) +-- Adds an explicit execution surface to enforce TEXT_ONLY vs DESKTOP execution paths. + +CREATE TYPE "ExecutionSurface" AS ENUM ('TEXT_ONLY', 'DESKTOP'); + +ALTER TABLE "Task" +ADD COLUMN "executionSurface" "ExecutionSurface"; + diff --git a/packages/bytebot-agent/prisma/schema.prisma b/packages/bytebot-agent/prisma/schema.prisma index ae03635bd..adfe5b5fd 100644 --- a/packages/bytebot-agent/prisma/schema.prisma +++ b/packages/bytebot-agent/prisma/schema.prisma @@ -11,6 +11,9 @@ generator client { datasource db { provider = "postgresql" url = env("DATABASE_URL") + // v2.2.6: Direct URL for migrations (bypasses PgBouncer) + // Required because Prisma migrations use features incompatible with PgBouncer transaction mode + directUrl = env("DIRECT_DATABASE_URL") } enum TaskStatus { @@ -40,9 +43,18 @@ enum TaskType { SCHEDULED } +// PR5: Explicit execution surface for enforcing text-only vs desktop +// Nullable for backward compatibility; agent computes fallback from requiresDesktop if unset. +enum ExecutionSurface { + TEXT_ONLY + DESKTOP +} + model Task { id String @id @default(uuid()) description String + // v2.2.16: AI-generated short title for task list display (max 100 chars) + title String? @db.VarChar(100) type TaskType @default(IMMEDIATE) status TaskStatus @default(PENDING) priority TaskPriority @default(MEDIUM) @@ -56,12 +68,51 @@ model Task { queuedAt DateTime? error String? result Json? - // Example: + // Example: // { "provider": "anthropic", "name": "claude-opus-4-20250514", "title": "Claude Opus 4" } model Json messages Message[] summaries Summary[] files File[] + // v2.0.28: Added for optimistic locking to prevent race conditions + // Incremented on each update to detect concurrent modifications + version Int @default(0) + + // v2.2.5: Lease-based orphaned task recovery + // claimedBy - which pod claimed the task (null if not claimed) + // leaseExpiresAt - when the lease expires (task can be reclaimed after this time) + claimedBy String? @db.VarChar(255) + leaseExpiresAt DateTime? + + // v2.3.0 M4: Workflow context (null for Product 1 Tasks, set for Product 2 Workflows) + // workspaceId - persistent workspace ID from workflow orchestrator + // nodeRunId - specific workflow node run this task is executing for + // These enable workspace-aware desktop resolution and granular locking + workspaceId String? @db.VarChar(255) + nodeRunId String? @db.VarChar(255) + + // v2.3.0 M4: Tool configuration from workflow node definition + // Passed from orchestrator to control which tools are available + allowedTools String[] @default([]) + gatewayToolsOnly Boolean @default(false) + highRiskTools String[] @default([]) + + // Phase 4: Execution surface constraint + // If true, task MUST have a valid desktop pod before executing desktop tools + // Prevents silent downgrade to non-desktop execution + requiresDesktop Boolean @default(false) + + // PR5: Explicit execution surface (TEXT_ONLY vs DESKTOP) + // When null, agent falls back to requiresDesktop. + executionSurface ExecutionSurface? + + // v2.1.0 Phase 6.0: Artifact and action log relations + artifacts TaskArtifact[] + actionLogs TaskActionLog[] + + // Indexes for workflow queries + @@index([workspaceId]) + @@index([nodeRunId]) } model Summary { @@ -106,9 +157,115 @@ model File { data String // Base64 encoded file data createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - + // Relations task Task @relation(fields: [taskId], references: [id], onDelete: Cascade) taskId String } +// v2.1.0 Phase 6.0: Artifact types for S3/MinIO storage +enum ArtifactType { + RECORDING // VNC session recordings (webm) + SCREENSHOT // Desktop screenshots (png) + ACTION_LOG // Agent action logs (jsonl) + OUTPUT // Task output files + TEMP // Temporary files (auto-expire) +} + +// v2.1.0 Phase 6.0: Task artifacts stored in MinIO +// References objects in bytebot-artifacts bucket on store cluster +model TaskArtifact { + id String @id @default(uuid()) + taskId String + tenantId String @db.VarChar(100) + + // S3/MinIO Reference + bucketName String @default("bytebot-artifacts") @db.VarChar(255) + objectKey String @db.Text // e.g., "tenant-1/task-abc/recordings/session.webm" + + // Artifact Metadata + artifactType ArtifactType + fileName String @db.VarChar(1024) + contentType String @db.VarChar(255) // MIME type + sizeBytes BigInt + checksum String? @db.VarChar(64) // SHA256 hash + + // Lifecycle + retentionDays Int @default(30) + expiresAt DateTime? + deletedAt DateTime? // Soft delete + + // Flexible metadata (JSONB) + // Examples: { "duration": 3600, "resolution": "1920x1080" } + metadata Json? + tags String[] @default([]) + + // Timestamps + createdAt DateTime @default(now()) + accessedAt DateTime? + + // Relations + task Task @relation(fields: [taskId], references: [id], onDelete: Cascade) + + // Indexes for common queries + @@index([taskId]) + @@index([tenantId]) + @@index([createdAt]) + @@index([expiresAt]) + @@index([deletedAt]) + @@index([taskId, artifactType]) + @@index([tenantId, createdAt]) + + @@map("task_artifacts") +} + +// v2.1.0 Phase 6.0: Agent action log entry +// Stores structured logs of all agent actions for audit and retraining +model TaskActionLog { + id String @id @default(uuid()) + taskId String + + // Action Details + actionType String @db.VarChar(100) // click, type, scroll, screenshot, etc. + actionStatus String @default("success") @db.VarChar(50) // success, failed, skipped + + // Timing + timestamp DateTime @default(now()) + durationMs Int? // Action duration in milliseconds + + // Action Context + // Element interacted with (button, input, link, etc.) + elementDescription String? @db.Text + // Coordinates if applicable + coordinates Json? // { "x": 450, "y": 320 } + // Keyboard input (masked for passwords) + maskedInput String? @db.Text + + // Screenshot reference (links to TaskArtifact) + screenshotKey String? @db.VarChar(500) + + // LLM Context + // Why the agent took this action + llmReasoning String? @db.Text + llmModel String? @db.VarChar(100) + llmTokenCount Int? + + // Flexible action data (JSONB) + // Examples: { "selector": "button.submit", "value": "***" } + actionData Json? + + // Error info if failed + errorMessage String? @db.Text + + // Relations + task Task @relation(fields: [taskId], references: [id], onDelete: Cascade) + + // Indexes for common queries + @@index([taskId]) + @@index([timestamp]) + @@index([actionType]) + @@index([actionStatus]) + @@index([taskId, timestamp]) + + @@map("task_action_logs") +} diff --git a/packages/bytebot-agent/src/action-logging/action-logging.module.ts b/packages/bytebot-agent/src/action-logging/action-logging.module.ts new file mode 100644 index 000000000..eb0e34401 --- /dev/null +++ b/packages/bytebot-agent/src/action-logging/action-logging.module.ts @@ -0,0 +1,18 @@ +/** + * Action Logging Module + * Phase 6.4: Agent Integration + * + * Provides action logging to the Desktop Router for audit and retraining. + */ + +import { Module } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { ActionLoggingService } from './action-logging.service'; +import { TaskControllerModule } from '../task-controller/task-controller.module'; + +@Module({ + imports: [ConfigModule, TaskControllerModule], + providers: [ActionLoggingService], + exports: [ActionLoggingService], +}) +export class ActionLoggingModule {} diff --git a/packages/bytebot-agent/src/action-logging/action-logging.service.ts b/packages/bytebot-agent/src/action-logging/action-logging.service.ts new file mode 100644 index 000000000..b5cad2662 --- /dev/null +++ b/packages/bytebot-agent/src/action-logging/action-logging.service.ts @@ -0,0 +1,301 @@ +/** + * Action Logging Service + * Phase 6.4: Agent Integration + * + * Logs agent actions to the Desktop Router for audit and retraining. + * Actions are batched and sent asynchronously to avoid blocking task execution. + */ + +import { Injectable, Logger, OnModuleDestroy } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { TaskControllerService } from '../task-controller/task-controller.service'; + +/** + * Action log entry to be sent to the Desktop Router + */ +export interface ActionLogEntry { + taskId: string; + actionType: string; + actionStatus: 'success' | 'failed' | 'skipped'; + coordinates?: { x: number; y: number }; + elementDescription?: string; + maskedInput?: string; + llmReasoning?: string; + llmModel?: string; + llmTokenCount?: number; + durationMs?: number; + errorMessage?: string; + screenshotKey?: string; + actionData?: Record; +} + +/** + * Pending action with retry metadata + */ +interface PendingAction { + entry: ActionLogEntry; + attempts: number; + lastAttempt: number; +} + +@Injectable() +export class ActionLoggingService implements OnModuleDestroy { + private readonly logger = new Logger(ActionLoggingService.name); + private readonly enabled: boolean; + private readonly batchSize: number; + private readonly flushIntervalMs: number; + private readonly maxRetries: number; + private readonly internalToken: string; + private readonly pendingActions: Map = new Map(); + private flushInterval: NodeJS.Timeout | null = null; + + constructor( + private readonly configService: ConfigService, + private readonly taskControllerService: TaskControllerService, + ) { + this.enabled = this.configService.get('ACTION_LOGGING_ENABLED', 'true') === 'true'; + this.batchSize = parseInt( + this.configService.get('ACTION_LOG_BATCH_SIZE', '10'), + 10, + ); + this.flushIntervalMs = parseInt( + this.configService.get('ACTION_LOG_FLUSH_INTERVAL_MS', '5000'), + 10, + ); + this.maxRetries = parseInt( + this.configService.get('ACTION_LOG_MAX_RETRIES', '3'), + 10, + ); + // Phase 3: Internal service token for router authentication + this.internalToken = this.configService.get('INTERNAL_SERVICE_TOKEN', ''); + + if (this.enabled) { + this.startFlushInterval(); + this.logger.log( + `Action logging enabled: batch=${this.batchSize}, flush=${this.flushIntervalMs}ms`, + ); + } else { + this.logger.log('Action logging disabled'); + } + } + + onModuleDestroy() { + if (this.flushInterval) { + clearInterval(this.flushInterval); + this.flushInterval = null; + } + + // Flush remaining actions synchronously on shutdown + for (const taskId of this.pendingActions.keys()) { + this.flushActions(taskId).catch((error) => { + this.logger.error(`Failed to flush actions for ${taskId} on shutdown: ${error.message}`); + }); + } + } + + /** + * Log an action asynchronously + * Actions are queued and sent in batches to avoid blocking + */ + async logAction(entry: ActionLogEntry): Promise { + if (!this.enabled) { + return; + } + + const { taskId } = entry; + + // Initialize queue for this task if needed + if (!this.pendingActions.has(taskId)) { + this.pendingActions.set(taskId, []); + } + + const pending: PendingAction = { + entry: { + ...entry, + // Mask sensitive input + maskedInput: entry.maskedInput ? this.maskSensitiveInput(entry.maskedInput) : undefined, + }, + attempts: 0, + lastAttempt: 0, + }; + + this.pendingActions.get(taskId)!.push(pending); + + // If batch is full, flush immediately + if (this.pendingActions.get(taskId)!.length >= this.batchSize) { + this.flushActions(taskId).catch((error) => { + this.logger.error(`Failed to flush actions for ${taskId}: ${error.message}`); + }); + } + } + + /** + * Log action synchronously (blocks until sent) + * Use sparingly - only for critical actions that must be logged + */ + async logActionSync(entry: ActionLogEntry): Promise { + if (!this.enabled) { + return true; + } + + try { + const routerUrl = await this.taskControllerService.getRouterUrl(entry.taskId); + if (!routerUrl) { + this.logger.debug(`No router URL for ${entry.taskId}, skipping action log`); + return false; + } + + await this.sendActionToRouter(routerUrl, entry); + return true; + } catch (error: any) { + this.logger.error(`Failed to log action sync: ${error.message}`); + return false; + } + } + + /** + * Flush all pending actions for a task + */ + async flushActions(taskId: string): Promise { + const pending = this.pendingActions.get(taskId); + if (!pending || pending.length === 0) { + return; + } + + const routerUrl = await this.taskControllerService.getRouterUrl(taskId); + if (!routerUrl) { + this.logger.debug(`No router URL for ${taskId}, clearing ${pending.length} pending actions`); + this.pendingActions.set(taskId, []); + return; + } + + // Take all pending actions + const toSend = [...pending]; + this.pendingActions.set(taskId, []); + + const failed: PendingAction[] = []; + + for (const action of toSend) { + try { + await this.sendActionToRouter(routerUrl, action.entry); + this.logger.debug(`Logged action: ${action.entry.actionType} for ${taskId}`); + } catch (error: any) { + action.attempts++; + action.lastAttempt = Date.now(); + + if (action.attempts < this.maxRetries) { + failed.push(action); + this.logger.warn( + `Action log failed (attempt ${action.attempts}/${this.maxRetries}): ${error.message}`, + ); + } else { + this.logger.error( + `Action log permanently failed after ${this.maxRetries} attempts: ${error.message}`, + ); + } + } + } + + // Re-queue failed actions for retry + if (failed.length > 0) { + const existing = this.pendingActions.get(taskId) || []; + this.pendingActions.set(taskId, [...failed, ...existing]); + } + } + + /** + * Flush all pending actions for all tasks + */ + async flushAll(): Promise { + const taskIds = Array.from(this.pendingActions.keys()); + await Promise.all(taskIds.map((taskId) => this.flushActions(taskId))); + } + + /** + * Clear pending actions for a task (on task completion) + */ + clearPending(taskId: string): void { + const pending = this.pendingActions.get(taskId); + if (pending && pending.length > 0) { + this.logger.warn(`Clearing ${pending.length} pending actions for ${taskId}`); + } + this.pendingActions.delete(taskId); + } + + /** + * Get count of pending actions + */ + getPendingCount(taskId?: string): number { + if (taskId) { + return this.pendingActions.get(taskId)?.length || 0; + } + let total = 0; + for (const pending of this.pendingActions.values()) { + total += pending.length; + } + return total; + } + + /** + * Send action to the Desktop Router + * Phase 3: Added X-Internal-Token for service-to-service authentication + */ + private async sendActionToRouter(routerUrl: string, entry: ActionLogEntry): Promise { + const url = `${routerUrl}/desktop/${entry.taskId}/action`; + + // Build headers with internal service token for authentication + const headers: Record = { + 'Content-Type': 'application/json', + }; + if (this.internalToken) { + headers['X-Internal-Token'] = this.internalToken; + } + + const response = await fetch(url, { + method: 'POST', + headers, + body: JSON.stringify({ + taskId: entry.taskId, + actionType: entry.actionType, + actionStatus: entry.actionStatus, + coordinates: entry.coordinates, + elementDescription: entry.elementDescription, + maskedInput: entry.maskedInput, + llmReasoning: entry.llmReasoning, + llmModel: entry.llmModel, + llmTokenCount: entry.llmTokenCount, + durationMs: entry.durationMs, + errorMessage: entry.errorMessage, + screenshotKey: entry.screenshotKey, + actionData: entry.actionData, + timestamp: new Date().toISOString(), + }), + }); + + if (!response.ok) { + throw new Error(`Router returned ${response.status}: ${response.statusText}`); + } + } + + /** + * Start periodic flush interval + */ + private startFlushInterval(): void { + this.flushInterval = setInterval(() => { + this.flushAll().catch((error) => { + this.logger.error(`Periodic flush failed: ${error.message}`); + }); + }, this.flushIntervalMs); + } + + /** + * Mask sensitive input (passwords, tokens, etc.) + */ + private maskSensitiveInput(input: string): string { + // Simple masking - replace with asterisks but preserve length indication + if (input.length <= 4) { + return '****'; + } + return `${input.substring(0, 2)}${'*'.repeat(Math.min(input.length - 4, 20))}${input.substring(input.length - 2)}`; + } +} diff --git a/packages/bytebot-agent/src/adapters/redis-io.adapter.ts b/packages/bytebot-agent/src/adapters/redis-io.adapter.ts new file mode 100644 index 000000000..b54322de8 --- /dev/null +++ b/packages/bytebot-agent/src/adapters/redis-io.adapter.ts @@ -0,0 +1,142 @@ +/** + * Redis IO Adapter for Socket.IO + * v2.2.12: Enables WebSocket event broadcasting across multiple replicas + * + * This adapter uses Redis pub/sub to synchronize Socket.IO events across + * all bytebot-agent instances, ensuring that task updates reach all connected + * clients regardless of which replica they're connected to. + * + * @see https://socket.io/docs/v4/redis-adapter/ + * @see https://docs.nestjs.com/websockets/adapter + */ + +import { IoAdapter } from '@nestjs/platform-socket.io'; +import { ServerOptions } from 'socket.io'; +import { createAdapter } from '@socket.io/redis-adapter'; +import { createClient, RedisClientType } from 'redis'; +import { Logger } from '@nestjs/common'; + +export class RedisIoAdapter extends IoAdapter { + private readonly logger = new Logger(RedisIoAdapter.name); + private adapterConstructor: ReturnType; + private pubClient: RedisClientType; + private subClient: RedisClientType; + + /** + * Connect to Redis and create the Socket.IO adapter + * + * Must be called before the application starts listening. + * Uses REDIS_URL environment variable or defaults to localhost. + */ + async connectToRedis(): Promise { + const redisUrl = process.env.REDIS_URL || 'redis://localhost:6379'; + + this.logger.log(`Connecting to Redis at ${redisUrl}...`); + + try { + // Create pub/sub clients with socket configuration + // The pub client publishes events, the sub client subscribes to receive events + // v2.2.12: Added socket options to fix connection timeout issues + this.pubClient = createClient({ + url: redisUrl, + socket: { + connectTimeout: 30000, // 30 second connection timeout + reconnectStrategy: (retries: number) => { + // Exponential backoff with max of 10 seconds + const delay = Math.min(retries * 100, 10000); + this.logger.log(`Redis reconnect attempt ${retries}, waiting ${delay}ms`); + return delay; + }, + }, + }); + this.subClient = this.pubClient.duplicate(); + + // Set up event handlers before connecting + this.pubClient.on('error', (err) => { + this.logger.error(`Redis pub client error: ${err.message}`); + }); + this.pubClient.on('connect', () => { + this.logger.log('Redis pub client connecting...'); + }); + this.pubClient.on('ready', () => { + this.logger.log('Redis pub client ready'); + }); + + this.subClient.on('error', (err) => { + this.logger.error(`Redis sub client error: ${err.message}`); + }); + this.subClient.on('connect', () => { + this.logger.log('Redis sub client connecting...'); + }); + this.subClient.on('ready', () => { + this.logger.log('Redis sub client ready'); + }); + + // Connect both clients + await Promise.all([this.pubClient.connect(), this.subClient.connect()]); + + // Create the Socket.IO adapter with the Redis clients + this.adapterConstructor = createAdapter(this.pubClient, this.subClient); + + this.logger.log('Successfully connected to Redis for Socket.IO adapter'); + } catch (error) { + this.logger.error(`Failed to connect to Redis: ${error.message}`); + this.logger.warn( + 'Socket.IO will run without Redis adapter - events will NOT broadcast across replicas', + ); + // Don't throw - allow the app to start without Redis (degraded mode) + // This ensures the app doesn't crash if Redis is temporarily unavailable + } + } + + /** + * Create the Socket.IO server with the Redis adapter + * + * If Redis connection failed, falls back to default adapter (no cross-replica events) + */ + createIOServer(port: number, options?: ServerOptions): any { + const server = super.createIOServer(port, options); + + // Only use Redis adapter if we successfully connected + if (this.adapterConstructor) { + server.adapter(this.adapterConstructor); + this.logger.log( + 'Socket.IO server using Redis adapter for cross-replica events', + ); + } else { + this.logger.warn( + 'Socket.IO server running WITHOUT Redis adapter - single replica mode', + ); + } + + return server; + } + + /** + * Clean up Redis connections on shutdown + */ + async close(): Promise { + this.logger.log('Closing Redis connections...'); + + const closePromises: Promise[] = []; + + if (this.pubClient?.isOpen) { + closePromises.push( + this.pubClient.quit().then(() => {}).catch((err) => { + this.logger.error(`Error closing pub client: ${err.message}`); + }), + ); + } + + if (this.subClient?.isOpen) { + closePromises.push( + this.subClient.quit().then(() => {}).catch((err) => { + this.logger.error(`Error closing sub client: ${err.message}`); + }), + ); + } + + await Promise.all(closePromises); + this.logger.log('Redis connections closed'); + } +} diff --git a/packages/bytebot-agent/src/agent/agent.analytics.ts b/packages/bytebot-agent/src/agent/agent.analytics.ts index 7abd07b4c..9d4fce94c 100644 --- a/packages/bytebot-agent/src/agent/agent.analytics.ts +++ b/packages/bytebot-agent/src/agent/agent.analytics.ts @@ -3,15 +3,46 @@ import { OnEvent } from '@nestjs/event-emitter'; import { ConfigService } from '@nestjs/config'; import { TasksService } from '../tasks/tasks.service'; import { MessagesService } from '../messages/messages.service'; +import { TaskControllerService } from '../task-controller/task-controller.service'; + +/** + * Structured task event log format for metering service. + * + * v2.2.2: Added structured JSON logging for Loki/Fluent-bit collection. + * The metering service queries Loki with: + * {job="fluent-bit"} |= "task_completed" | json + * + * Required fields: + * - event: "task_completed" (required for LogQL filter) + * - customer_id: Keycloak user ID for billing + * - user_id: Same as customer_id + * - task_id: Task UUID + * - task_type: IMMEDIATE | SCHEDULED + * - duration_ms: Task execution duration + * - status: COMPLETED | FAILED | CANCELLED + */ +interface TaskMeteringEvent { + event: 'task_completed'; + task_id: string; + customer_id: string; + user_id: string; + task_type: string; + duration_ms: number; + status: string; + timestamp: string; +} @Injectable() export class AgentAnalyticsService { private readonly logger = new Logger(AgentAnalyticsService.name); private readonly endpoint?: string; + private readonly customerId: string; + private readonly meteringEnabled: boolean; constructor( private readonly tasksService: TasksService, private readonly messagesService: MessagesService, + private readonly taskControllerService: TaskControllerService, configService: ConfigService, ) { this.endpoint = configService.get('BYTEBOT_ANALYTICS_ENDPOINT'); @@ -20,26 +51,111 @@ export class AgentAnalyticsService { 'BYTEBOT_ANALYTICS_ENDPOINT is not set. Analytics service disabled.', ); } + + // v2.2.2: Get customer ID for metering from environment or fallback + // In multi-tenant deployment, this is set per-user namespace + this.customerId = configService.get('BYTEBOT_CUSTOMER_ID') + || configService.get('BYTEBOT_TENANT_ID') + || 'unknown'; + + // Enable metering logs by default (can be disabled via env var) + this.meteringEnabled = configService.get('BYTEBOT_METERING_ENABLED', 'true') === 'true'; + + if (this.meteringEnabled) { + this.logger.log(`Metering enabled for customer: ${this.customerId}`); + } + } + + /** + * v2.2.2: Emit structured JSON log for metering service collection. + * + * This outputs a JSON line to stdout that Fluent-bit collects and sends to Loki. + * The metering service then queries Loki to aggregate task counts per customer. + */ + private emitMeteringLog(event: TaskMeteringEvent): void { + // Output structured JSON to stdout for Fluent-bit collection + // Using console.log ensures it goes to stdout with newline + console.log(JSON.stringify(event)); + } + + /** + * Get customer ID for a task. + * + * Priority: + * 1. TaskController tenantId (if Phase 6 enabled) + * 2. Environment variable BYTEBOT_CUSTOMER_ID + * 3. Fallback to 'unknown' + */ + private async getCustomerId(taskId: string): Promise { + try { + // Try to get tenant ID from task controller (Phase 6) + if (this.taskControllerService.isPhase6Enabled()) { + const taskInfo = await this.taskControllerService.getTaskInfo(taskId); + if (taskInfo?.tenantId && taskInfo.tenantId !== 'default') { + return taskInfo.tenantId; + } + } + } catch (error) { + // Ignore errors, fall back to environment-based customer ID + } + + return this.customerId; + } + + /** + * Calculate task duration in milliseconds. + */ + private calculateDurationMs(executedAt: Date | null, completedAt: Date | null): number { + if (!executedAt || !completedAt) { + return 0; + } + return completedAt.getTime() - executedAt.getTime(); } @OnEvent('task.cancel') @OnEvent('task.failed') @OnEvent('task.completed') async handleTaskEvent(payload: { taskId: string }) { - if (!this.endpoint) return; - try { const task = await this.tasksService.findById(payload.taskId); - const messages = await this.messagesService.findEvery(payload.taskId); - await fetch(this.endpoint, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ ...task, messages }), - }); + // v2.2.2: Emit structured metering log + if (this.meteringEnabled) { + const customerId = await this.getCustomerId(payload.taskId); + const durationMs = this.calculateDurationMs( + task.executedAt, + task.completedAt || new Date() + ); + + const meteringEvent: TaskMeteringEvent = { + event: 'task_completed', + task_id: task.id, + customer_id: customerId, + user_id: customerId, // Same as customer_id in this system + task_type: task.type, + duration_ms: durationMs, + status: task.status, + timestamp: new Date().toISOString(), + }; + + this.emitMeteringLog(meteringEvent); + this.logger.debug( + `Emitted metering event for task ${task.id}: customer=${customerId}, status=${task.status}, duration=${durationMs}ms` + ); + } + + // Original analytics endpoint functionality + if (this.endpoint) { + const messages = await this.messagesService.findEvery(payload.taskId); + await fetch(this.endpoint, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ ...task, messages }), + }); + } } catch (error: any) { this.logger.error( - `Failed to send analytics for task ${payload.taskId}: ${error.message}`, + `Failed to process task event for ${payload.taskId}: ${error.message}`, error.stack, ); } diff --git a/packages/bytebot-agent/src/agent/agent.computer-use.ts b/packages/bytebot-agent/src/agent/agent.computer-use.ts index ec5d7f8ce..3337ee68d 100644 --- a/packages/bytebot-agent/src/agent/agent.computer-use.ts +++ b/packages/bytebot-agent/src/agent/agent.computer-use.ts @@ -22,24 +22,120 @@ import { isReadFileToolUseBlock, } from '@bytebot/shared'; import { Logger } from '@nestjs/common'; +import { buildDesktopActionSignature, isModifierKeyName } from './agent.desktop-safety'; + +/** + * Fallback desktop URL for legacy mode (Phase 6 not deployed) + */ +const FALLBACK_DESKTOP_URL = process.env.BYTEBOT_DESKTOP_BASE_URL as string; + +/** + * Phase 4: Error class for execution surface constraint violations + */ +export class DesktopRequiredError extends Error { + constructor(message: string) { + super(message); + this.name = 'DesktopRequiredError'; + } +} + +/** + * Get the desktop URL to use for API calls + * Phase 4: Now validates desktop requirement before falling back + * @param desktopUrl - Per-task desktop URL from task controller (Phase 6) + * @param requiresDesktop - Whether this task requires a desktop pod (Phase 4) + * @returns The URL to use for desktop API calls + * @throws DesktopRequiredError if desktop is required but not available + */ +function getDesktopUrl(desktopUrl?: string, requiresDesktop?: boolean): string { + // If desktop is explicitly required, we must have a per-task URL + if (requiresDesktop && !desktopUrl) { + throw new DesktopRequiredError( + 'Task requires desktop but no desktop pod is assigned. ' + + 'Ensure task has a desktop pod before executing desktop tools.', + ); + } -const BYTEBOT_DESKTOP_BASE_URL = process.env.BYTEBOT_DESKTOP_BASE_URL as string; + // Log warning when falling back (but allow for non-required tasks) + if (!desktopUrl && FALLBACK_DESKTOP_URL) { + console.warn( + '[Phase 4 Warning] Using fallback desktop URL. ' + + 'For production, ensure desktop pods are properly assigned.', + ); + } + return desktopUrl || FALLBACK_DESKTOP_URL; +} + +/** + * Context for action execution (Phase 6.4) + */ +export interface ActionContext { + taskId: string; + desktopUrl?: string; + // Phase 4: Execution surface constraint + requiresDesktop?: boolean; + onAction?: (action: ActionResult) => void; +} + +/** + * Result of an action execution (for logging) + */ +export interface ActionResult { + actionType: string; + success: boolean; + durationMs: number; + coordinates?: { x: number; y: number }; + errorMessage?: string; + input?: Record; + actionSignature?: string; + screenshotHash?: string; + screenshotCaptured?: boolean; +} + +/** + * Handle computer tool use with optional per-task context + * @param block - The computer tool use content block from LLM + * @param logger - Logger instance + * @param context - Optional action context with per-task desktop URL (Phase 6.4) + */ export async function handleComputerToolUse( block: ComputerToolUseContentBlock, logger: Logger, + context?: ActionContext, ): Promise { + // Phase 4: Pass requiresDesktop to enable fail-fast validation + const desktopUrl = getDesktopUrl(context?.desktopUrl, context?.requiresDesktop); logger.debug( `Handling computer tool use: ${block.name}, tool_use_id: ${block.id}`, ); + // Helper to report action results + const reportAction = (result: ActionResult) => { + if (context?.onAction) { + context.onAction(result); + } + }; + if (isScreenshotToolUseBlock(block)) { logger.debug('Processing screenshot request'); + const actionSignature = buildDesktopActionSignature(block); + const startTime = Date.now(); try { logger.debug('Taking screenshot'); - const image = await screenshot(); + const shot = await screenshot(desktopUrl); logger.debug('Screenshot captured successfully'); + reportAction({ + actionType: block.name, + success: true, + durationMs: Date.now() - startTime, + actionSignature, + screenshotHash: shot.imageHash, + screenshotCaptured: true, + input: { signature: actionSignature }, + }); + return { type: MessageContentType.ToolResult, tool_use_id: block.id, @@ -47,7 +143,7 @@ export async function handleComputerToolUse( { type: MessageContentType.Image, source: { - data: image, + data: shot.image, media_type: 'image/png', type: 'base64', }, @@ -56,6 +152,15 @@ export async function handleComputerToolUse( }; } catch (error) { logger.error(`Screenshot failed: ${error.message}`, error.stack); + reportAction({ + actionType: block.name, + success: false, + durationMs: Date.now() - startTime, + errorMessage: error.message, + actionSignature, + screenshotCaptured: false, + input: { signature: actionSignature }, + }); return { type: MessageContentType.ToolResult, tool_use_id: block.id, @@ -72,11 +177,23 @@ export async function handleComputerToolUse( if (isCursorPositionToolUseBlock(block)) { logger.debug('Processing cursor position request'); + const actionSignature = buildDesktopActionSignature(block); + const startTime = Date.now(); try { logger.debug('Getting cursor position'); - const position = await cursorPosition(); + const position = await cursorPosition(desktopUrl); logger.debug(`Cursor position obtained: ${position.x}, ${position.y}`); + reportAction({ + actionType: block.name, + success: true, + durationMs: Date.now() - startTime, + coordinates: position, + actionSignature, + screenshotCaptured: false, + input: { signature: actionSignature }, + }); + return { type: MessageContentType.ToolResult, tool_use_id: block.id, @@ -92,6 +209,15 @@ export async function handleComputerToolUse( `Getting cursor position failed: ${error.message}`, error.stack, ); + reportAction({ + actionType: block.name, + success: false, + durationMs: Date.now() - startTime, + errorMessage: error.message, + actionSignature, + screenshotCaptured: false, + input: { signature: actionSignature }, + }); return { type: MessageContentType.ToolResult, tool_use_id: block.id, @@ -106,46 +232,75 @@ export async function handleComputerToolUse( } } + const startTime = Date.now(); + let actionType = block.name; + let actionCoordinates: { x: number; y: number } | undefined; + const actionSignature = buildDesktopActionSignature(block); + try { if (isMoveMouseToolUseBlock(block)) { - await moveMouse(block.input); + actionType = 'computer_move_mouse'; + actionCoordinates = block.input.coordinates; + await moveMouse(block.input, desktopUrl); } if (isTraceMouseToolUseBlock(block)) { - await traceMouse(block.input); + actionType = 'computer_trace_mouse'; + await traceMouse(block.input, desktopUrl); } if (isClickMouseToolUseBlock(block)) { - await clickMouse(block.input); + actionType = 'computer_click_mouse'; + actionCoordinates = block.input.coordinates; + await clickMouse(block.input, desktopUrl); } if (isPressMouseToolUseBlock(block)) { - await pressMouse(block.input); + actionType = 'computer_press_mouse'; + actionCoordinates = block.input.coordinates; + await pressMouse(block.input, desktopUrl); } if (isDragMouseToolUseBlock(block)) { - await dragMouse(block.input); + actionType = 'computer_drag_mouse'; + await dragMouse(block.input, desktopUrl); } if (isScrollToolUseBlock(block)) { - await scroll(block.input); + actionType = 'computer_scroll'; + actionCoordinates = block.input.coordinates; + await scroll(block.input, desktopUrl); } if (isTypeKeysToolUseBlock(block)) { - await typeKeys(block.input); + actionType = 'computer_type_keys'; + await typeKeys(block.input, desktopUrl); } if (isPressKeysToolUseBlock(block)) { - await pressKeys(block.input); + actionType = 'computer_press_keys'; + await pressKeys(block.input, desktopUrl); } if (isTypeTextToolUseBlock(block)) { - await typeText(block.input); + actionType = 'computer_type_text'; + await typeText(block.input, desktopUrl); } if (isPasteTextToolUseBlock(block)) { - await pasteText(block.input); + actionType = 'computer_paste_text'; + await pasteText(block.input, desktopUrl); } if (isWaitToolUseBlock(block)) { - await wait(block.input); + actionType = 'computer_wait'; + await wait(block.input, desktopUrl); } if (isApplicationToolUseBlock(block)) { - await application(block.input); + actionType = 'computer_application'; + await application(block.input, desktopUrl); } if (isReadFileToolUseBlock(block)) { + actionType = 'computer_read_file'; logger.debug(`Reading file: ${block.input.path}`); - const result = await readFile(block.input); + const result = await readFile(block.input, desktopUrl); + + reportAction({ + actionType, + success: result.success, + durationMs: Date.now() - startTime, + input: { path: block.input.path }, + }); if (result.success && result.data) { // Return document content block @@ -181,7 +336,10 @@ export async function handleComputerToolUse( } } + const actionDurationMs = Date.now() - startTime; + let image: string | null = null; + let screenshotHash: string | undefined; try { // Wait before taking screenshot to allow UI to settle const delayMs = 750; // 750ms delay @@ -189,7 +347,9 @@ export async function handleComputerToolUse( await new Promise((resolve) => setTimeout(resolve, delayMs)); logger.debug('Taking screenshot'); - image = await screenshot(); + const shot = await screenshot(desktopUrl); + image = shot.image; + screenshotHash = shot.imageHash; logger.debug('Screenshot captured successfully'); } catch (error) { logger.error('Failed to take screenshot', error); @@ -218,12 +378,50 @@ export async function handleComputerToolUse( }); } + // Emit screenshot hash for loop detection / observability (no base64). + reportAction({ + actionType, + success: true, + durationMs: actionDurationMs, + coordinates: actionCoordinates, + actionSignature, + screenshotHash, + screenshotCaptured: Boolean(image), + input: { signature: actionSignature }, + }); + return toolResult; } catch (error) { logger.error( `Error executing ${block.name} tool: ${error.message}`, error.stack, ); + + // Best-effort: ensure no stuck input state if an input-affecting tool failed. + if ( + block.name === 'computer_press_keys' || + block.name === 'computer_press_mouse' || + block.name === 'computer_drag_mouse' + ) { + try { + await resetDesktopInput(desktopUrl); + } catch (resetError: any) { + logger.warn(`Failed to reset desktop input: ${resetError.message}`); + } + } + + // Report failed action + reportAction({ + actionType, + success: false, + durationMs: Date.now() - startTime, + coordinates: actionCoordinates, + errorMessage: error.message, + actionSignature, + screenshotCaptured: false, + input: { signature: actionSignature }, + }); + return { type: MessageContentType.ToolResult, tool_use_id: block.id, @@ -238,20 +436,23 @@ export async function handleComputerToolUse( } } -async function moveMouse(input: { coordinates: Coordinates }): Promise { +/** + * Action functions - all accept optional desktopUrl for Phase 6.4 per-task routing + */ + +async function moveMouse( + input: { coordinates: Coordinates }, + desktopUrl: string, +): Promise { const { coordinates } = input; console.log( `Moving mouse to coordinates: [${coordinates.x}, ${coordinates.y}]`, ); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'move_mouse', - coordinates, - }), + await postComputerUse(desktopUrl, { + action: 'move_mouse', + coordinates, }); } catch (error) { console.error('Error in move_mouse action:', error); @@ -259,24 +460,23 @@ async function moveMouse(input: { coordinates: Coordinates }): Promise { } } -async function traceMouse(input: { - path: Coordinates[]; - holdKeys?: string[]; -}): Promise { +async function traceMouse( + input: { + path: Coordinates[]; + holdKeys?: string[]; + }, + desktopUrl: string, +): Promise { const { path, holdKeys } = input; console.log( `Tracing mouse to path: ${path} ${holdKeys ? `with holdKeys: ${holdKeys}` : ''}`, ); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'trace_mouse', - path, - holdKeys, - }), + await postComputerUse(desktopUrl, { + action: 'trace_mouse', + path, + holdKeys, }); } catch (error) { console.error('Error in trace_mouse action:', error); @@ -284,28 +484,27 @@ async function traceMouse(input: { } } -async function clickMouse(input: { - coordinates?: Coordinates; - button: Button; - holdKeys?: string[]; - clickCount: number; -}): Promise { +async function clickMouse( + input: { + coordinates?: Coordinates; + button: Button; + holdKeys?: string[]; + clickCount: number; + }, + desktopUrl: string, +): Promise { const { coordinates, button, holdKeys, clickCount } = input; console.log( `Clicking mouse ${button} ${clickCount} times ${coordinates ? `at coordinates: [${coordinates.x}, ${coordinates.y}] ` : ''} ${holdKeys ? `with holdKeys: ${holdKeys}` : ''}`, ); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'click_mouse', - coordinates, - button, - holdKeys: holdKeys && holdKeys.length > 0 ? holdKeys : undefined, - clickCount, - }), + await postComputerUse(desktopUrl, { + action: 'click_mouse', + coordinates, + button, + holdKeys: holdKeys && holdKeys.length > 0 ? holdKeys : undefined, + clickCount, }); } catch (error) { console.error('Error in click_mouse action:', error); @@ -313,26 +512,25 @@ async function clickMouse(input: { } } -async function pressMouse(input: { - coordinates?: Coordinates; - button: Button; - press: Press; -}): Promise { +async function pressMouse( + input: { + coordinates?: Coordinates; + button: Button; + press: Press; + }, + desktopUrl: string, +): Promise { const { coordinates, button, press } = input; console.log( `Pressing mouse ${button} ${press} ${coordinates ? `at coordinates: [${coordinates.x}, ${coordinates.y}]` : ''}`, ); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'press_mouse', - coordinates, - button, - press, - }), + await postComputerUse(desktopUrl, { + action: 'press_mouse', + coordinates, + button, + press, }); } catch (error) { console.error('Error in press_mouse action:', error); @@ -340,26 +538,25 @@ async function pressMouse(input: { } } -async function dragMouse(input: { - path: Coordinates[]; - button: Button; - holdKeys?: string[]; -}): Promise { +async function dragMouse( + input: { + path: Coordinates[]; + button: Button; + holdKeys?: string[]; + }, + desktopUrl: string, +): Promise { const { path, button, holdKeys } = input; console.log( `Dragging mouse to path: ${path} ${holdKeys ? `with holdKeys: ${holdKeys}` : ''}`, ); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'drag_mouse', - path, - button, - holdKeys: holdKeys && holdKeys.length > 0 ? holdKeys : undefined, - }), + await postComputerUse(desktopUrl, { + action: 'drag_mouse', + path, + button, + holdKeys: holdKeys && holdKeys.length > 0 ? holdKeys : undefined, }); } catch (error) { console.error('Error in drag_mouse action:', error); @@ -367,28 +564,27 @@ async function dragMouse(input: { } } -async function scroll(input: { - coordinates?: Coordinates; - direction: 'up' | 'down' | 'left' | 'right'; - scrollCount: number; - holdKeys?: string[]; -}): Promise { +async function scroll( + input: { + coordinates?: Coordinates; + direction: 'up' | 'down' | 'left' | 'right'; + scrollCount: number; + holdKeys?: string[]; + }, + desktopUrl: string, +): Promise { const { coordinates, direction, scrollCount, holdKeys } = input; console.log( `Scrolling ${direction} ${scrollCount} times ${coordinates ? `at coordinates: [${coordinates.x}, ${coordinates.y}]` : ''}`, ); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'scroll', - coordinates, - direction, - scrollCount, - holdKeys: holdKeys && holdKeys.length > 0 ? holdKeys : undefined, - }), + await postComputerUse(desktopUrl, { + action: 'scroll', + coordinates, + direction, + scrollCount, + holdKeys: holdKeys && holdKeys.length > 0 ? holdKeys : undefined, }); } catch (error) { console.error('Error in scroll action:', error); @@ -396,22 +592,21 @@ async function scroll(input: { } } -async function typeKeys(input: { - keys: string[]; - delay?: number; -}): Promise { +async function typeKeys( + input: { + keys: string[]; + delay?: number; + }, + desktopUrl: string, +): Promise { const { keys, delay } = input; console.log(`Typing keys: ${keys}`); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'type_keys', - keys, - delay, - }), + await postComputerUse(desktopUrl, { + action: 'type_keys', + keys, + delay, }); } catch (error) { console.error('Error in type_keys action:', error); @@ -419,22 +614,68 @@ async function typeKeys(input: { } } -async function pressKeys(input: { - keys: string[]; - press: Press; -}): Promise { +async function pressKeys( + input: { + keys: string[]; + press: Press; + holdMs?: number; + hold_ms?: number; + }, + desktopUrl: string, +): Promise { const { keys, press } = input; console.log(`Pressing keys: ${keys}`); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ + const holdMsRaw = + typeof (input as any).holdMs === 'number' + ? (input as any).holdMs + : typeof (input as any).hold_ms === 'number' + ? (input as any).hold_ms + : null; + + const holdMs = + typeof holdMsRaw === 'number' && Number.isFinite(holdMsRaw) && holdMsRaw >= 0 + ? Math.min(Math.floor(holdMsRaw), 750) + : null; + + const hasNonModifier = keys.some((k) => !isModifierKeyName(k)); + + // Safety invariant: non-modifier holds are not allowed. Treat as atomic tap. + if (press === 'down' && hasNonModifier) { + await typeKeys({ keys, delay: 75 }, desktopUrl); + return; + } + + // Safety invariant: modifier holds must be bounded. If holdMs isn't provided, treat as tap. + if (press === 'down' && !hasNonModifier && holdMs === null) { + await typeKeys({ keys, delay: 75 }, desktopUrl); + return; + } + + if (press === 'down' && !hasNonModifier && holdMs !== null) { + await postComputerUse(desktopUrl, { action: 'press_keys', keys, - press, - }), + press: 'down', + }); + + try { + await new Promise((resolve) => setTimeout(resolve, holdMs)); + } finally { + await postComputerUse(desktopUrl, { + action: 'press_keys', + keys, + press: 'up', + }); + } + return; + } + + await postComputerUse(desktopUrl, { + action: 'press_keys', + keys, + press, }); } catch (error) { console.error('Error in press_keys action:', error); @@ -442,22 +683,21 @@ async function pressKeys(input: { } } -async function typeText(input: { - text: string; - delay?: number; -}): Promise { +async function typeText( + input: { + text: string; + delay?: number; + }, + desktopUrl: string, +): Promise { const { text, delay } = input; console.log(`Typing text: ${text}`); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'type_text', - text, - delay, - }), + await postComputerUse(desktopUrl, { + action: 'type_text', + text, + delay, }); } catch (error) { console.error('Error in type_text action:', error); @@ -465,18 +705,17 @@ async function typeText(input: { } } -async function pasteText(input: { text: string }): Promise { +async function pasteText( + input: { text: string }, + desktopUrl: string, +): Promise { const { text } = input; console.log(`Pasting text: ${text}`); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'paste_text', - text, - }), + await postComputerUse(desktopUrl, { + action: 'paste_text', + text, }); } catch (error) { console.error('Error in paste_text action:', error); @@ -484,18 +723,17 @@ async function pasteText(input: { text: string }): Promise { } } -async function wait(input: { duration: number }): Promise { +async function wait( + input: { duration: number }, + desktopUrl: string, +): Promise { const { duration } = input; console.log(`Waiting for ${duration}ms`); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'wait', - duration, - }), + await postComputerUse(desktopUrl, { + action: 'wait', + duration, }); } catch (error) { console.error('Error in wait action:', error); @@ -503,19 +741,13 @@ async function wait(input: { duration: number }): Promise { } } -async function cursorPosition(): Promise { +async function cursorPosition(desktopUrl: string): Promise { console.log('Getting cursor position'); try { - const response = await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'cursor_position', - }), + const data = await postComputerUseJson<{ x: number; y: number }>(desktopUrl, { + action: 'cursor_position', }); - - const data = await response.json(); return { x: data.x, y: data.y }; } catch (error) { console.error('Error in cursor_position action:', error); @@ -523,49 +755,41 @@ async function cursorPosition(): Promise { } } -async function screenshot(): Promise { +async function screenshot( + desktopUrl: string, +): Promise<{ image: string; imageHash?: string }> { console.log('Taking screenshot'); try { - const requestBody = { - action: 'screenshot', - }; - - const response = await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify(requestBody), - }); - - if (!response.ok) { - throw new Error(`Failed to take screenshot: ${response.statusText}`); - } - - const data = await response.json(); + const data = await postComputerUseJson<{ image?: string; imageHash?: string }>( + desktopUrl, + { + action: 'screenshot', + }, + ); if (!data.image) { throw new Error('Failed to take screenshot: No image data received'); } - return data.image; // Base64 encoded image + return { image: data.image, imageHash: data.imageHash }; } catch (error) { console.error('Error in screenshot action:', error); throw error; } } -async function application(input: { application: string }): Promise { - const { application } = input; - console.log(`Opening application: ${application}`); +async function application( + input: { application: string }, + desktopUrl: string, +): Promise { + const { application: app } = input; + console.log(`Opening application: ${app}`); try { - await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'application', - application, - }), + await postComputerUse(desktopUrl, { + action: 'application', + application: app, }); } catch (error) { console.error('Error in application action:', error); @@ -573,7 +797,10 @@ async function application(input: { application: string }): Promise { } } -async function readFile(input: { path: string }): Promise<{ +async function readFile( + input: { path: string }, + desktopUrl: string, +): Promise<{ success: boolean; data?: string; name?: string; @@ -585,20 +812,10 @@ async function readFile(input: { path: string }): Promise<{ console.log(`Reading file: ${path}`); try { - const response = await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'read_file', - path, - }), + const data = await postComputerUseJson(desktopUrl, { + action: 'read_file', + path, }); - - if (!response.ok) { - throw new Error(`Failed to read file: ${response.statusText}`); - } - - const data = await response.json(); return data; } catch (error) { console.error('Error in read_file action:', error); @@ -609,32 +826,29 @@ async function readFile(input: { path: string }): Promise<{ } } -export async function writeFile(input: { - path: string; - content: string; -}): Promise<{ success: boolean; message?: string }> { +/** + * Write file to desktop - also updated for Phase 6.4 + */ +export async function writeFile( + input: { + path: string; + content: string; + }, + desktopUrl?: string, +): Promise<{ success: boolean; message?: string }> { const { path, content } = input; + const url = getDesktopUrl(desktopUrl); console.log(`Writing file: ${path}`); try { // Content is always base64 encoded const base64Data = content; - const response = await fetch(`${BYTEBOT_DESKTOP_BASE_URL}/computer-use`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - action: 'write_file', - path, - data: base64Data, - }), + const data = await postComputerUseJson(url, { + action: 'write_file', + path, + data: base64Data, }); - - if (!response.ok) { - throw new Error(`Failed to write file: ${response.statusText}`); - } - - const data = await response.json(); return data; } catch (error) { console.error('Error in write_file action:', error); @@ -644,3 +858,125 @@ export async function writeFile(input: { }; } } + +type DesktopCapabilities = { + resetInput?: boolean; + screenshotHash?: boolean; +}; + +const DESKTOP_CAPABILITIES_TTL_MS = parseInt( + process.env.BYTEBOT_DESKTOP_CAPABILITIES_TTL_MS || '300000', + 10, +); + +const desktopCapabilitiesCache = new Map< + string, + { checkedAt: number; capabilities: DesktopCapabilities } +>(); + +async function getDesktopCapabilities(desktopUrl: string): Promise { + const now = Date.now(); + const cached = desktopCapabilitiesCache.get(desktopUrl); + if (cached && now - cached.checkedAt < DESKTOP_CAPABILITIES_TTL_MS) { + return cached.capabilities; + } + + try { + const response = await fetch(`${desktopUrl}/computer-use/capabilities`, { + method: 'GET', + headers: { 'Content-Type': 'application/json' }, + }); + + if (!response.ok) { + // Backward compatibility: older daemons may not expose a capabilities endpoint yet. + // Do not treat 404 as "feature unsupported" because reset-input may still exist. + if (response.status === 404) return null; + return null; + } + + const json = (await response.json()) as DesktopCapabilities; + const capabilities: DesktopCapabilities = { + resetInput: Boolean((json as any)?.resetInput), + screenshotHash: Boolean((json as any)?.screenshotHash), + }; + desktopCapabilitiesCache.set(desktopUrl, { checkedAt: now, capabilities }); + return capabilities; + } catch { + return null; + } +} + +export async function resetDesktopInput(desktopUrl: string): Promise { + const capabilities = await getDesktopCapabilities(desktopUrl); + if (capabilities && capabilities.resetInput === false) { + return; + } + + const response = await fetch(`${desktopUrl}/computer-use/reset-input`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + }); + + // Backward compatibility with older daemon images. + if (response.status === 404) { + desktopCapabilitiesCache.set(desktopUrl, { + checkedAt: Date.now(), + capabilities: { resetInput: false, screenshotHash: false }, + }); + return; + } + + if (!response.ok) { + const text = await safeReadResponseText(response); + throw new Error( + `Failed to reset desktop input: ${response.status} ${response.statusText} ${text}`.trim(), + ); + } +} + +async function postComputerUse( + desktopUrl: string, + body: Record, +): Promise { + const response = await fetch(`${desktopUrl}/computer-use`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(body), + }); + + if (!response.ok) { + const text = await safeReadResponseText(response); + throw new Error( + `Desktop action failed: ${response.status} ${response.statusText} ${text}`.trim(), + ); + } +} + +async function postComputerUseJson( + desktopUrl: string, + body: Record, +): Promise { + const response = await fetch(`${desktopUrl}/computer-use`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(body), + }); + + if (!response.ok) { + const text = await safeReadResponseText(response); + throw new Error( + `Desktop action failed: ${response.status} ${response.statusText} ${text}`.trim(), + ); + } + + return (await response.json()) as T; +} + +async function safeReadResponseText(response: Response): Promise { + try { + const text = await response.text(); + return text.length > 2048 ? `${text.slice(0, 2048)}…` : text; + } catch { + return ''; + } +} diff --git a/packages/bytebot-agent/src/agent/agent.constants.ts b/packages/bytebot-agent/src/agent/agent.constants.ts index 5b3d4e0d3..36e03ac3e 100644 --- a/packages/bytebot-agent/src/agent/agent.constants.ts +++ b/packages/bytebot-agent/src/agent/agent.constants.ts @@ -48,9 +48,16 @@ CORE WORKING PRINCIPLES • Type realistic, context-appropriate text with \`computer_type_text\` (for short strings) or \`computer_paste_text\` (for long strings), or shortcuts with \`computer_type_keys\`. 4. **Valid Keys Only** - Use **exactly** the identifiers listed in **VALID KEYS** below when supplying \`keys\` to \`computer_type_keys\` or \`computer_press_keys\`. All identifiers come from nut-tree's \`Key\` enum; they are case-sensitive and contain *no spaces*. +4a. **Keyboard Safety (Tap vs Hold)** - + • Non-modifier keys (Enter/Tab/Escape/arrows/letters/digits) are **atomic taps**. Use \`computer_type_keys\` (e.g. keys=["Enter"]) or \`computer_type_text\` (e.g. text="\\n"). + • Never use \`computer_press_keys\` with press="down" for non-modifier keys. + • \`computer_press_keys\` is for **modifier holds only** (Shift/Ctrl/Alt/Meta) and must include a bounded \`holdMs\` (<= 750ms). You must never leave keys held down across tool calls. 5. **Verify Every Step** - After each action: a. Take another screenshot. - b. Confirm the expected state before continuing. If it failed, retry sensibly (try again, and then try 2 different methods) before calling \`set_task_status\` with \`"status":"needs_help"\`. + b. Confirm the expected state before continuing. If it failed, retry sensibly (try again, and then try 2 different methods) before calling \`set_task_status\` with \`"status":"needs_help"\` and a specific \`errorCode\` (only when truly blocked). +5a. **No Strategy Prompts** - + • Do NOT ask the user to choose between equivalent websites/tools (e.g., "Google Flights vs Kayak vs Expedia"). Pick a reliable default and continue. + • Only use \`needs_help\` for genuine external input, approvals, or human takeover (e.g., credentials required, UI blocked by sign-in, or a popup you cannot dismiss). 6. **Efficiency & Clarity** - Combine related key presses; prefer scrolling or dragging over many small moves; minimise unnecessary waits. 7. **Stay Within Scope** - Do nothing the user didn't request; don't suggest unrelated tasks. For form and login fields, don't fill in random data, unless explicitly told to do so. 8. **Security** - If you see a password, secret key, or other sensitive information (or the user shares it with you), do not repeat it in conversation. When typing sensitive information, use \`computer_type_text\` with \`isSensitive\` set to \`true\`. @@ -126,7 +133,14 @@ TASK LIFECYCLE TEMPLATE This tool reads files and returns them as document content blocks with base64 data, supporting various file types including documents (PDF, DOCX, TXT, etc.) and images (PNG, JPG, etc.). 8. **Ask for Help** - If you need clarification, or if you are unable to fully complete the task, invoke \`\`\`json - { "name": "set_task_status", "input": { "status": "needs_help", "description": "Summary of help or clarification needed" } } + { + "name": "set_task_status", + "input": { + "status": "needs_help", + "errorCode": "UI_BLOCKED_SIGNIN", + "description": "Blocked by sign-in flow; requires human action to proceed." + } + } \`\`\` 9. **Cleanup** - When the user's goal is met: • Close every window, file, or app you opened so the desktop is tidy. diff --git a/packages/bytebot-agent/src/agent/agent.desktop-loop.fixture.spec.ts b/packages/bytebot-agent/src/agent/agent.desktop-loop.fixture.spec.ts new file mode 100644 index 000000000..a1c631dfa --- /dev/null +++ b/packages/bytebot-agent/src/agent/agent.desktop-loop.fixture.spec.ts @@ -0,0 +1,69 @@ +import * as fs from 'node:fs'; +import * as path from 'node:path'; + +import { MessageContentType } from '@bytebot/shared'; +import { + DesktopLoopDetector, + buildDesktopActionSignature, +} from './agent.desktop-safety'; + +type FixtureRow = { + tool: string; + coordinates?: { x: number; y: number }; + button?: string; + clickCount?: number; + screenshotHash: string; +}; + +describe('desktop loop detector fixture replay', () => { + it('replays the Google Flights modal stall fixture and trips quickly', () => { + const fixturePath = path.join( + __dirname, + 'fixtures', + 'desktop-loop-google-flights.jsonl', + ); + const lines = fs + .readFileSync(fixturePath, 'utf8') + .split('\n') + .map((l) => l.trim()) + .filter(Boolean); + + const detector = new DesktopLoopDetector(); + + let lastResult: ReturnType | null = null; + let steps = 0; + + for (const line of lines) { + const row = JSON.parse(line) as FixtureRow; + steps++; + + const block: any = { + type: MessageContentType.ToolUse, + id: `fx-${steps}`, + name: row.tool, + input: {}, + }; + + if (row.coordinates) { + block.input.coordinates = row.coordinates; + } + if (row.tool === 'computer_click_mouse') { + block.input.button = row.button || 'left'; + block.input.clickCount = row.clickCount || 1; + } + + const signature = buildDesktopActionSignature(block); + lastResult = detector.record({ + atMs: Date.now(), + signature, + screenshotHash: row.screenshotHash, + }); + + if (lastResult.interrupt) break; + } + + expect(lastResult?.interrupt).toBe(true); + expect(lastResult?.rule).toBe('repeat_in_window_no_progress'); + expect(steps).toBeLessThanOrEqual(12); + }); +}); diff --git a/packages/bytebot-agent/src/agent/agent.desktop-repair.spec.ts b/packages/bytebot-agent/src/agent/agent.desktop-repair.spec.ts new file mode 100644 index 000000000..3342da53e --- /dev/null +++ b/packages/bytebot-agent/src/agent/agent.desktop-repair.spec.ts @@ -0,0 +1,118 @@ +import { + decodePngDimensionsFromBase64, + parseUiRepairCandidate, + validateUiRepairCandidate, +} from './agent.desktop-repair'; + +describe('agent.desktop-repair', () => { + describe('parseUiRepairCandidate', () => { + it('parses valid JSON with rationale_code', () => { + const res = parseUiRepairCandidate( + JSON.stringify({ + x: 10, + y: 20, + confidence: 0.9, + rationale_code: 'CLOSE_X', + }), + ); + expect(res.ok).toBe(true); + if (res.ok) { + expect(res.candidate.rationaleCode).toBe('CLOSE_X'); + expect(res.candidate.x).toBe(10); + expect(res.candidate.y).toBe(20); + } + }); + + it('rejects missing JSON', () => { + const res = parseUiRepairCandidate('nope'); + expect(res.ok).toBe(false); + }); + + it('rejects invalid rationale_code', () => { + const res = parseUiRepairCandidate( + JSON.stringify({ + x: 10, + y: 20, + confidence: 0.9, + rationale_code: 'DELETE_ALL', + }), + ); + expect(res.ok).toBe(false); + }); + }); + + describe('decodePngDimensionsFromBase64', () => { + it('extracts width/height from a tiny PNG', () => { + // 1x1 transparent PNG + const base64 = + 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PF2K1QAAAABJRU5ErkJggg=='; + const dims = decodePngDimensionsFromBase64(base64); + expect(dims).toEqual({ width: 1, height: 1 }); + }); + }); + + describe('validateUiRepairCandidate', () => { + it('accepts CLOSE_X in top-right region across multiple viewport sizes', () => { + const viewports = [ + { width: 100, height: 100 }, + { width: 800, height: 600 }, + { width: 1366, height: 768 }, + { width: 1920, height: 1080 }, + ]; + + for (const vp of viewports) { + const res = validateUiRepairCandidate({ + candidate: { + x: Math.floor(vp.width * 0.9), + y: Math.floor(vp.height * 0.1), + confidence: 0.9, + rationaleCode: 'CLOSE_X', + }, + dimensions: vp, + minConfidence: 0.7, + }); + expect(res.ok).toBe(true); + } + }); + + it('rejects CLOSE_X outside top-right region', () => { + const res = validateUiRepairCandidate({ + candidate: { x: 10, y: 90, confidence: 0.9, rationaleCode: 'CLOSE_X' }, + dimensions: { width: 100, height: 100 }, + minConfidence: 0.7, + }); + expect(res.ok).toBe(false); + }); + + it('accepts DISMISS_BUTTON only in conservative right-side region', () => { + const viewports = [ + { width: 320, height: 240 }, + { width: 1024, height: 768 }, + { width: 1920, height: 1080 }, + ]; + + for (const vp of viewports) { + const res = validateUiRepairCandidate({ + candidate: { + x: Math.floor(vp.width * 0.85), + y: Math.floor(vp.height * 0.5), + confidence: 0.95, + rationaleCode: 'DISMISS_BUTTON', + }, + dimensions: vp, + minConfidence: 0.7, + }); + expect(res.ok).toBe(true); + } + }); + + it('rejects low confidence', () => { + const res = validateUiRepairCandidate({ + candidate: { x: 90, y: 10, confidence: 0.2, rationaleCode: 'CLOSE_X' }, + dimensions: { width: 100, height: 100 }, + minConfidence: 0.7, + }); + expect(res.ok).toBe(false); + }); + }); +}); diff --git a/packages/bytebot-agent/src/agent/agent.desktop-repair.ts b/packages/bytebot-agent/src/agent/agent.desktop-repair.ts new file mode 100644 index 000000000..9228f83f9 --- /dev/null +++ b/packages/bytebot-agent/src/agent/agent.desktop-repair.ts @@ -0,0 +1,236 @@ +import { MessageContentType } from '@bytebot/shared'; +import type { MessageContentBlock } from '@bytebot/shared'; +import { Role } from '@prisma/client'; +import type { Message } from '@prisma/client'; + +export type UiRepairRationaleCode = 'CLOSE_X' | 'DISMISS_BUTTON' | 'CLICK_AWAY'; + +export type UiRepairCandidate = { + x: number; + y: number; + confidence: number; + rationaleCode: UiRepairRationaleCode; +}; + +export type UiRepairCandidateParseResult = + | { ok: true; candidate: UiRepairCandidate } + | { ok: false; error: string }; + +export type PngDimensions = { width: number; height: number }; + +export function buildUiRepairClassifierSystemPrompt(): string { + return ` +You are a strict UI repair classifier for a desktop automation system. + +Task: +- You will receive a single desktop screenshot. +- Determine whether there is a SAFE, non-destructive action to dismiss a blocking modal/popup/overlay. + +Output: +- Reply with ONLY valid JSON (no Markdown, no prose). +- Schema: + { + "x": number, + "y": number, + "confidence": number, // 0.0 to 1.0 + "rationale_code": "CLOSE_X" | "DISMISS_BUTTON" | "CLICK_AWAY" + } + +Rules: +- Prefer CLOSE_X (top-right close icon on a modal/popup). +- Use DISMISS_BUTTON only if it clearly DISMISSES the overlay (e.g., "Close", "Dismiss", "Not now", "No thanks", "Cancel"). +- Use CLICK_AWAY only if the overlay is a click-away popover and there is a safe empty area to click that will not navigate. +- If you are not confident, set confidence to 0.0 and use CLOSE_X with x=0,y=0. +`.trim(); +} + +export function buildUiRepairClassifierMessage(input: { + taskId: string; + screenshotBase64Png: string; +}): Message { + const blocks: MessageContentBlock[] = [ + { + type: MessageContentType.Text, + text: 'Find a safe dismiss action for any blocking modal/popup in this screenshot.', + } as any, + { + type: MessageContentType.Image, + source: { + type: 'base64', + media_type: 'image/png', + data: input.screenshotBase64Png, + }, + } as any, + ]; + + return { + id: '', + createdAt: new Date(), + updatedAt: new Date(), + taskId: input.taskId, + summaryId: null, + role: Role.USER, + content: blocks as any, + } as Message; +} + +function extractJsonObject(text: string): string | null { + const trimmed = text.trim(); + if (!trimmed) return null; + + const start = trimmed.indexOf('{'); + const end = trimmed.lastIndexOf('}'); + if (start < 0 || end < 0 || end <= start) return null; + return trimmed.slice(start, end + 1); +} + +export function parseUiRepairCandidate( + text: string, +): UiRepairCandidateParseResult { + const jsonText = extractJsonObject(text); + if (!jsonText) { + return { ok: false, error: 'No JSON object found in classifier response' }; + } + + let parsed: any; + try { + parsed = JSON.parse(jsonText); + } catch (error: any) { + return { ok: false, error: `Invalid JSON: ${error.message}` }; + } + + const x = typeof parsed?.x === 'number' ? parsed.x : NaN; + const y = typeof parsed?.y === 'number' ? parsed.y : NaN; + const confidence = + typeof parsed?.confidence === 'number' ? parsed.confidence : NaN; + const rationaleCodeRaw = + typeof parsed?.rationale_code === 'string' ? parsed.rationale_code : ''; + + const rationaleCode = rationaleCodeRaw.trim().toUpperCase(); + if ( + !Number.isFinite(x) || + !Number.isFinite(y) || + !Number.isFinite(confidence) + ) { + return { + ok: false, + error: 'Missing or invalid numeric fields (x,y,confidence)', + }; + } + + const allowed: UiRepairRationaleCode[] = [ + 'CLOSE_X', + 'DISMISS_BUTTON', + 'CLICK_AWAY', + ]; + if (!allowed.includes(rationaleCode as UiRepairRationaleCode)) { + return { ok: false, error: `Invalid rationale_code: ${rationaleCodeRaw}` }; + } + + return { + ok: true, + candidate: { + x, + y, + confidence, + rationaleCode: rationaleCode as UiRepairRationaleCode, + }, + }; +} + +export function decodePngDimensionsFromBase64( + base64Png: string, +): PngDimensions | null { + try { + const buffer = Buffer.from(base64Png, 'base64'); + // PNG signature (8 bytes) + if (buffer.length < 24) return null; + const signature = buffer.subarray(0, 8); + const pngSig = Buffer.from([ + 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, + ]); + if (!signature.equals(pngSig)) return null; + + // IHDR chunk starts at byte 8: + // 4 bytes length, 4 bytes type ("IHDR"), then 4 width, 4 height + const ihdrType = buffer.subarray(12, 16).toString('ascii'); + if (ihdrType !== 'IHDR') return null; + + const width = buffer.readUInt32BE(16); + const height = buffer.readUInt32BE(20); + if (width <= 0 || height <= 0) return null; + return { width, height }; + } catch { + return null; + } +} + +export function validateUiRepairCandidate(input: { + candidate: UiRepairCandidate; + dimensions: PngDimensions; + minConfidence: number; +}): { ok: true } | { ok: false; error: string } { + const { candidate, dimensions, minConfidence } = input; + const { width, height } = dimensions; + + if (candidate.confidence < minConfidence) { + return { + ok: false, + error: `confidence ${candidate.confidence} < ${minConfidence}`, + }; + } + + if ( + !Number.isFinite(candidate.x) || + !Number.isFinite(candidate.y) || + candidate.x < 0 || + candidate.y < 0 || + candidate.x >= width || + candidate.y >= height + ) { + return { ok: false, error: 'coordinates out of bounds' }; + } + + // Conservative “danger zone” rules: + // - Prefer top-right close icons. + // - Only accept other actions if they still land in conservative, low-risk regions. + const xNorm = candidate.x / width; + const yNorm = candidate.y / height; + + switch (candidate.rationaleCode) { + case 'CLOSE_X': { + // Close icons are typically in the top-right corner of a modal/popup. + // Accept only a tight safe region: x ∈ [0.80..1.00), y ∈ [0.00..0.25]. + if (xNorm < 0.8 || yNorm > 0.25) { + return { + ok: false, + error: 'CLOSE_X must be in tight top-right region', + }; + } + break; + } + case 'DISMISS_BUTTON': { + // Dismiss buttons are riskier than a close icon; keep this very conservative. + // Accept only a right-side region: x ∈ [0.65..1.00), y ∈ [0.00..0.70]. + if (xNorm < 0.65 || yNorm > 0.7) { + return { + ok: false, + error: 'DISMISS_BUTTON must be in conservative right-side region', + }; + } + break; + } + case 'CLICK_AWAY': { + // Only allow click-away in a small top-left corner region to reduce risk of clicking results/buttons. + if (xNorm > 0.12 || yNorm > 0.12) { + return { + ok: false, + error: 'CLICK_AWAY must be in conservative corner region', + }; + } + break; + } + } + + return { ok: true }; +} diff --git a/packages/bytebot-agent/src/agent/agent.desktop-safety.spec.ts b/packages/bytebot-agent/src/agent/agent.desktop-safety.spec.ts new file mode 100644 index 000000000..00d13c769 --- /dev/null +++ b/packages/bytebot-agent/src/agent/agent.desktop-safety.spec.ts @@ -0,0 +1,221 @@ +import { + DESKTOP_MAX_KEY_HOLD_MS, + DesktopLoopDetector, + buildDesktopActionSignature, + hammingDistanceHex, + normalizeComputerToolUseBlock, +} from './agent.desktop-safety'; +import { MessageContentType } from '@bytebot/shared'; + +describe('agent.desktop-safety', () => { + describe('normalizeComputerToolUseBlock', () => { + it('rewrites non-modifier press_keys down into a tap (type_keys)', () => { + const block: any = { + type: MessageContentType.ToolUse, + id: 't1', + name: 'computer_press_keys', + input: { keys: ['Enter'], press: 'down' }, + }; + + const normalized = normalizeComputerToolUseBlock(block); + expect(normalized.normalizedBlock.name).toBe('computer_type_keys'); + expect(normalized.rewriteReason).toBe('non_modifier_down_to_tap'); + }); + + it('rewrites modifier press_keys down without holdMs into a tap (type_keys)', () => { + const block: any = { + type: MessageContentType.ToolUse, + id: 't2', + name: 'computer_press_keys', + input: { keys: ['Shift'], press: 'down' }, + }; + + const normalized = normalizeComputerToolUseBlock(block); + expect(normalized.normalizedBlock.name).toBe('computer_type_keys'); + expect(normalized.rewriteReason).toBe( + 'modifier_down_missing_holdms_to_tap', + ); + }); + + it('clamps holdMs for modifier holds', () => { + const block: any = { + type: MessageContentType.ToolUse, + id: 't3', + name: 'computer_press_keys', + input: { keys: ['Shift'], press: 'down', holdMs: 5000 }, + }; + + const normalized = normalizeComputerToolUseBlock(block); + expect(normalized.normalizedBlock.name).toBe('computer_press_keys'); + expect((normalized.normalizedBlock as any).input.holdMs).toBe( + DESKTOP_MAX_KEY_HOLD_MS, + ); + expect(normalized.rewriteReason).toBe('modifier_holdms_clamped'); + }); + }); + + describe('hammingDistanceHex', () => { + it('computes correct nibble popcount distance', () => { + expect(hammingDistanceHex('ff', '00')).toBe(8); + expect(hammingDistanceHex('0f', '00')).toBe(4); + expect(hammingDistanceHex('0f', '0f')).toBe(0); + }); + }); + + describe('DesktopLoopDetector', () => { + it('interrupts on repeated same action with no meaningful hash change', () => { + const detector = new DesktopLoopDetector(); + + let interrupted = false; + for (let i = 0; i < 6; i++) { + const res = detector.record({ + atMs: Date.now(), + signature: 'computer_type_keys(keys=Enter)', + screenshotHash: 'ffffffffffffffff', + }); + interrupted = res.interrupt; + } + + expect(interrupted).toBe(true); + }); + + it('does not interrupt if the screen hash meaningfully changes during the streak', () => { + const detector = new DesktopLoopDetector(); + + const hashes = [ + 'ffffffffffffffff', + '0000000000000000', + 'ffffffffffffffff', + '0000000000000000', + 'ffffffffffffffff', + '0000000000000000', + ]; + + let interrupted = false; + for (let i = 0; i < hashes.length; i++) { + const res = detector.record({ + atMs: Date.now(), + signature: 'computer_type_keys(keys=Enter)', + screenshotHash: hashes[i], + }); + interrupted = res.interrupt; + } + + expect(interrupted).toBe(false); + }); + + it('interrupts on repeated waits even if the screen hash changes (spinner/animation case)', () => { + const detector = new DesktopLoopDetector(); + + const hashes = [ + 'ffffffffffffffff', + '0000000000000000', + 'ffffffffffffffff', + '0000000000000000', + 'ffffffffffffffff', + '0000000000000000', + ]; + + let interrupted = false; + for (let i = 0; i < hashes.length; i++) { + const res = detector.record({ + atMs: Date.now(), + signature: 'computer_wait', + screenshotHash: hashes[i], + }); + interrupted = res.interrupt; + } + + expect(interrupted).toBe(true); + }); + + it('interrupts on alternating click/move loops within a window (non-consecutive repeats)', () => { + const detector = new DesktopLoopDetector(); + + const clickSig = + 'computer_click_mouse(bucket=38,29,button=left,clickCount=1)'; + const moveSig = 'computer_move_mouse(bucket=38,29)'; + + let interrupted = false; + for (let i = 0; i < 12; i++) { + const signature = i % 2 === 0 ? moveSig : clickSig; + const res = detector.record({ + atMs: Date.now(), + signature, + screenshotHash: 'ffffffffffffffff', + }); + interrupted = res.interrupt; + if (interrupted) break; + } + + expect(interrupted).toBe(true); + }); + + it('interrupts on alternating click/move loops even when hashes jitter (animated page case)', () => { + const detector = new DesktopLoopDetector(); + + const clickSig = + 'computer_click_mouse(bucket=38,29,button=left,clickCount=1)'; + const moveSig = 'computer_move_mouse(bucket=38,29)'; + + // Hamming distance between these is 8 (fails "no change"<=6, passes animated<=12). + const h1 = 'ffffffffffffffff'; + const h2 = '00ffffffffffffff'; + + let lastRule: string | undefined; + for (let i = 0; i < 12; i++) { + const signature = i % 2 === 0 ? moveSig : clickSig; + const res = detector.record({ + atMs: Date.now(), + signature, + screenshotHash: i % 2 === 0 ? h1 : h2, + }); + if (res.interrupt) { + lastRule = res.rule; + break; + } + } + + expect(lastRule).toBe('repeat_in_window_animated_no_progress'); + }); + + it('interrupts on excessive waits within a window when the screen jitters (non-consecutive waits)', () => { + const detector = new DesktopLoopDetector(); + + const h1 = 'ffffffffffffffff'; + const h2 = '00ffffffffffffff'; + + let lastRule: string | undefined; + for (let i = 0; i < 24; i++) { + const signature = + i % 2 === 0 ? 'computer_wait' : 'computer_move_mouse(bucket=0,0)'; + const res = detector.record({ + atMs: Date.now(), + signature, + screenshotHash: i % 2 === 0 ? h1 : h2, + }); + if (res.interrupt) { + lastRule = res.rule; + break; + } + } + + expect(lastRule).toBe('wait_in_window_excessive'); + }); + }); + + describe('buildDesktopActionSignature', () => { + it('does not include raw text for type_text', () => { + const block: any = { + type: MessageContentType.ToolUse, + id: 't4', + name: 'computer_type_text', + input: { text: 'super secret password', isSensitive: true }, + }; + const sig = buildDesktopActionSignature(block); + expect(sig).toContain('len='); + expect(sig).toContain('sensitive=true'); + expect(sig).not.toContain('super secret password'); + }); + }); +}); diff --git a/packages/bytebot-agent/src/agent/agent.desktop-safety.ts b/packages/bytebot-agent/src/agent/agent.desktop-safety.ts new file mode 100644 index 000000000..f14915f01 --- /dev/null +++ b/packages/bytebot-agent/src/agent/agent.desktop-safety.ts @@ -0,0 +1,489 @@ +import { + ComputerToolUseContentBlock, + MessageContentType, +} from '@bytebot/shared'; + +export const DESKTOP_MAX_ACTIONS_WITHOUT_OBSERVATION = parseInt( + process.env.BYTEBOT_DESKTOP_MAX_ACTIONS_WITHOUT_OBSERVATION || '3', + 10, +); + +export const DESKTOP_MAX_KEY_HOLD_MS = parseInt( + process.env.BYTEBOT_DESKTOP_MAX_KEY_HOLD_MS || '750', + 10, +); + +export const DESKTOP_LOOP_REPEAT_THRESHOLD = parseInt( + process.env.BYTEBOT_DESKTOP_LOOP_REPEAT_THRESHOLD || '6', + 10, +); + +export const DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING = parseInt( + process.env.BYTEBOT_DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING || '6', + 10, +); + +// Secondary stability threshold to tolerate animated pages/spinners. +// Used only for loop detection heuristics; "meaningful progress" still uses the primary threshold above. +export const DESKTOP_LOOP_ANIMATED_MAX_HAMMING = parseInt( + process.env.BYTEBOT_DESKTOP_LOOP_ANIMATED_MAX_HAMMING || '12', + 10, +); + +export const DESKTOP_LOOP_RECENT_ACTIONS_MAX = parseInt( + process.env.BYTEBOT_DESKTOP_LOOP_RECENT_ACTIONS_MAX || '25', + 10, +); + +export const DESKTOP_LOOP_COORDINATE_BUCKET_PX = parseInt( + process.env.BYTEBOT_DESKTOP_LOOP_COORDINATE_BUCKET_PX || '20', + 10, +); + +export const DESKTOP_LOOP_WAIT_IN_WINDOW_THRESHOLD = parseInt( + process.env.BYTEBOT_DESKTOP_LOOP_WAIT_IN_WINDOW_THRESHOLD || '12', + 10, +); + +export const DESKTOP_LOOP_REPAIR_ENABLED = + (process.env.BYTEBOT_DESKTOP_LOOP_REPAIR_ENABLED || '') + .trim() + .toLowerCase() === 'true'; + +export const DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED = + (process.env.BYTEBOT_DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED || '') + .trim() + .toLowerCase() === 'true'; + +export const DESKTOP_LOOP_REPAIR_ATTEMPT2_MIN_CONFIDENCE = (() => { + const raw = ( + process.env.BYTEBOT_DESKTOP_LOOP_REPAIR_ATTEMPT2_MIN_CONFIDENCE || '0.7' + ).trim(); + const value = Number.parseFloat(raw); + if (!Number.isFinite(value)) return 0.7; + return Math.min(Math.max(value, 0), 1); +})(); + +export const DESKTOP_LOOP_REPAIR_EPISODE_MAX_MS = parseInt( + process.env.BYTEBOT_DESKTOP_LOOP_REPAIR_EPISODE_MAX_MS || '120000', + 10, +); + +export const DESKTOP_TOOL_CONTRACT_VIOLATION_LIMIT = parseInt( + process.env.BYTEBOT_DESKTOP_TOOL_CONTRACT_VIOLATION_LIMIT || '10', + 10, +); + +const MODIFIER_KEY_NAMES = new Set([ + 'shift', + 'shift_l', + 'shift_r', + 'control', + 'ctrl', + 'control_l', + 'control_r', + 'alt', + 'alt_l', + 'alt_r', + 'meta', + 'meta_l', + 'meta_r', + 'super', + 'super_l', + 'super_r', + 'cmd', + 'command', + 'option', +]); + +export function isModifierKeyName(key: string): boolean { + const normalized = key.trim().toLowerCase(); + if (!normalized) return false; + if (MODIFIER_KEY_NAMES.has(normalized)) return true; + + return ( + normalized.startsWith('shift') || + normalized.startsWith('control') || + normalized.startsWith('ctrl') || + normalized.startsWith('alt') || + normalized.startsWith('meta') || + normalized.startsWith('super') || + normalized.startsWith('cmd') || + normalized.startsWith('command') || + normalized.startsWith('option') + ); +} + +export type PressKeysNormalization = { + normalizedBlock: ComputerToolUseContentBlock; + rewriteReason?: + | 'non_modifier_down_to_tap' + | 'modifier_down_missing_holdms_to_tap' + | 'modifier_holdms_clamped' + | 'none'; + rewrittenKeys?: string[]; + holdMs?: number | null; +}; + +export function normalizeComputerToolUseBlock( + block: ComputerToolUseContentBlock, +): PressKeysNormalization { + if (block.name !== 'computer_press_keys') { + return { normalizedBlock: block, rewriteReason: 'none' }; + } + + const input = (block as any).input as Record | undefined; + const keys = Array.isArray(input?.keys) + ? (input.keys as unknown[]).filter( + (k): k is string => typeof k === 'string', + ) + : []; + const press = + input?.press === 'down' || input?.press === 'up' ? input.press : 'down'; + + const holdMsRaw = + typeof input?.holdMs === 'number' + ? input.holdMs + : typeof input?.hold_ms === 'number' + ? (input as any).hold_ms + : null; + + const holdMs = + typeof holdMsRaw === 'number' && + Number.isFinite(holdMsRaw) && + holdMsRaw >= 0 + ? Math.min(Math.floor(holdMsRaw), DESKTOP_MAX_KEY_HOLD_MS) + : null; + + const nonModifiers = keys.filter((k) => !isModifierKeyName(k)); + + if (press === 'down' && nonModifiers.length > 0) { + return { + normalizedBlock: { + type: MessageContentType.ToolUse, + id: block.id, + name: 'computer_type_keys', + input: { + keys, + delay: 75, + }, + }, + rewriteReason: 'non_modifier_down_to_tap', + rewrittenKeys: nonModifiers, + holdMs: null, + }; + } + + if (press === 'down' && nonModifiers.length === 0 && holdMs === null) { + return { + normalizedBlock: { + type: MessageContentType.ToolUse, + id: block.id, + name: 'computer_type_keys', + input: { + keys, + delay: 75, + }, + }, + rewriteReason: 'modifier_down_missing_holdms_to_tap', + rewrittenKeys: keys, + holdMs: null, + }; + } + + if (press === 'down' && holdMsRaw !== null && holdMsRaw !== holdMs) { + return { + normalizedBlock: { + ...block, + input: { + ...(block as any).input, + holdMs, + }, + } as ComputerToolUseContentBlock, + rewriteReason: 'modifier_holdms_clamped', + rewrittenKeys: keys, + holdMs, + }; + } + + if (press === 'down' && holdMs !== null) { + return { + normalizedBlock: { + ...block, + input: { + ...(block as any).input, + holdMs, + }, + } as ComputerToolUseContentBlock, + rewriteReason: 'none', + rewrittenKeys: [], + holdMs, + }; + } + + return { normalizedBlock: block, rewriteReason: 'none' }; +} + +export type DesktopActionSample = { + atMs: number; + signature: string; + screenshotHash?: string | null; +}; + +const NIBBLE_POPCOUNT = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4]; + +export function hammingDistanceHex(a: string, b: string): number { + const aa = a.trim().toLowerCase(); + const bb = b.trim().toLowerCase(); + if (aa.length !== bb.length) return Number.POSITIVE_INFINITY; + + let distance = 0; + for (let i = 0; i < aa.length; i++) { + const x = parseInt(aa[i], 16); + const y = parseInt(bb[i], 16); + if (Number.isNaN(x) || Number.isNaN(y)) return Number.POSITIVE_INFINITY; + distance += NIBBLE_POPCOUNT[x ^ y]; + } + return distance; +} + +export class DesktopLoopDetector { + private recent: DesktopActionSample[] = []; + private consecutiveWaitCount = 0; + + record(sample: DesktopActionSample): { + interrupt: boolean; + rule?: string; + signature?: string; + count?: number; + } { + this.recent.push(sample); + if (this.recent.length > DESKTOP_LOOP_RECENT_ACTIONS_MAX) { + this.recent.shift(); + } + + const signature = sample.signature; + const hash = sample.screenshotHash ?? null; + const actionType = signature.split('(')[0] || signature; + + if (actionType === 'computer_wait') { + this.consecutiveWaitCount++; + } else { + this.consecutiveWaitCount = 0; + } + + // Special-case: repeated waits are a strong signal of a stalled control loop, + // even if the screen hash changes due to animations/spinners. + if (this.consecutiveWaitCount >= DESKTOP_LOOP_REPEAT_THRESHOLD) { + return { interrupt: true, rule: 'wait_loop_excessive' }; + } + + const waitCountInWindow = this.recent.filter((r) => { + const t = r.signature.split('(')[0] || r.signature; + return t === 'computer_wait'; + }).length; + + const hashes = this.recent + .map((r) => r.screenshotHash) + .filter((h): h is string => typeof h === 'string' && h.trim().length > 0); + + // Require multiple observations to assert "no progress" robustly. + const stableHashWindowSize = 3; + const recentHashes = hashes.slice(-stableHashWindowSize); + const hasStableObservationWindow = + recentHashes.length >= stableHashWindowSize && + recentHashes.every( + (h) => + hammingDistanceHex(recentHashes[0], h) <= + DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING, + ); + + const hasAnimatedObservationWindow = + !hasStableObservationWindow && + recentHashes.length >= stableHashWindowSize && + recentHashes.every( + (h) => + hammingDistanceHex(recentHashes[0], h) <= + DESKTOP_LOOP_ANIMATED_MAX_HAMMING, + ); + + // Animated/spinner case: tolerate small hash jitter while still stopping obvious repeats. + if ( + hasAnimatedObservationWindow && + waitCountInWindow >= DESKTOP_LOOP_WAIT_IN_WINDOW_THRESHOLD + ) { + return { interrupt: true, rule: 'wait_in_window_excessive' }; + } + + if ( + (!hasStableObservationWindow && !hasAnimatedObservationWindow) || + !hash + ) { + return { interrupt: false }; + } + + const signatureCounts = new Map(); + for (const r of this.recent) { + signatureCounts.set( + r.signature, + (signatureCounts.get(r.signature) || 0) + 1, + ); + } + + let topSignature: string | null = null; + let topCount = 0; + for (const [sig, count] of signatureCounts.entries()) { + if (count > topCount) { + topSignature = sig; + topCount = count; + } + } + + if (topSignature && topCount >= DESKTOP_LOOP_REPEAT_THRESHOLD) { + if (hasStableObservationWindow) { + return { + interrupt: true, + rule: 'repeat_in_window_no_progress', + signature: topSignature, + count: topCount, + }; + } + + // Conservative: only trip the animated/jitter-tolerant rule for click/move bucket loops. + const topType = (topSignature.split('(')[0] || topSignature).trim(); + if ( + topType === 'computer_click_mouse' || + topType === 'computer_move_mouse' + ) { + return { + interrupt: true, + rule: 'repeat_in_window_animated_no_progress', + signature: topSignature, + count: topCount, + }; + } + } + + return { interrupt: false }; + } + + getRecent(): DesktopActionSample[] { + return [...this.recent]; + } +} + +export function buildDesktopActionSignature( + block: ComputerToolUseContentBlock, +): string { + const name = block.name; + const input = (block as any).input as Record | undefined; + + const bucket = (coordinates?: { x: number; y: number } | null): string => { + if ( + !coordinates || + typeof coordinates.x !== 'number' || + typeof coordinates.y !== 'number' || + !Number.isFinite(coordinates.x) || + !Number.isFinite(coordinates.y) + ) { + return '∅'; + } + const bx = Math.floor(coordinates.x / DESKTOP_LOOP_COORDINATE_BUCKET_PX); + const by = Math.floor(coordinates.y / DESKTOP_LOOP_COORDINATE_BUCKET_PX); + return `${bx},${by}`; + }; + + switch (name) { + case 'computer_move_mouse': { + const coordinates = + input?.coordinates && + typeof input.coordinates === 'object' && + typeof (input as any).coordinates.x === 'number' && + typeof (input as any).coordinates.y === 'number' + ? ((input as any).coordinates as { x: number; y: number }) + : null; + return `${name}(bucket=${bucket(coordinates)})`; + } + case 'computer_type_keys': { + const keys = Array.isArray(input?.keys) + ? (input.keys as unknown[]).filter( + (k): k is string => typeof k === 'string', + ) + : []; + return `${name}(keys=${keys.join('+') || '∅'})`; + } + case 'computer_press_keys': { + const keys = Array.isArray(input?.keys) + ? (input.keys as unknown[]).filter( + (k): k is string => typeof k === 'string', + ) + : []; + const press = + input?.press === 'down' || input?.press === 'up' ? input.press : 'down'; + const holdMs = + typeof input?.holdMs === 'number' + ? Math.floor(input.holdMs) + : typeof (input as any)?.hold_ms === 'number' + ? Math.floor((input as any).hold_ms) + : null; + return `${name}(press=${press},keys=${keys.join('+') || '∅'},holdMs=${holdMs ?? '∅'})`; + } + case 'computer_type_text': { + const text = typeof input?.text === 'string' ? input.text : ''; + const isSensitive = + typeof input?.isSensitive === 'boolean' ? input.isSensitive : false; + return `${name}(len=${text.length},hasNewline=${text.includes('\n')},sensitive=${isSensitive})`; + } + case 'computer_paste_text': { + const text = typeof input?.text === 'string' ? input.text : ''; + const isSensitive = + typeof input?.isSensitive === 'boolean' ? input.isSensitive : false; + return `${name}(len=${text.length},hasNewline=${text.includes('\n')},sensitive=${isSensitive})`; + } + case 'computer_wait': { + return name; + } + case 'computer_click_mouse': { + const button = typeof input?.button === 'string' ? input.button : 'left'; + const clickCount = + typeof input?.clickCount === 'number' ? input.clickCount : 1; + const coordinates = + input?.coordinates && + typeof input.coordinates === 'object' && + typeof (input as any).coordinates.x === 'number' && + typeof (input as any).coordinates.y === 'number' + ? ((input as any).coordinates as { x: number; y: number }) + : null; + return `${name}(bucket=${bucket(coordinates)},button=${button},clickCount=${clickCount})`; + } + case 'computer_press_mouse': { + const button = typeof input?.button === 'string' ? input.button : 'left'; + const press = + input?.press === 'down' || input?.press === 'up' ? input.press : 'down'; + const coordinates = + input?.coordinates && + typeof input.coordinates === 'object' && + typeof (input as any).coordinates.x === 'number' && + typeof (input as any).coordinates.y === 'number' + ? ((input as any).coordinates as { x: number; y: number }) + : null; + return `${name}(bucket=${bucket(coordinates)},button=${button},press=${press})`; + } + case 'computer_scroll': { + const direction = + typeof input?.direction === 'string' ? input.direction : 'down'; + const scrollCount = + typeof input?.scrollCount === 'number' ? input.scrollCount : 1; + const coordinates = + input?.coordinates && + typeof input.coordinates === 'object' && + typeof (input as any).coordinates.x === 'number' && + typeof (input as any).coordinates.y === 'number' + ? ((input as any).coordinates as { x: number; y: number }) + : null; + return `${name}(bucket=${bucket(coordinates)},direction=${direction},scrollCount=${scrollCount})`; + } + default: + return name; + } +} diff --git a/packages/bytebot-agent/src/agent/agent.desktop-ui-repair.ladder.fixture.spec.ts b/packages/bytebot-agent/src/agent/agent.desktop-ui-repair.ladder.fixture.spec.ts new file mode 100644 index 000000000..7488474ec --- /dev/null +++ b/packages/bytebot-agent/src/agent/agent.desktop-ui-repair.ladder.fixture.spec.ts @@ -0,0 +1,184 @@ +import { MessageContentType } from '@bytebot/shared'; + +function makeFakePngBase64(width: number, height: number): string { + const buffer = Buffer.alloc(24); + Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]).copy(buffer, 0); + buffer.writeUInt32BE(13, 8); // IHDR length + buffer.write('IHDR', 12, 'ascii'); + buffer.writeUInt32BE(width, 16); + buffer.writeUInt32BE(height, 20); + return buffer.toString('base64'); +} + +describe('desktop UI repair ladder fixture replay', () => { + it('runs Esc once, then a single close-click, then escalates to takeover (bounded)', async () => { + jest.resetModules(); + + process.env.BYTEBOT_DESKTOP_LOOP_REPAIR_ENABLED = 'true'; + process.env.BYTEBOT_DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED = 'true'; + process.env.BYTEBOT_DESKTOP_LOOP_REPAIR_ATTEMPT2_MIN_CONFIDENCE = '0.7'; + + const screenshotBase64 = makeFakePngBase64(100, 100); + const actionHashes = { + before: 'ffffffffffffffff', + after: 'ffffffffffffffff', // unchanged => no progress + }; + + const toolCalls: Record = { + computer_type_keys: 0, + computer_screenshot: 0, + computer_click_mouse: 0, + }; + + jest.doMock('./agent.computer-use', () => { + const actual = jest.requireActual('./agent.computer-use'); + return { + ...actual, + resetDesktopInput: jest.fn(async () => undefined), + handleComputerToolUse: jest.fn( + async (block: any, _logger: any, ctx: any) => { + toolCalls[block.name] = (toolCalls[block.name] || 0) + 1; + + const screenshotHash = actionHashes.after; + ctx?.onAction?.({ + actionType: block.name, + success: true, + durationMs: 1, + coordinates: block?.input?.coordinates, + actionSignature: `${block.name}`, + screenshotHash, + screenshotCaptured: true, + }); + + const imageBlock = { + type: MessageContentType.Image, + source: { + type: 'base64', + media_type: 'image/png', + data: screenshotBase64, + }, + }; + + if (block.name === 'computer_screenshot') { + return { + type: MessageContentType.ToolResult, + tool_use_id: block.id, + content: [imageBlock], + }; + } + + return { + type: MessageContentType.ToolResult, + tool_use_id: block.id, + content: [ + { type: MessageContentType.Text, text: 'ok' }, + imageBlock, + ], + }; + }, + ), + }; + }); + + const { AgentProcessor } = await import('./agent.processor'); + + const eventEmitter = { emit: jest.fn() } as any; + + const processor = new AgentProcessor( + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + { isPhase6Enabled: () => false } as any, + {} as any, + { isWorkspaceEnabled: () => false } as any, + eventEmitter, + ) as any; + + processor.lastScreenshotHash = actionHashes.before; + + const generatedToolResults: any[] = []; + + const service = { + generateMessage: jest.fn(async () => { + return { + contentBlocks: [ + { + type: MessageContentType.Text, + text: JSON.stringify({ + x: 90, + y: 10, + confidence: 0.9, + rationale_code: 'CLOSE_X', + }), + }, + ], + }; + }), + } as any; + + const model = { name: 'desktop-vision' } as any; + + const actionContext: any = { + taskId: 't-1', + desktopUrl: 'http://desktop', + onAction: (action: any) => { + processor.handleDesktopSafetyAction(action); + }, + }; + + const callRepair = () => + processor.performDesktopUiRepair({ + taskId: 't-1', + desktopUrl: 'http://desktop', + actionContext, + trigger: 'loop', + service, + model, + generatedToolResults, + }); + + const res1 = await callRepair(); + expect(res1.shortCircuitReason).toBe('ui_repair_esc'); + expect(toolCalls.computer_type_keys).toBe(1); + + const res2 = await callRepair(); + expect(res2.shortCircuitReason).toBe('takeover_required'); + expect(toolCalls.computer_screenshot).toBe(1); + expect(toolCalls.computer_click_mouse).toBe(1); + expect(service.generateMessage).toHaveBeenCalledTimes(1); + + expect(processor.pendingDesktopNeedsHelp).toMatchObject({ + errorCode: 'DESKTOP_TAKEOVER_REQUIRED', + details: { + reason: 'UI_BLOCKED_POPUP', + loop: expect.objectContaining({ trigger: 'loop' }), + repair: expect.objectContaining({ + attempt1: 'esc', + attempt2: 'failed', + }), + }, + }); + + // Re-entry safety: do not run repairs again in the same episode once takeover is required. + const res3 = await callRepair(); + expect(res3.shortCircuitReason).toBe('takeover_required'); + expect(toolCalls.computer_type_keys).toBe(1); + expect(toolCalls.computer_screenshot).toBe(1); + expect(toolCalls.computer_click_mouse).toBe(1); + expect(service.generateMessage).toHaveBeenCalledTimes(1); + + expect(eventEmitter.emit).toHaveBeenCalledWith( + 'desktop.ui_repair.attempt', + expect.objectContaining({ attempt: 'esc', trigger: 'loop' }), + ); + expect(eventEmitter.emit).toHaveBeenCalledWith( + 'desktop.ui_repair.attempt', + expect.objectContaining({ attempt: 'close_click', trigger: 'loop' }), + ); + }); +}); diff --git a/packages/bytebot-agent/src/agent/agent.module.ts b/packages/bytebot-agent/src/agent/agent.module.ts index 40e651abe..db9bd4098 100644 --- a/packages/bytebot-agent/src/agent/agent.module.ts +++ b/packages/bytebot-agent/src/agent/agent.module.ts @@ -11,6 +11,8 @@ import { GoogleModule } from '../google/google.module'; import { SummariesModule } from 'src/summaries/summaries.modue'; import { AgentAnalyticsService } from './agent.analytics'; import { ProxyModule } from 'src/proxy/proxy.module'; +import { TaskControllerModule } from '../task-controller/task-controller.module'; +import { ActionLoggingModule } from '../action-logging/action-logging.module'; @Module({ imports: [ @@ -22,6 +24,8 @@ import { ProxyModule } from 'src/proxy/proxy.module'; OpenAIModule, GoogleModule, ProxyModule, + TaskControllerModule, + ActionLoggingModule, ], providers: [ AgentProcessor, diff --git a/packages/bytebot-agent/src/agent/agent.processor.ts b/packages/bytebot-agent/src/agent/agent.processor.ts index c48912fae..aa63de5bc 100644 --- a/packages/bytebot-agent/src/agent/agent.processor.ts +++ b/packages/bytebot-agent/src/agent/agent.processor.ts @@ -37,8 +37,80 @@ import { SUMMARIZATION_SYSTEM_PROMPT, } from './agent.constants'; import { SummariesService } from '../summaries/summaries.service'; -import { handleComputerToolUse } from './agent.computer-use'; +import { + handleComputerToolUse, + ActionContext, + ActionResult, + DesktopRequiredError, +} from './agent.computer-use'; import { ProxyService } from '../proxy/proxy.service'; +import { TaskControllerService } from '../task-controller/task-controller.service'; +import { + ActionLoggingService, + ActionLogEntry, +} from '../action-logging/action-logging.service'; +import { + validateMessageHistory, + formatValidationError, +} from './message-history.validator'; +// v2.2.7: Import Mutex to prevent concurrent iteration execution +import { Mutex } from 'async-mutex'; +// v2.3.0 M4: Import WorkspaceService for workspace-aware desktop resolution +import { WorkspaceService } from '../workspace/workspace.service'; +import { + isDispatchedUserPromptStep, + resolveExecutionSurface, + shouldAcquireDesktop, +} from './execution-surface'; +import { buildNeedsHelpResult, parseNeedsHelpErrorCode } from './needs-help'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { + DESKTOP_MAX_ACTIONS_WITHOUT_OBSERVATION, + DESKTOP_LOOP_COORDINATE_BUCKET_PX, + DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING, + DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED, + DESKTOP_LOOP_REPAIR_ATTEMPT2_MIN_CONFIDENCE, + DESKTOP_LOOP_REPAIR_ENABLED, + DESKTOP_LOOP_REPAIR_EPISODE_MAX_MS, + DESKTOP_LOOP_REPEAT_THRESHOLD, + DESKTOP_TOOL_CONTRACT_VIOLATION_LIMIT, + DesktopLoopDetector, + hammingDistanceHex, + normalizeComputerToolUseBlock, +} from './agent.desktop-safety'; +import { resetDesktopInput } from './agent.computer-use'; +import { + buildUiRepairClassifierMessage, + buildUiRepairClassifierSystemPrompt, + decodePngDimensionsFromBase64, + parseUiRepairCandidate, + validateUiRepairCandidate, +} from './agent.desktop-repair'; + +const DESKTOP_OBSERVATION_REQUIRED_TOOLS = new Set([ + 'computer_move_mouse', + 'computer_trace_mouse', + 'computer_click_mouse', + 'computer_press_mouse', + 'computer_drag_mouse', + 'computer_scroll', + 'computer_type_keys', + 'computer_press_keys', + 'computer_type_text', + 'computer_paste_text', + 'computer_wait', + 'computer_application', +]); + +type DesktopUiRepairEpisode = { + episodeId: number; + startedAtMs: number; + anchorHash: string | null; + escUsed: boolean; + closeClickUsed: boolean; + progressCandidateHash: string | null; + progressCandidateCount: number; +}; @Injectable() export class AgentProcessor { @@ -47,6 +119,82 @@ export class AgentProcessor { private isProcessing = false; private abortController: AbortController | null = null; private services: Record = {}; + // v2.2.1: Track cached desktop URL per task to avoid repeated waits + private cachedDesktopUrl: string | null = null; + private desktopLoopDetector = new DesktopLoopDetector(); + private desktopToolContractViolations = 0; + private desktopActionsWithoutObservation = 0; + private lastScreenshotHash: string | null = null; + private blockedClickBuckets = new Map< + string, + { sinceHash: string; blockedAtMs: number; count: number } + >(); + private pendingForcedScreenshot = false; + private pendingForcedScreenshotReason: 'actions_budget' | null = null; + private pendingDesktopRepairTrigger: { + trigger: 'loop' | 'blocked_click'; + details?: Record; + } | null = null; + private desktopUiRepairEpisodeSeq = 0; + private desktopUiRepairEpisode: DesktopUiRepairEpisode | null = null; + private desktopUiRepairInProgress = false; + private pendingDesktopNeedsHelp: ReturnType< + typeof buildNeedsHelpResult + > | null = null; + // v2.2.7: Mutex to prevent concurrent iteration execution + // This prevents race conditions where multiple LLM calls could be made simultaneously + // See: 2025-12-09-race-condition-duplicate-llm-calls-fix.md + private readonly iterationMutex = new Mutex(); + + /** + * Safely schedules the next iteration with proper error handling. + * v2.0.28: Added to prevent unhandled promise rejections from fire-and-forget calls. + * + * Instead of `void this.runIteration(taskId)` which ignores errors, + * this method catches and logs any errors that occur. + */ + private safeRunIteration(taskId: string): void { + this.runIteration(taskId).catch((error: Error) => { + const timestamp = new Date().toISOString(); + + // Phase 4: Specifically identify DesktopRequiredError for execution surface violations + if (error instanceof DesktopRequiredError) { + this.logger.error( + `[${timestamp}] [Phase 4] Task ${taskId} failed: Desktop required but not available. ${error.message}`, + ); + } else { + this.logger.error( + `[${timestamp}] Unhandled error in runIteration for task ${taskId}: ${error.message}`, + error.stack, + ); + } + + // Attempt to mark task as failed if we haven't already + // Phase 4: Include error message for DesktopRequiredError and other failures + if (this.isProcessing && this.currentTaskId === taskId) { + this.tasksService + .update(taskId, { status: TaskStatus.FAILED, error: error.message }) + .catch((updateError: Error) => { + this.logger.error( + `[${timestamp}] Failed to mark task ${taskId} as FAILED after error: ${updateError.message}`, + ); + }); + + this.isProcessing = false; + this.currentTaskId = null; + } + }); + } + + /** + * Safely schedules the next iteration via setImmediate with proper error handling. + * v2.0.28: Replaces bare setImmediate to prevent unhandled promise rejections. + */ + private scheduleNextIteration(taskId: string): void { + setImmediate(() => { + this.safeRunIteration(taskId); + }); + } constructor( private readonly tasksService: TasksService, @@ -57,6 +205,11 @@ export class AgentProcessor { private readonly googleService: GoogleService, private readonly proxyService: ProxyService, private readonly inputCaptureService: InputCaptureService, + private readonly taskControllerService: TaskControllerService, + private readonly actionLoggingService: ActionLoggingService, + // v2.3.0 M4: WorkspaceService for persistent workspace resolution and locking + private readonly workspaceService: WorkspaceService, + private readonly eventEmitter: EventEmitter2, ) { this.services = { anthropic: this.anthropicService, @@ -65,6 +218,20 @@ export class AgentProcessor { proxy: this.proxyService, }; this.logger.log('AgentProcessor initialized'); + + // Log Phase 6 status + if (this.taskControllerService.isPhase6Enabled()) { + this.logger.log('Phase 6 task controller integration enabled'); + } else { + this.logger.log('Phase 6 not enabled - using legacy desktop URL mode'); + } + + // v2.3.0 M4: Log workspace feature status + if (this.workspaceService.isWorkspaceEnabled()) { + this.logger.log('Workspace features enabled (Product 2: Workflows)'); + } else { + this.logger.log('Workspace features disabled (Product 1 only)'); + } } /** @@ -100,7 +267,7 @@ export class AgentProcessor { this.logger.log(`Task resume event received for task ID: ${taskId}`); this.abortController = new AbortController(); - void this.runIteration(taskId); + this.safeRunIteration(taskId); } } @@ -122,284 +289,1521 @@ export class AgentProcessor { this.isProcessing = true; this.currentTaskId = taskId; this.abortController = new AbortController(); + // v2.2.1: Reset cached desktop URL for new task + this.cachedDesktopUrl = null; + this.desktopLoopDetector = new DesktopLoopDetector(); + this.desktopToolContractViolations = 0; + this.desktopActionsWithoutObservation = 0; + this.pendingDesktopNeedsHelp = null; + + // Phase 6.4: Start heartbeat for this task + this.taskControllerService.startHeartbeat(taskId); // Kick off the first iteration without blocking the caller - void this.runIteration(taskId); + // v2.0.28: Use safe wrapper to catch async errors + this.safeRunIteration(taskId); } /** * Runs a single iteration of task processing and schedules the next * iteration via setImmediate while the task remains RUNNING. + * + * v2.2.7: Uses mutex to prevent concurrent iterations that could cause + * duplicate LLM calls and message history corruption. */ private async runIteration(taskId: string): Promise { if (!this.isProcessing) { return; } - try { - const task: Task = await this.tasksService.findById(taskId); + // v2.2.7: Use mutex to serialize iteration execution + // This prevents concurrent iterations from handleTaskResume or other triggers + const iterationId = `iter-${Date.now()}-${Math.random().toString(36).substring(7)}`; + + // Check if mutex is already locked (another iteration is in progress) + if (this.iterationMutex.isLocked()) { + this.logger.warn( + `[${iterationId}] Iteration already in progress for task ${taskId}, skipping duplicate call`, + ); + return; + } + + return this.iterationMutex.runExclusive(async () => { + this.logger.debug( + `[${iterationId}] Starting iteration for task ${taskId}`, + ); + + try { + const task: Task = await this.tasksService.findById(taskId); + + if (task.status !== TaskStatus.RUNNING) { + this.logger.log( + `Task processing completed for task ID: ${taskId} with status: ${task.status}`, + ); + + // Phase 6.4: Cleanup heartbeat and flush logs on task completion + this.taskControllerService.stopHeartbeat(taskId); + try { + await this.actionLoggingService.flushActions(taskId); + } catch (error: any) { + this.logger.warn( + `Failed to flush action logs for ${taskId}: ${error.message}`, + ); + } - if (task.status !== TaskStatus.RUNNING) { - this.logger.log( - `Task processing completed for task ID: ${taskId} with status: ${task.status}`, + this.isProcessing = false; + this.currentTaskId = null; + return; + } + + this.logger.log(`Processing iteration for task ID: ${taskId}`); + + // Stark Fix (Atom 5): Defensive guard — prompt steps must never be executed by the agent. + // Prefer explicit machine flags over NL heuristics: a prompt-step is identified by ASK_USER tool. + if (isDispatchedUserPromptStep({ allowedTools: task.allowedTools })) { + this.logger.error( + `Task ${taskId} appears to be a USER_INPUT_REQUIRED prompt step (allowedTools includes ASK_USER). ` + + `This must not be dispatched to the agent.`, + ); + + await this.tasksService.update(taskId, { + status: TaskStatus.NEEDS_HELP, + result: { + ...buildNeedsHelpResult({ + errorCode: 'DISPATCHED_USER_PROMPT_STEP', + message: + 'This task requires user input and must not be executed by the agent. ' + + 'Orchestrator should create a UserPrompt and wait.', + }), + }, + }); + await this.tasksService.clearLease(taskId); + this.taskControllerService.stopHeartbeat(taskId); + this.isProcessing = false; + this.currentTaskId = null; + return; + } + + // Refresh abort controller for this iteration to avoid accumulating + // "abort" listeners on a single AbortSignal across iterations. + this.abortController = new AbortController(); + + const latestSummary = await this.summariesService.findLatest(taskId); + const unsummarizedMessages = + await this.messagesService.findUnsummarized(taskId); + const messages = [ + ...(latestSummary + ? [ + { + id: '', + createdAt: new Date(), + updatedAt: new Date(), + taskId, + summaryId: null, + role: Role.USER, + content: [ + { + type: MessageContentType.Text, + text: latestSummary.content, + }, + ], + }, + ] + : []), + ...unsummarizedMessages, + ]; + this.logger.debug( + `Sending ${messages.length} messages to LLM for processing`, ); - this.isProcessing = false; - this.currentTaskId = null; - return; - } - this.logger.log(`Processing iteration for task ID: ${taskId}`); + // v2.2.5: Validate message history before sending to LLM + // This catches corruption from race conditions or worker crashes + const validation = validateMessageHistory(messages, this.logger); + if (!validation.isValid) { + const errorMessage = formatValidationError(validation); + this.logger.error( + `Task ${taskId}: Message history validation failed - ${errorMessage}`, + ); + this.logger.error( + `Orphaned tool_use IDs: ${validation.orphanedToolUseIds.join(', ')}`, + ); - // Refresh abort controller for this iteration to avoid accumulating - // "abort" listeners on a single AbortSignal across iterations. - this.abortController = new AbortController(); + await this.tasksService.update(taskId, { + status: TaskStatus.FAILED, + error: errorMessage, + }); - const latestSummary = await this.summariesService.findLatest(taskId); - const unsummarizedMessages = - await this.messagesService.findUnsummarized(taskId); - const messages = [ - ...(latestSummary - ? [ - { - id: '', - createdAt: new Date(), - updatedAt: new Date(), - taskId, - summaryId: null, - role: Role.USER, - content: [ - { - type: MessageContentType.Text, - text: latestSummary.content, - }, - ], - }, - ] - : []), - ...unsummarizedMessages, - ]; - this.logger.debug( - `Sending ${messages.length} messages to LLM for processing`, - ); + // Clear lease since task is done + await this.tasksService.clearLease(taskId); - const model = task.model as unknown as BytebotAgentModel; - let agentResponse: BytebotAgentResponse; + this.isProcessing = false; + this.currentTaskId = null; + this.taskControllerService.stopHeartbeat(taskId); + return; + } - const service = this.services[model.provider]; - if (!service) { - this.logger.warn( - `No service found for model provider: ${model.provider}`, + const model = task.model as unknown as BytebotAgentModel; + let agentResponse: BytebotAgentResponse; + + const service = this.services[model.provider]; + if (!service) { + this.logger.warn( + `No service found for model provider: ${model.provider}`, + ); + await this.tasksService.update(taskId, { + status: TaskStatus.FAILED, + }); + await this.tasksService.clearLease(taskId); + this.isProcessing = false; + this.currentTaskId = null; + return; + } + + // Stark Fix (Atom 5): Determine execution surface up front. + // Default behavior: requiresDesktop=true -> DESKTOP, otherwise TEXT_ONLY. + // If an explicit surface is present, it is honored (but desktop acquisition still requires requiresDesktop=true). + const executionSurface = resolveExecutionSurface({ + requiresDesktop: task.requiresDesktop, + executionSurface: task.executionSurface, + }); + + agentResponse = await service.generateMessage( + AGENT_SYSTEM_PROMPT, + messages, + model.name, + { + useTools: true, + toolPolicy: { + requiresDesktop: task.requiresDesktop, + executionSurface, + gatewayToolsOnly: task.gatewayToolsOnly, + allowedTools: task.allowedTools, + }, + signal: this.abortController.signal, + }, + ); + + const messageContentBlocks = agentResponse.contentBlocks; + + this.logger.debug( + `Received ${messageContentBlocks.length} content blocks from LLM`, ); - await this.tasksService.update(taskId, { - status: TaskStatus.FAILED, + + if (messageContentBlocks.length === 0) { + // v2.0.23: Changed from FAILED to NEEDS_HELP + // Empty response doesn't necessarily mean failure - could be: + // - Thinking-only responses + // - Tool execution results without text + // - Edge cases in response formatting + // Escalating to NEEDS_HELP allows user to review and resume + this.logger.warn( + `Task ID: ${taskId} received no content blocks from LLM, escalating to needs_help`, + ); + await this.tasksService.update(taskId, { + status: TaskStatus.NEEDS_HELP, + result: buildNeedsHelpResult({ + errorCode: 'LLM_EMPTY_RESPONSE', + message: + 'Task received no content blocks from LLM (empty response).', + details: { + provider: model.provider, + model: model.name, + }, + }), + }); + // v2.2.5: Clear lease when escalating + await this.tasksService.clearLease(taskId); + this.isProcessing = false; + this.currentTaskId = null; + return; + } + + await this.messagesService.create({ + content: messageContentBlocks, + role: Role.ASSISTANT, + taskId, }); - this.isProcessing = false; - this.currentTaskId = null; - return; - } - agentResponse = await service.generateMessage( - AGENT_SYSTEM_PROMPT, - messages, - model.name, - true, - this.abortController.signal, - ); + // Calculate if we need to summarize based on token usage + const contextWindow = model.contextWindow || 200000; // Default to 200k if not specified + const contextThreshold = contextWindow * 0.75; + const shouldSummarize = + agentResponse.tokenUsage.totalTokens >= contextThreshold; - const messageContentBlocks = agentResponse.contentBlocks; + if (shouldSummarize) { + try { + // After we've successfully generated a response, we can summarize the unsummarized messages + const summaryResponse = await service.generateMessage( + SUMMARIZATION_SYSTEM_PROMPT, + [ + ...messages, + { + id: '', + createdAt: new Date(), + updatedAt: new Date(), + taskId, + summaryId: null, + role: Role.USER, + content: [ + { + type: MessageContentType.Text, + text: 'Respond with a summary of the messages above. Do not include any additional information.', + }, + ], + }, + ], + model.name, + { useTools: false, signal: this.abortController.signal }, + ); - this.logger.debug( - `Received ${messageContentBlocks.length} content blocks from LLM`, - ); + const summaryContentBlocks = summaryResponse.contentBlocks; - if (messageContentBlocks.length === 0) { - this.logger.warn( - `Task ID: ${taskId} received no content blocks from LLM, marking as failed`, + this.logger.debug( + `Received ${summaryContentBlocks.length} summary content blocks from LLM`, + ); + const summaryContent = summaryContentBlocks + .filter( + (block: MessageContentBlock) => + block.type === MessageContentType.Text, + ) + .map((block: TextContentBlock) => block.text) + .join('\n'); + + const summary = await this.summariesService.create({ + content: summaryContent, + taskId, + }); + + await this.messagesService.attachSummary(taskId, summary.id, [ + ...messages.map((message) => { + return message.id; + }), + ]); + + this.logger.log( + `Generated summary for task ${taskId} due to token usage (${agentResponse.tokenUsage.totalTokens}/${contextWindow})`, + ); + } catch (error: any) { + this.logger.error( + `Error summarizing messages for task ID: ${taskId}`, + error.stack, + ); + } + } + + this.logger.debug( + `Token usage for task ${taskId}: ${agentResponse.tokenUsage.totalTokens}/${contextWindow} (${Math.round((agentResponse.tokenUsage.totalTokens / contextWindow) * 100)}%)`, + ); + + const generatedToolResults: MessageContentBlock[] = []; + + let setTaskStatusToolUseBlock: SetTaskStatusToolUseBlock | null = null; + + // v2.3.0 M4: Check if this response contains desktop tool use blocks + const hasDesktopToolUse = messageContentBlocks.some( + isComputerToolUseContentBlock, ); - await this.tasksService.update(taskId, { - status: TaskStatus.FAILED, + + // Text-only means text-only: never wait for/acquire a desktop when requiresDesktop=false or surface=TEXT_ONLY. + const acquireDesktop = shouldAcquireDesktop({ + requiresDesktop: task.requiresDesktop, + surface: executionSurface, + hasDesktopToolUse, }); - this.isProcessing = false; - this.currentTaskId = null; - return; - } - await this.messagesService.create({ - content: messageContentBlocks, - role: Role.ASSISTANT, - taskId, - }); + if (hasDesktopToolUse && !acquireDesktop) { + this.logger.warn( + `Task ${taskId} requested desktop tool use but desktop is not allowed ` + + `(requiresDesktop=${task.requiresDesktop}, surface=${executionSurface}).`, + ); - // Calculate if we need to summarize based on token usage - const contextWindow = model.contextWindow || 200000; // Default to 200k if not specified - const contextThreshold = contextWindow * 0.75; - const shouldSummarize = - agentResponse.tokenUsage.totalTokens >= contextThreshold; + await this.tasksService.update(taskId, { + status: TaskStatus.NEEDS_HELP, + result: { + ...buildNeedsHelpResult({ + errorCode: 'DESKTOP_NOT_ALLOWED', + message: + 'Desktop tools were requested, but this task is configured for TEXT_ONLY execution. ' + + 'Orchestrator may have misrouted this step.', + details: { + requiresDesktop: task.requiresDesktop, + executionSurface, + }, + }), + }, + }); + await this.tasksService.clearLease(taskId); + this.taskControllerService.stopHeartbeat(taskId); + this.isProcessing = false; + this.currentTaskId = null; + return; + } - if (shouldSummarize) { - try { - // After we've successfully generated a response, we can summarize the unsummarized messages - const summaryResponse = await service.generateMessage( - SUMMARIZATION_SYSTEM_PROMPT, - [ - ...messages, - { - id: '', - createdAt: new Date(), - updatedAt: new Date(), + // v2.3.0 M4: Workspace-aware desktop resolution + // For Product 2 Workflows (workspaceId set): Use WorkspaceService + // For Product 1 Tasks (workspaceId null): Use TaskControllerService + let desktopUrl: string | undefined; + const workspaceId = (task as any).workspaceId as string | null; + const nodeRunId = (task as any).nodeRunId as string | null; + + if (acquireDesktop) { + if (this.cachedDesktopUrl) { + desktopUrl = this.cachedDesktopUrl; + this.logger.debug( + `Using cached desktop URL for task ${taskId}: ${desktopUrl}`, + ); + } else if ( + workspaceId && + this.workspaceService.isWorkspaceEnabled() + ) { + // Product 2: Workflow with persistent workspace + try { + this.logger.log( + `Waiting for workspace ${workspaceId} to be ready for task ${taskId}...`, + ); + desktopUrl = await this.workspaceService.waitForWorkspaceReady( + workspaceId, + { + timeoutMs: 120000, // 2 minute timeout (includes wake time from hibernation) + }, + ); + this.cachedDesktopUrl = desktopUrl; + this.logger.log( + `Workspace ${workspaceId} ready for task ${taskId}: ${desktopUrl}`, + ); + } catch (error: any) { + this.logger.error( + `Failed to get workspace ${workspaceId} for task ${taskId}: ${error.message}`, + ); + await this.tasksService.update(taskId, { + status: TaskStatus.FAILED, + error: `Failed to get workspace: ${error.message}`, + }); + await this.tasksService.clearLease(taskId); + this.isProcessing = false; + this.currentTaskId = null; + this.taskControllerService.stopHeartbeat(taskId); + return; + } + } else { + // Product 1: Task with ephemeral desktop (Phase 6) + try { + this.logger.log( + `Waiting for desktop to be ready for task ${taskId}...`, + ); + desktopUrl = await this.taskControllerService.waitForDesktop( taskId, - summaryId: null, - role: Role.USER, + { + timeoutMs: 60000, // 60 second timeout + }, + ); + this.cachedDesktopUrl = desktopUrl; + this.logger.log( + `Desktop ready for task ${taskId}: ${desktopUrl}`, + ); + } catch (error: any) { + this.logger.error( + `Failed to get desktop for task ${taskId}: ${error.message}`, + ); + // Mark task as failed if we can't get a desktop + await this.tasksService.update(taskId, { + status: TaskStatus.FAILED, + error: `Failed to get desktop: ${error.message}`, + }); + await this.tasksService.clearLease(taskId); + this.isProcessing = false; + this.currentTaskId = null; + this.taskControllerService.stopHeartbeat(taskId); + return; + } + } + } + + // Phase 6.4: Create action context for logging + // Phase 4: Include requiresDesktop for fail-fast validation + const actionContext: ActionContext = { + taskId, + desktopUrl, + requiresDesktop: task.requiresDesktop, + onAction: (action: ActionResult) => { + this.handleDesktopSafetyAction(action); + + // Log action asynchronously (non-blocking) + const logEntry: ActionLogEntry = { + taskId, + actionType: action.actionType, + actionStatus: action.success ? 'success' : 'failed', + coordinates: action.coordinates, + durationMs: action.durationMs, + errorMessage: action.errorMessage, + actionData: action.input, + llmModel: model.name, + }; + this.actionLoggingService.logAction(logEntry).catch((err) => { + this.logger.warn(`Failed to log action: ${err.message}`); + }); + }, + }; + + const needsWorkspaceLock = + workspaceId && nodeRunId && hasDesktopToolUse; + let workspaceLockAcquired = false; + + // v2.3.0 M4: Acquire granular workspace lock before desktop tool execution + // Lock is held only during active desktop tool execution (30-60 seconds) + if (needsWorkspaceLock) { + const lockResult = await this.workspaceService.acquireLock( + workspaceId, + nodeRunId, + ); + if (!lockResult.acquired) { + // Lock contention - another node run is using the workspace + this.logger.warn( + `Lock contention on workspace ${workspaceId}: ${lockResult.message}. ` + + `Retrying after ${lockResult.retryAfterMs || 5000}ms`, + ); + // Wait and retry in next iteration + await new Promise((resolve) => + setTimeout(resolve, lockResult.retryAfterMs || 5000), + ); + // Schedule next iteration to retry + if (this.isProcessing) { + this.scheduleNextIteration(taskId); + } + return; + } + workspaceLockAcquired = true; + this.logger.log( + `Workspace lock acquired for task ${taskId} (nodeRun: ${nodeRunId})`, + ); + } + + try { + const handledToolUseIds = new Set(); + let shortCircuitReason: + | 'ui_repair_esc' + | 'ui_repair_close_click' + | 'takeover_required' + | null = null; + + for (const block of messageContentBlocks) { + if (isComputerToolUseContentBlock(block)) { + const normalized = normalizeComputerToolUseBlock(block); + + // Hard invariant: never keep clicking the same region when it produces no progress. + if ( + normalized.normalizedBlock.name === 'computer_click_mouse' && + desktopUrl && + !this.pendingDesktopNeedsHelp + ) { + const input = (normalized.normalizedBlock as any).input as + | { coordinates?: { x: number; y: number } } + | undefined; + const coords = input?.coordinates; + if ( + coords && + typeof coords.x === 'number' && + typeof coords.y === 'number' && + Number.isFinite(coords.x) && + Number.isFinite(coords.y) + ) { + const bx = Math.floor( + coords.x / DESKTOP_LOOP_COORDINATE_BUCKET_PX, + ); + const by = Math.floor( + coords.y / DESKTOP_LOOP_COORDINATE_BUCKET_PX, + ); + const bucketKey = `${bx},${by}`; + + const blocked = this.blockedClickBuckets.get(bucketKey); + const stillNoProgress = + blocked && + this.lastScreenshotHash && + hammingDistanceHex( + blocked.sinceHash, + this.lastScreenshotHash, + ) <= DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING; + + if (stillNoProgress) { + generatedToolResults.push({ + type: MessageContentType.ToolResult, + tool_use_id: block.id, + is_error: true, + content: [ + { + type: MessageContentType.Text, + text: '[System] Blocked repeating click in the same UI region with no progress; running the bounded UI repair ladder.', + }, + ], + } as any); + handledToolUseIds.add(block.id); + + const repair = await this.performDesktopUiRepair({ + taskId, + desktopUrl, + actionContext, + trigger: 'blocked_click', + triggerDetails: { + bucketKey, + blockedClickCount: blocked.count, + }, + service, + model, + generatedToolResults, + }); + + shortCircuitReason = repair.shortCircuitReason; + break; + } + } + } + + if ( + normalized.rewriteReason === 'non_modifier_down_to_tap' || + normalized.rewriteReason === + 'modifier_down_missing_holdms_to_tap' + ) { + this.desktopToolContractViolations++; + const rewrittenKeys = normalized.rewrittenKeys || []; + for (const key of rewrittenKeys) { + this.eventEmitter.emit('desktop.keydown.rewritten', { + key, + reason: normalized.rewriteReason, + }); + } + + if ( + this.desktopToolContractViolations >= + DESKTOP_TOOL_CONTRACT_VIOLATION_LIMIT && + !this.pendingDesktopNeedsHelp + ) { + this.pendingDesktopNeedsHelp = buildNeedsHelpResult({ + errorCode: 'TOOL_CONTRACT_VIOLATION', + details: { + violations: this.desktopToolContractViolations, + limit: DESKTOP_TOOL_CONTRACT_VIOLATION_LIMIT, + lastRewriteReason: normalized.rewriteReason, + lastRewrittenKeys: rewrittenKeys, + recentActions: this.desktopLoopDetector.getRecent(), + }, + }); + this.eventEmitter.emit('desktop.interrupt', { + reasonCode: 'TOOL_CONTRACT_VIOLATION', + }); + } + } + + const result = await handleComputerToolUse( + normalized.normalizedBlock, + this.logger, + actionContext, + ); + generatedToolResults.push(result); + handledToolUseIds.add(block.id); + + if ( + this.pendingForcedScreenshot && + this.pendingForcedScreenshotReason === 'actions_budget' && + desktopUrl && + !this.pendingDesktopNeedsHelp + ) { + const actionsWithoutObservation = + this.desktopActionsWithoutObservation; + this.pendingForcedScreenshot = false; + this.pendingForcedScreenshotReason = null; + + const forcedToolUseId = `forced_screenshot_${Date.now()}`; + const forcedScreenshotBlock = { + type: MessageContentType.ToolUse, + id: forcedToolUseId, + name: 'computer_screenshot', + input: {}, + } as any; + + const forcedResult = await handleComputerToolUse( + forcedScreenshotBlock, + this.logger, + actionContext, + ); + + const content = Array.isArray((forcedResult as any).content) + ? ((forcedResult as any).content as any[]) + : []; + + const images = content.filter( + (c) => + c && + typeof c === 'object' && + c.type === MessageContentType.Image, + ); + + if (images.length > 0) { + generatedToolResults.push({ + type: MessageContentType.Text, + text: '[System] Forced screenshot for observation (actions budget).', + } as any); + generatedToolResults.push(...(images as any)); + } else { + // If we cannot observe, we must stop deterministically (typed interrupt). + this.pendingDesktopNeedsHelp = buildNeedsHelpResult({ + errorCode: 'UI_OBSERVATION_FAILED', + message: + 'Forced screenshot failed; cannot safely proceed without observation.', + details: { + reason: 'forced_screenshot_failed', + actionsWithoutObservation, + limit: DESKTOP_MAX_ACTIONS_WITHOUT_OBSERVATION, + lastToolResult: content, + }, + }); + this.eventEmitter.emit('desktop.interrupt', { + reasonCode: 'UI_OBSERVATION_FAILED', + }); + } + } + + // Modal-aware bounded self-repair. Runs after we observe a no-progress loop. + if ( + this.pendingDesktopRepairTrigger && + desktopUrl && + !this.pendingDesktopNeedsHelp + ) { + const trigger = this.pendingDesktopRepairTrigger; + this.pendingDesktopRepairTrigger = null; + + const repair = await this.performDesktopUiRepair({ + taskId, + desktopUrl, + actionContext, + trigger: trigger.trigger, + triggerDetails: trigger.details, + service, + model, + generatedToolResults, + }); + + shortCircuitReason = repair.shortCircuitReason; + break; + } + + if (this.pendingDesktopNeedsHelp) { + break; + } + } + + if (isCreateTaskToolUseBlock(block)) { + const type = block.input.type?.toUpperCase() as TaskType; + const priority = + block.input.priority?.toUpperCase() as TaskPriority; + + await this.tasksService.create({ + description: block.input.description, + type, + createdBy: Role.ASSISTANT, + ...(block.input.scheduledFor && { + scheduledFor: new Date(block.input.scheduledFor), + }), + model: task.model, + priority, + }); + + generatedToolResults.push({ + type: MessageContentType.ToolResult, + tool_use_id: block.id, content: [ { type: MessageContentType.Text, - text: 'Respond with a summary of the messages above. Do not include any additional information.', + text: 'The task has been created', }, ], - }, - ], - model.name, - false, - this.abortController.signal, - ); + }); + handledToolUseIds.add(block.id); + } - const summaryContentBlocks = summaryResponse.contentBlocks; + if (isSetTaskStatusToolUseBlock(block)) { + setTaskStatusToolUseBlock = block; - this.logger.debug( - `Received ${summaryContentBlocks.length} summary content blocks from LLM`, - ); - const summaryContent = summaryContentBlocks - .filter( - (block: MessageContentBlock) => - block.type === MessageContentType.Text, - ) - .map((block: TextContentBlock) => block.text) - .join('\n'); - - const summary = await this.summariesService.create({ - content: summaryContent, + generatedToolResults.push({ + type: MessageContentType.ToolResult, + tool_use_id: block.id, + is_error: block.input.status === 'failed', + content: [ + { + type: MessageContentType.Text, + text: block.input.description, + }, + ], + }); + handledToolUseIds.add(block.id); + } + } + + if (shortCircuitReason) { + // Ensure the assistant's tool-use message is fully acknowledged with tool results. + // We intentionally skip remaining tool calls to allow the model to replan using + // the latest screenshot after safety repair. + for (const block of messageContentBlocks) { + if ( + block && + typeof block === 'object' && + (block as any).type === MessageContentType.ToolUse && + typeof (block as any).id === 'string' && + !handledToolUseIds.has((block as any).id) + ) { + generatedToolResults.push({ + type: MessageContentType.ToolResult, + tool_use_id: (block as any).id, + is_error: true, + content: [ + { + type: MessageContentType.Text, + text: `[System] Skipped tool execution due to desktop safety intervention (${shortCircuitReason}). Replan using the latest screenshot.`, + }, + ], + } as any); + handledToolUseIds.add((block as any).id); + } + } + } + } finally { + // v2.3.0 M4: Release workspace lock after desktop tool execution + if (workspaceLockAcquired && workspaceId && nodeRunId) { + await this.workspaceService.releaseLock(workspaceId, nodeRunId); + this.logger.log( + `Workspace lock released for task ${taskId} (nodeRun: ${nodeRunId})`, + ); + } + } + + if (generatedToolResults.length > 0) { + await this.messagesService.create({ + content: generatedToolResults, + role: Role.USER, taskId, }); + } - await this.messagesService.attachSummary(taskId, summary.id, [ - ...messages.map((message) => { - return message.id; - }), - ]); + if (this.pendingDesktopNeedsHelp) { + // Best-effort cleanup: release any potentially stuck keys/buttons before yielding. + if (desktopUrl) { + try { + await resetDesktopInput(desktopUrl); + } catch (error: any) { + this.logger.warn( + `Failed to reset desktop input for task ${taskId}: ${error.message}`, + ); + } + } - this.logger.log( - `Generated summary for task ${taskId} due to token usage (${agentResponse.tokenUsage.totalTokens}/${contextWindow})`, + await this.tasksService.update(taskId, { + status: TaskStatus.NEEDS_HELP, + result: this.pendingDesktopNeedsHelp, + }); + await this.tasksService.clearLease(taskId); + this.taskControllerService.stopHeartbeat(taskId); + this.isProcessing = false; + this.currentTaskId = null; + return; + } + + // Update the task status after all tool results have been generated if we have a set task status tool use block + if (setTaskStatusToolUseBlock) { + switch (setTaskStatusToolUseBlock.input.status) { + case 'completed': + // v2.4.1: Capture actual outcome description as task result + // This persists the AI's description of what was accomplished + // so orchestrator can include it in context for subsequent steps + await this.tasksService.update(taskId, { + status: TaskStatus.COMPLETED, + completedAt: new Date(), + result: { + summary: setTaskStatusToolUseBlock.input.description, + completedAt: new Date().toISOString(), + }, + }); + // v2.2.5: Clear lease on completion + await this.tasksService.clearLease(taskId); + // v2.2.10: Stop heartbeat on task completion + this.taskControllerService.stopHeartbeat(taskId); + break; + case 'needs_help': + const rawErrorCode = (setTaskStatusToolUseBlock.input as any) + ?.errorCode; + const requestedErrorCode = parseNeedsHelpErrorCode(rawErrorCode); + const detailsRaw = (setTaskStatusToolUseBlock.input as any) + ?.details; + const details = + detailsRaw && typeof detailsRaw === 'object' + ? (detailsRaw as Record) + : undefined; + + let errorCode = + requestedErrorCode ?? 'CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP'; + + // Strategy prompts must never block execution; treat generic asks as contract violations. + if (errorCode === 'AGENT_REQUESTED_HELP') { + errorCode = 'CONTRACT_VIOLATION_STRATEGY_AS_HELP'; + } + + const mergedDetails: Record = { + ...(details || {}), + ...(typeof rawErrorCode === 'string' && rawErrorCode.trim() + ? { rawErrorCode: rawErrorCode.trim() } + : {}), + }; + + await this.tasksService.update(taskId, { + status: TaskStatus.NEEDS_HELP, + result: buildNeedsHelpResult({ + errorCode, + message: setTaskStatusToolUseBlock.input.description, + details: + Object.keys(mergedDetails).length > 0 + ? mergedDetails + : undefined, + }), + }); + // v2.2.5: Clear lease when escalating to user + await this.tasksService.clearLease(taskId); + // v2.2.10: Stop heartbeat when escalating to user + this.taskControllerService.stopHeartbeat(taskId); + break; + } + } + + // Schedule the next iteration without blocking + // v2.0.28: Use safe wrapper to catch async errors + if (this.isProcessing) { + this.scheduleNextIteration(taskId); + } + } catch (error: any) { + if (error?.name === 'BytebotAgentInterrupt') { + this.logger.warn( + `[${iterationId}] Processing aborted for task ID: ${taskId}`, ); - } catch (error: any) { + } else { this.logger.error( - `Error summarizing messages for task ID: ${taskId}`, + `[${iterationId}] Error during task processing iteration for task ID: ${taskId} - ${error.message}`, error.stack, ); + + const llmErrorType = + typeof error?.llmErrorType === 'string' + ? error.llmErrorType + : undefined; + const errorMessage = String(error?.message || 'Unknown error'); + + // Error taxonomy: connection errors and provider unavailability are INFRA, not semantic. + // Prefer machine signals (llmErrorType), fallback to conservative string patterns. + const infraLlmTypes = new Set([ + 'TIMEOUT', + 'NETWORK', + 'SERVER_ERROR', + 'RATE_LIMIT', + 'OVERLOADED', + 'UNKNOWN', + ]); + + const lower = errorMessage.toLowerCase(); + const isInfra = + (llmErrorType && infraLlmTypes.has(llmErrorType)) || + lower.includes('[infra]') || + lower.includes('econnrefused') || + lower.includes('etimedout') || + lower.includes('enotfound') || + lower.includes('econnreset') || + lower.includes('socket hang up') || + lower.includes('fetch failed') || + lower.includes('network') || + lower.includes('connection refused') || + lower.includes('service unavailable') || + lower.includes('bad gateway') || + lower.includes('circuit breaker open'); + + const persistedError = + isInfra && !lower.includes('[infra]') + ? `[INFRA] ${errorMessage}` + : errorMessage; + + await this.tasksService.update(taskId, { + status: TaskStatus.FAILED, + error: persistedError, + result: { + errorCategory: isInfra ? 'INFRA' : 'SEMANTIC', + llmErrorType: llmErrorType || null, + attempts: + typeof error?.attempts === 'number' + ? error.attempts + : undefined, + durationMs: + typeof error?.durationMs === 'number' + ? error.durationMs + : undefined, + }, + }); + // v2.2.5: Clear lease on failure + await this.tasksService.clearLease(taskId); + // v2.2.10: Stop heartbeat on task failure + this.taskControllerService.stopHeartbeat(taskId); + this.isProcessing = false; + this.currentTaskId = null; } } this.logger.debug( - `Token usage for task ${taskId}: ${agentResponse.tokenUsage.totalTokens}/${contextWindow} (${Math.round((agentResponse.tokenUsage.totalTokens / contextWindow) * 100)}%)`, + `[${iterationId}] Iteration completed for task ${taskId}`, ); + }); // End of runExclusive + } + + private async performDesktopUiRepair(input: { + taskId: string; + desktopUrl: string; + actionContext: ActionContext; + trigger: 'loop' | 'blocked_click'; + triggerDetails?: Record; + service: BytebotAgentService; + model: BytebotAgentModel; + generatedToolResults: MessageContentBlock[]; + }): Promise<{ + shortCircuitReason: + | 'ui_repair_esc' + | 'ui_repair_close_click' + | 'takeover_required'; + }> { + const { desktopUrl, actionContext } = input; + + const now = Date.now(); + + const extractBucketKeyFromSignature = ( + signature?: unknown, + ): string | null => { + if (typeof signature !== 'string') return null; + const match = signature.match(/bucket=([0-9]+,[0-9]+)/); + return match ? match[1] : null; + }; + + const getOrStartEpisode = (): DesktopUiRepairEpisode => { + const existing = this.desktopUiRepairEpisode; + if ( + existing && + existing.startedAtMs > 0 && + now - existing.startedAtMs <= DESKTOP_LOOP_REPAIR_EPISODE_MAX_MS + ) { + return existing; + } + + const nextEpisode: DesktopUiRepairEpisode = { + episodeId: ++this.desktopUiRepairEpisodeSeq, + startedAtMs: now, + anchorHash: this.lastScreenshotHash, + escUsed: false, + closeClickUsed: false, + progressCandidateHash: null, + progressCandidateCount: 0, + }; + this.desktopUiRepairEpisode = nextEpisode; + return nextEpisode; + }; + + const buildLoopContext = () => { + const rule = + typeof input.triggerDetails?.rule === 'string' + ? input.triggerDetails.rule + : null; + const signature = + typeof input.triggerDetails?.signature === 'string' + ? input.triggerDetails.signature + : null; + + const bucket = + typeof (input.triggerDetails as any)?.bucketKey === 'string' + ? String((input.triggerDetails as any).bucketKey) + : extractBucketKeyFromSignature(signature); + + return { + trigger: input.trigger, + rule, + bucket, + }; + }; - const generatedToolResults: ToolResultContentBlock[] = []; + const takeover = (details: Record) => { + if (!this.pendingDesktopNeedsHelp) { + const episode = getOrStartEpisode(); + const loop = buildLoopContext(); + const attempt2ErrorRaw = + (details as any)?.attempt2 && + typeof (details as any).attempt2 === 'object' + ? (details as any).attempt2.error + : null; + const attempt2Error = + typeof attempt2ErrorRaw === 'string' ? attempt2ErrorRaw : null; - let setTaskStatusToolUseBlock: SetTaskStatusToolUseBlock | null = null; + const repair = { + attempt1: episode.escUsed + ? 'esc' + : DESKTOP_LOOP_REPAIR_ENABLED + ? 'skipped' + : 'disabled', + attempt2: attempt2Error + ? attempt2Error === 'no_progress_after_click' + ? 'failed' + : 'invalid' + : episode.closeClickUsed + ? 'close_click' + : DESKTOP_LOOP_REPAIR_ENABLED && + DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED + ? 'skipped' + : 'disabled', + }; - for (const block of messageContentBlocks) { - if (isComputerToolUseContentBlock(block)) { - const result = await handleComputerToolUse(block, this.logger); - generatedToolResults.push(result); + this.pendingDesktopNeedsHelp = buildNeedsHelpResult({ + errorCode: 'DESKTOP_TAKEOVER_REQUIRED', + details: { + reason: 'UI_BLOCKED_POPUP', + loop, + repair, + ...details, + }, + }); + this.eventEmitter.emit('desktop.interrupt', { + reasonCode: 'DESKTOP_TAKEOVER_REQUIRED', + }); + } + return { shortCircuitReason: 'takeover_required' as const }; + }; + + if (!DESKTOP_LOOP_REPAIR_ENABLED) { + return takeover({ + trigger: input.trigger, + triggerDetails: input.triggerDetails, + repairEnabled: false, + repairAttempt2Enabled: DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED, + }); + } + + // Attempt 1: Esc once (tap) then force replan. + const episode = getOrStartEpisode(); + + if (!episode.escUsed) { + const beforeHash = this.lastScreenshotHash; + episode.escUsed = true; + + this.desktopUiRepairInProgress = true; + this.eventEmitter.emit('desktop.ui_repair.attempt', { + attempt: 'esc', + trigger: input.trigger, + }); + + try { + const toolUseId = `ui_repair_esc_${Date.now()}`; + const escBlock = { + type: MessageContentType.ToolUse, + id: toolUseId, + name: 'computer_type_keys', + input: { keys: ['Escape'], delay: 75 }, + } as any; + + const escResult = await handleComputerToolUse( + escBlock, + this.logger, + actionContext, + ); + + const content = Array.isArray((escResult as any).content) + ? ((escResult as any).content as any[]) + : []; + + const images = content.filter( + (c) => + c && typeof c === 'object' && c.type === MessageContentType.Image, + ); + + input.generatedToolResults.push({ + type: MessageContentType.Text, + text: '[System] Desktop safety repair: pressed Escape to dismiss a potential modal/popup. Replan from the updated screenshot.', + } as any); + + if (images.length > 0) { + input.generatedToolResults.push(...(images as any)); } - if (isCreateTaskToolUseBlock(block)) { - const type = block.input.type?.toUpperCase() as TaskType; - const priority = block.input.priority?.toUpperCase() as TaskPriority; + const afterHash = this.lastScreenshotHash; + const dist = + beforeHash && afterHash + ? hammingDistanceHex(beforeHash, afterHash) + : null; + const success = + typeof dist === 'number' && Number.isFinite(dist) + ? dist > DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING + : false; - await this.tasksService.create({ - description: block.input.description, - type, - createdBy: Role.ASSISTANT, - ...(block.input.scheduledFor && { - scheduledFor: new Date(block.input.scheduledFor), - }), - model: task.model, - priority, + this.eventEmitter.emit('desktop.ui_repair.result', { + attempt: 'esc', + outcome: success ? 'success' : 'failure_no_change', + trigger: input.trigger, + }); + } finally { + this.desktopUiRepairInProgress = false; + // Reset loop detector after repair so we don't instantly re-trigger on old history. + this.desktopLoopDetector = new DesktopLoopDetector(); + } + + return { shortCircuitReason: 'ui_repair_esc' }; + } + + // Attempt 2: single-shot "close/dismiss click" (bounded). + if (DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED && !episode.closeClickUsed) { + episode.closeClickUsed = true; + const beforeHash = this.lastScreenshotHash; + + this.desktopUiRepairInProgress = true; + this.eventEmitter.emit('desktop.ui_repair.attempt', { + attempt: 'close_click', + trigger: input.trigger, + }); + + try { + // Capture screenshot for the classifier. + const screenshotToolUseId = `ui_repair_shot_${Date.now()}`; + const shotBlock = { + type: MessageContentType.ToolUse, + id: screenshotToolUseId, + name: 'computer_screenshot', + input: {}, + } as any; + + const shotResult = await handleComputerToolUse( + shotBlock, + this.logger, + actionContext, + ); + + const shotContent = Array.isArray((shotResult as any).content) + ? ((shotResult as any).content as any[]) + : []; + + const firstImage = shotContent.find( + (c) => + c && typeof c === 'object' && c.type === MessageContentType.Image, + ); + + const screenshotBase64 = + firstImage?.source && typeof firstImage.source.data === 'string' + ? (firstImage.source.data as string) + : null; + + if (!screenshotBase64) { + this.eventEmitter.emit('desktop.ui_repair.result', { + attempt: 'close_click', + outcome: 'failure_invalid', + trigger: input.trigger, }); + return takeover({ + trigger: input.trigger, + triggerDetails: input.triggerDetails, + attempt2: { error: 'screenshot_missing' }, + }); + } - generatedToolResults.push({ - type: MessageContentType.ToolResult, - tool_use_id: block.id, - content: [ - { - type: MessageContentType.Text, - text: 'The task has been created', - }, - ], + const dims = decodePngDimensionsFromBase64(screenshotBase64); + if (!dims) { + this.eventEmitter.emit('desktop.ui_repair.result', { + attempt: 'close_click', + outcome: 'failure_invalid', + trigger: input.trigger, + }); + return takeover({ + trigger: input.trigger, + triggerDetails: input.triggerDetails, + attempt2: { error: 'screenshot_dimensions_unavailable' }, }); } - if (isSetTaskStatusToolUseBlock(block)) { - setTaskStatusToolUseBlock = block; - - generatedToolResults.push({ - type: MessageContentType.ToolResult, - tool_use_id: block.id, - is_error: block.input.status === 'failed', - content: [ - { - type: MessageContentType.Text, - text: block.input.description, - }, - ], + const classifierSystem = buildUiRepairClassifierSystemPrompt(); + const classifierMessages = [ + buildUiRepairClassifierMessage({ + taskId: input.taskId, + screenshotBase64Png: screenshotBase64, + }), + ]; + + const classifierResponse = await input.service.generateMessage( + classifierSystem, + classifierMessages, + input.model.name, + { useTools: false, signal: this.abortController?.signal }, + ); + + const textBlocks = classifierResponse.contentBlocks.filter( + (b) => b.type === MessageContentType.Text, + ) as any[]; + const combinedText = textBlocks + .map((b) => b.text) + .join('\n') + .trim(); + + const parsed = parseUiRepairCandidate(combinedText); + if (!parsed.ok) { + this.eventEmitter.emit('desktop.ui_repair.result', { + attempt: 'close_click', + outcome: 'failure_invalid', + trigger: input.trigger, + }); + return takeover({ + trigger: input.trigger, + triggerDetails: input.triggerDetails, + attempt2: { + error: 'classifier_parse_failed', + detail: parsed.error, + }, }); } - } - if (generatedToolResults.length > 0) { - await this.messagesService.create({ - content: generatedToolResults, - role: Role.USER, - taskId, + const validation = validateUiRepairCandidate({ + candidate: parsed.candidate, + dimensions: dims, + minConfidence: DESKTOP_LOOP_REPAIR_ATTEMPT2_MIN_CONFIDENCE, }); - } - // Update the task status after all tool results have been generated if we have a set task status tool use block - if (setTaskStatusToolUseBlock) { - switch (setTaskStatusToolUseBlock.input.status) { - case 'completed': - await this.tasksService.update(taskId, { - status: TaskStatus.COMPLETED, - completedAt: new Date(), - }); - break; - case 'needs_help': - await this.tasksService.update(taskId, { - status: TaskStatus.NEEDS_HELP, - }); - break; + if (!validation.ok) { + this.eventEmitter.emit('desktop.ui_repair.result', { + attempt: 'close_click', + outcome: 'failure_invalid', + trigger: input.trigger, + }); + return takeover({ + trigger: input.trigger, + triggerDetails: input.triggerDetails, + attempt2: { error: 'candidate_rejected', detail: validation.error }, + }); } - } - // Schedule the next iteration without blocking - if (this.isProcessing) { - setImmediate(() => this.runIteration(taskId)); + // Execute exactly one click at the suggested point. + const clickToolUseId = `ui_repair_click_${Date.now()}`; + const clickBlock = { + type: MessageContentType.ToolUse, + id: clickToolUseId, + name: 'computer_click_mouse', + input: { + coordinates: { + x: Math.round(parsed.candidate.x), + y: Math.round(parsed.candidate.y), + }, + button: 'left', + clickCount: 1, + }, + } as any; + + const clickResult = await handleComputerToolUse( + clickBlock, + this.logger, + actionContext, + ); + + const clickContent = Array.isArray((clickResult as any).content) + ? ((clickResult as any).content as any[]) + : []; + + const clickImages = clickContent.filter( + (c) => + c && typeof c === 'object' && c.type === MessageContentType.Image, + ); + + input.generatedToolResults.push({ + type: MessageContentType.Text, + text: '[System] Desktop safety repair: attempted a single close/dismiss click for a blocking overlay. Replan from the updated screenshot.', + } as any); + + if (clickImages.length > 0) { + input.generatedToolResults.push(...(clickImages as any)); + } + + const afterHash = this.lastScreenshotHash; + const dist = + beforeHash && afterHash + ? hammingDistanceHex(beforeHash, afterHash) + : null; + const success = + typeof dist === 'number' && Number.isFinite(dist) + ? dist > DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING + : false; + + this.eventEmitter.emit('desktop.ui_repair.result', { + attempt: 'close_click', + outcome: success ? 'success' : 'failure_no_change', + trigger: input.trigger, + }); + + if (!success) { + return takeover({ + trigger: input.trigger, + triggerDetails: input.triggerDetails, + attempt2: { + error: 'no_progress_after_click', + rationale: parsed.candidate.rationaleCode, + }, + }); + } + } finally { + this.desktopUiRepairInProgress = false; + this.desktopLoopDetector = new DesktopLoopDetector(); } - } catch (error: any) { - if (error?.name === 'BytebotAgentInterrupt') { - this.logger.warn(`Processing aborted for task ID: ${taskId}`); + + return { shortCircuitReason: 'ui_repair_close_click' }; + } + + // Repairs exhausted → takeover. + return takeover({ + trigger: input.trigger, + triggerDetails: input.triggerDetails, + repair: { + attempt2Enabled: DESKTOP_LOOP_REPAIR_ATTEMPT2_ENABLED, + }, + }); + } + + private handleDesktopSafetyAction(action: ActionResult): void { + if (this.pendingDesktopNeedsHelp) { + return; + } + + // Track last observed screenshot hash to gate "no progress" blocking and reset repair budgets. + const prevHash = this.lastScreenshotHash; + const nextHash = + typeof action.screenshotHash === 'string' ? action.screenshotHash : null; + + // Episode expiry: avoid holding "attempt used" state forever. + if ( + this.desktopUiRepairEpisode && + Date.now() - this.desktopUiRepairEpisode.startedAtMs > + DESKTOP_LOOP_REPAIR_EPISODE_MAX_MS + ) { + this.desktopUiRepairEpisode = null; + } + + // Confirm progress conservatively: require the UI to move away from the episode anchor AND + // remain stable for at least 2 observations. This prevents animated jitter from resetting + // the repair ladder and causing repeated Esc. + if (this.desktopUiRepairEpisode?.anchorHash && nextHash) { + const distFromAnchor = hammingDistanceHex( + this.desktopUiRepairEpisode.anchorHash, + nextHash, + ); + + if (distFromAnchor > DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING) { + const candidate = this.desktopUiRepairEpisode.progressCandidateHash; + if ( + candidate && + hammingDistanceHex(candidate, nextHash) <= + DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING + ) { + this.desktopUiRepairEpisode.progressCandidateCount++; + } else { + this.desktopUiRepairEpisode.progressCandidateHash = nextHash; + this.desktopUiRepairEpisode.progressCandidateCount = 1; + } + + if (this.desktopUiRepairEpisode.progressCandidateCount >= 2) { + this.blockedClickBuckets.clear(); + this.desktopUiRepairEpisode = null; + } } else { - this.logger.error( - `Error during task processing iteration for task ID: ${taskId} - ${error.message}`, - error.stack, + this.desktopUiRepairEpisode.progressCandidateHash = null; + this.desktopUiRepairEpisode.progressCandidateCount = 0; + } + } + + if (nextHash) { + this.lastScreenshotHash = nextHash; + } + + // If a click produced no meaningful change, block repeating the same click bucket until progress. + if ( + action.actionType === 'computer_click_mouse' && + action.coordinates && + prevHash && + nextHash + ) { + const dist = hammingDistanceHex(prevHash, nextHash); + if (dist <= DESKTOP_LOOP_NO_CHANGE_MAX_HAMMING) { + const bx = Math.floor( + action.coordinates.x / DESKTOP_LOOP_COORDINATE_BUCKET_PX, ); - await this.tasksService.update(taskId, { - status: TaskStatus.FAILED, + const by = Math.floor( + action.coordinates.y / DESKTOP_LOOP_COORDINATE_BUCKET_PX, + ); + const bucketKey = `${bx},${by}`; + const existing = this.blockedClickBuckets.get(bucketKey); + this.blockedClickBuckets.set(bucketKey, { + sinceHash: nextHash, + blockedAtMs: Date.now(), + count: (existing?.count || 0) + 1, }); - this.isProcessing = false; - this.currentTaskId = null; } } + + const isUiAffecting = DESKTOP_OBSERVATION_REQUIRED_TOOLS.has( + action.actionType, + ); + + const hasObservation = action.screenshotCaptured === true; + + if (hasObservation || action.actionType === 'computer_screenshot') { + this.desktopActionsWithoutObservation = 0; + } else if (isUiAffecting) { + this.desktopActionsWithoutObservation++; + } + + if ( + isUiAffecting && + this.desktopActionsWithoutObservation >= + DESKTOP_MAX_ACTIONS_WITHOUT_OBSERVATION + ) { + // Gold rule: do not continue “blind”. Instead of immediately interrupting, force an observation. + // If the forced screenshot fails, we fall back to the typed interrupt path. + this.pendingForcedScreenshot = true; + this.pendingForcedScreenshotReason = 'actions_budget'; + this.eventEmitter.emit('desktop.forced_screenshot', { + reason: 'actions_budget', + }); + return; + } + + if (!isUiAffecting || typeof action.actionSignature !== 'string') { + return; + } + + // During a self-repair action, do not schedule nested repairs/interrupts. + if (this.desktopUiRepairInProgress) { + return; + } + + const loop = this.desktopLoopDetector.record({ + atMs: Date.now(), + signature: action.actionSignature, + screenshotHash: action.screenshotHash ?? null, + }); + + if (loop.interrupt) { + this.eventEmitter.emit('desktop.loop.detected', { + rule: loop.rule || 'unknown', + }); + + // Schedule a bounded repair (executed in the tool loop). Avoid stacking multiple repairs. + if (!this.pendingDesktopRepairTrigger) { + this.pendingDesktopRepairTrigger = { + trigger: 'loop', + details: { + rule: loop.rule || 'unknown', + signature: loop.signature, + count: loop.count, + }, + }; + } + return; + } } async stopProcessing(): Promise { @@ -407,13 +1811,28 @@ export class AgentProcessor { return; } - this.logger.log(`Stopping execution of task ${this.currentTaskId}`); + const taskId = this.currentTaskId; + this.logger.log(`Stopping execution of task ${taskId}`); // Signal any in-flight async operations to abort this.abortController?.abort(); await this.inputCaptureService.stop(); + // Phase 6.4: Stop heartbeat and flush action logs + if (taskId) { + this.taskControllerService.stopHeartbeat(taskId); + + // Flush any pending action logs + try { + await this.actionLoggingService.flushActions(taskId); + } catch (error: any) { + this.logger.warn( + `Failed to flush action logs for ${taskId}: ${error.message}`, + ); + } + } + this.isProcessing = false; this.currentTaskId = null; } diff --git a/packages/bytebot-agent/src/agent/agent.scheduler.ts b/packages/bytebot-agent/src/agent/agent.scheduler.ts index 48815c7a2..ca4a0f4e9 100644 --- a/packages/bytebot-agent/src/agent/agent.scheduler.ts +++ b/packages/bytebot-agent/src/agent/agent.scheduler.ts @@ -1,10 +1,17 @@ import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; -import { Cron, CronExpression } from '@nestjs/schedule'; +import { Cron, CronExpression, Interval } from '@nestjs/schedule'; import { TasksService } from '../tasks/tasks.service'; import { AgentProcessor } from './agent.processor'; import { TaskStatus } from '@prisma/client'; import { writeFile } from './agent.computer-use'; +/** + * v2.2.5: Interval for lease renewal (in milliseconds) + * Should be less than the lease timeout to prevent expiration during processing + * Default: 60 seconds (lease timeout is 300 seconds) + */ +const LEASE_RENEWAL_INTERVAL_MS = 60000; + @Injectable() export class AgentScheduler implements OnModuleInit { private readonly logger = new Logger(AgentScheduler.name); @@ -37,9 +44,14 @@ export class AgentScheduler implements OnModuleInit { if (this.agentProcessor.isRunning()) { return; } - // Find the highest priority task to execute - const task = await this.tasksService.findNextTask(); + + // v2.2.3: Use atomic task claiming to prevent race conditions + // This replaces the previous findNextTask() + update() pattern which allowed + // multiple pods to claim the same task simultaneously + const task = await this.tasksService.claimNextTask(); + if (task) { + // Write any attached files to the desktop if (task.files.length > 0) { this.logger.debug( `Task ID: ${task.id} has files, writing them to the desktop`, @@ -52,12 +64,52 @@ export class AgentScheduler implements OnModuleInit { } } - await this.tasksService.update(task.id, { - status: TaskStatus.RUNNING, - executedAt: new Date(), - }); - this.logger.debug(`Processing task ID: ${task.id}`); + // Task is already marked as RUNNING by claimNextTask() + this.logger.debug(`Processing claimed task ID: ${task.id}`); this.agentProcessor.processTask(task.id); } } + + /** + * v2.2.5: Periodically renew the lease for the currently processing task. + * This prevents the task from being marked as orphaned while it's still being worked on. + * Runs every 60 seconds (less than the 5-minute lease timeout). + */ + @Interval(LEASE_RENEWAL_INTERVAL_MS) + async renewCurrentTaskLease() { + const currentTaskId = this.agentProcessor.getCurrentTaskId(); + const podName = process.env.POD_NAME || 'unknown'; + + if (!currentTaskId || !this.agentProcessor.isRunning()) { + return; + } + + this.logger.debug(`Renewing lease for current task ${currentTaskId}`); + const renewed = await this.tasksService.renewLease(currentTaskId, podName); + + if (!renewed) { + this.logger.warn( + `Failed to renew lease for task ${currentTaskId} - may have been claimed by another pod`, + ); + // Note: The processor will detect this on next iteration when task status changes + } + } + + /** + * v2.2.5: Recover orphaned tasks with expired leases. + * Runs every minute to check for tasks that were abandoned by crashed workers. + * + * Note: This is intentionally on a slower schedule than task claiming to reduce + * database load. Orphaned tasks are not time-critical since they've already failed. + */ + @Cron(CronExpression.EVERY_MINUTE) + async recoverOrphanedTasks() { + const recoveredIds = await this.tasksService.recoverOrphanedTasks(); + + if (recoveredIds.length > 0) { + this.logger.warn( + `Recovered ${recoveredIds.length} orphaned task(s): ${recoveredIds.join(', ')}`, + ); + } + } } diff --git a/packages/bytebot-agent/src/agent/agent.tools.ts b/packages/bytebot-agent/src/agent/agent.tools.ts index 85bb5b4a4..7393c7f71 100644 --- a/packages/bytebot-agent/src/agent/agent.tools.ts +++ b/packages/bytebot-agent/src/agent/agent.tools.ts @@ -181,7 +181,9 @@ export const _typeKeysTool = { export const _pressKeysTool = { name: 'computer_press_keys', description: - 'Presses or releases specific keys (useful for holding modifiers)', + 'Presses or releases specific keys (useful for holding modifiers). ' + + 'Only use this for modifier holds (Shift/Ctrl/Alt/Meta) and provide holdMs <= 750. ' + + 'For non-modifier keys like Enter/Tab/Escape/arrows, use computer_type_keys (tap/chord-tap) or computer_type_text (e.g. "\\n" for Enter).', input_schema: { type: 'object' as const, properties: { @@ -195,6 +197,15 @@ export const _pressKeysTool = { enum: ['up', 'down'], description: 'Whether to press down or release up', }, + holdMs: { + type: 'integer' as const, + description: + 'Optional hold duration in milliseconds for press="down". ' + + 'Only valid for modifier keys and will be capped at 750ms by the agent.', + nullable: true, + minimum: 0, + maximum: 750, + }, }, required: ['keys', 'press'], }, @@ -322,11 +333,46 @@ export const _setTaskStatusTool = { enum: ['completed', 'needs_help'], description: 'The status of the task', }, + errorCode: { + type: 'string' as const, + description: + 'Optional structured reason code when status="needs_help". ' + + 'Use a specific code when external input or takeover is truly required; ' + + 'do not use needs_help for strategy decisions (pick a reasonable default and continue).', + nullable: true, + enum: [ + 'GOAL_INTAKE_REQUIRED', + 'APPROVAL_REQUIRED', + 'DESKTOP_TAKEOVER_REQUIRED', + 'DISPATCHED_USER_PROMPT_STEP', + 'DESKTOP_NOT_ALLOWED', + 'LLM_EMPTY_RESPONSE', + 'TOOL_CONTRACT_VIOLATION', + 'LOOP_DETECTED_NO_PROGRESS', + 'UI_OBSERVATION_FAILED', + 'UI_BLOCKED_SIGNIN', + 'UI_BLOCKED_POPUP', + 'CAPABILITY_MISMATCH', + 'LLM_PROXY_DOWN', + 'MODEL_UNAVAILABLE', + 'WAITING_PROVIDER', + // Contract violation codes are emitted by the agent when the model response is malformed. + 'CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP', + 'CONTRACT_VIOLATION_STRATEGY_AS_HELP', + ], + }, description: { type: 'string' as const, description: 'If the task is completed, a summary of the task. If the task needs help, a description of the issue or clarification needed.', }, + details: { + type: 'object' as const, + description: + 'Optional structured details for status="needs_help". Must not include secrets.', + nullable: true, + additionalProperties: true, + }, }, required: ['status', 'description'], }, diff --git a/packages/bytebot-agent/src/agent/agent.types.ts b/packages/bytebot-agent/src/agent/agent.types.ts index 981ee0eb7..b3706de9d 100644 --- a/packages/bytebot-agent/src/agent/agent.types.ts +++ b/packages/bytebot-agent/src/agent/agent.types.ts @@ -1,5 +1,6 @@ import { Message } from '@prisma/client'; import { MessageContentBlock } from '@bytebot/shared'; +import { ToolPolicyContext } from './tool-policy'; export interface BytebotAgentResponse { contentBlocks: MessageContentBlock[]; @@ -15,11 +16,16 @@ export interface BytebotAgentService { systemPrompt: string, messages: Message[], model: string, - useTools: boolean, - signal?: AbortSignal, + options?: BytebotAgentGenerateMessageOptions, ): Promise; } +export interface BytebotAgentGenerateMessageOptions { + useTools?: boolean; + toolPolicy?: ToolPolicyContext; + signal?: AbortSignal; +} + export interface BytebotAgentModel { provider: 'anthropic' | 'openai' | 'google' | 'proxy'; name: string; diff --git a/packages/bytebot-agent/src/agent/execution-surface.spec.ts b/packages/bytebot-agent/src/agent/execution-surface.spec.ts new file mode 100644 index 000000000..3b5513e19 --- /dev/null +++ b/packages/bytebot-agent/src/agent/execution-surface.spec.ts @@ -0,0 +1,29 @@ +import { + isDispatchedUserPromptStep, + resolveExecutionSurface, + shouldAcquireDesktop, +} from './execution-surface'; + +describe('execution-surface helpers', () => { + it('resolves surface from explicit value or requiresDesktop default', () => { + expect(resolveExecutionSurface({ requiresDesktop: true })).toBe('DESKTOP'); + expect(resolveExecutionSurface({ requiresDesktop: false })).toBe('TEXT_ONLY'); + expect(resolveExecutionSurface({ requiresDesktop: false, executionSurface: 'DESKTOP' })).toBe('DESKTOP'); + expect(resolveExecutionSurface({ requiresDesktop: true, executionSurface: 'TEXT_ONLY' })).toBe('TEXT_ONLY'); + expect(resolveExecutionSurface({ requiresDesktop: true, executionSurface: 'invalid' })).toBe('DESKTOP'); + }); + + it('acquires desktop only when requiresDesktop && surface=DESKTOP && desktop tools used', () => { + expect(shouldAcquireDesktop({ requiresDesktop: true, surface: 'DESKTOP', hasDesktopToolUse: true })).toBe(true); + expect(shouldAcquireDesktop({ requiresDesktop: false, surface: 'DESKTOP', hasDesktopToolUse: true })).toBe(false); + expect(shouldAcquireDesktop({ requiresDesktop: true, surface: 'TEXT_ONLY', hasDesktopToolUse: true })).toBe(false); + expect(shouldAcquireDesktop({ requiresDesktop: true, surface: 'DESKTOP', hasDesktopToolUse: false })).toBe(false); + }); + + it('detects dispatched user-prompt steps via ASK_USER tool flag', () => { + expect(isDispatchedUserPromptStep({ allowedTools: ['ASK_USER'] })).toBe(true); + expect(isDispatchedUserPromptStep({ allowedTools: [] })).toBe(false); + expect(isDispatchedUserPromptStep({ allowedTools: null })).toBe(false); + }); +}); + diff --git a/packages/bytebot-agent/src/agent/execution-surface.ts b/packages/bytebot-agent/src/agent/execution-surface.ts new file mode 100644 index 000000000..bc7004f24 --- /dev/null +++ b/packages/bytebot-agent/src/agent/execution-surface.ts @@ -0,0 +1,28 @@ +export type ExecutionSurface = 'TEXT_ONLY' | 'DESKTOP'; + +export function parseExecutionSurface(value: unknown): ExecutionSurface | undefined { + if (value === 'TEXT_ONLY' || value === 'DESKTOP') return value; + return undefined; +} + +export function resolveExecutionSurface(input: { + requiresDesktop?: boolean; + executionSurface?: unknown; +}): ExecutionSurface { + const explicit = parseExecutionSurface(input.executionSurface); + if (explicit) return explicit; + return input.requiresDesktop ? 'DESKTOP' : 'TEXT_ONLY'; +} + +export function shouldAcquireDesktop(input: { + requiresDesktop: boolean; + surface: ExecutionSurface; + hasDesktopToolUse: boolean; +}): boolean { + return input.requiresDesktop && input.surface === 'DESKTOP' && input.hasDesktopToolUse; +} + +export function isDispatchedUserPromptStep(input: { allowedTools?: string[] | null }): boolean { + return (input.allowedTools || []).includes('ASK_USER'); +} + diff --git a/packages/bytebot-agent/src/agent/fixtures/desktop-loop-google-flights.jsonl b/packages/bytebot-agent/src/agent/fixtures/desktop-loop-google-flights.jsonl new file mode 100644 index 000000000..8266d7c9d --- /dev/null +++ b/packages/bytebot-agent/src/agent/fixtures/desktop-loop-google-flights.jsonl @@ -0,0 +1,12 @@ +{"tool":"computer_move_mouse","coordinates":{"x":765,"y":592},"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_click_mouse","coordinates":{"x":765,"y":592},"button":"left","clickCount":1,"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_move_mouse","coordinates":{"x":766,"y":593},"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_click_mouse","coordinates":{"x":766,"y":593},"button":"left","clickCount":1,"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_move_mouse","coordinates":{"x":765,"y":592},"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_click_mouse","coordinates":{"x":765,"y":592},"button":"left","clickCount":1,"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_move_mouse","coordinates":{"x":766,"y":593},"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_click_mouse","coordinates":{"x":766,"y":593},"button":"left","clickCount":1,"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_move_mouse","coordinates":{"x":765,"y":592},"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_click_mouse","coordinates":{"x":765,"y":592},"button":"left","clickCount":1,"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_move_mouse","coordinates":{"x":766,"y":593},"screenshotHash":"ffffffffffffffff"} +{"tool":"computer_click_mouse","coordinates":{"x":766,"y":593},"button":"left","clickCount":1,"screenshotHash":"ffffffffffffffff"} diff --git a/packages/bytebot-agent/src/agent/message-history.validator.ts b/packages/bytebot-agent/src/agent/message-history.validator.ts new file mode 100644 index 000000000..6361479b2 --- /dev/null +++ b/packages/bytebot-agent/src/agent/message-history.validator.ts @@ -0,0 +1,164 @@ +/** + * v2.2.5: Message History Validator + * + * Validates message history before sending to AI API to detect corruption. + * The most common corruption is orphaned tool_use blocks without corresponding + * tool_result blocks, which causes the Anthropic API to return: + * "tool_use ids were found without tool_result blocks immediately after" + * + * This corruption can occur due to: + * - Race conditions in task claiming (fixed in v2.2.4) + * - Worker crashes during tool execution + * - Network failures during message saving + * + * When corruption is detected, the task should be marked as FAILED rather than + * attempting repair, since the task state is unknown and user review is needed. + */ + +import { Logger } from '@nestjs/common'; +import { Message, Role } from '@prisma/client'; +import { MessageContentBlock, MessageContentType } from '@bytebot/shared'; + +export interface ValidationResult { + isValid: boolean; + errors: string[]; + orphanedToolUseIds: string[]; +} + +/** + * Validates message history for structural integrity. + * + * Checks: + * 1. Every tool_use block has a corresponding tool_result in the next user message + * 2. Message alternation is correct (user/assistant/user/assistant...) + * 3. No orphaned tool_result blocks (results without matching tool_use) + */ +export function validateMessageHistory( + messages: Array<{ role: Role; content: MessageContentBlock[] | unknown }>, + logger?: Logger, +): ValidationResult { + const errors: string[] = []; + const orphanedToolUseIds: string[] = []; + + if (!messages || messages.length === 0) { + return { isValid: true, errors: [], orphanedToolUseIds: [] }; + } + + for (let i = 0; i < messages.length; i++) { + const message = messages[i]; + const content = message.content as MessageContentBlock[]; + + // Skip messages without array content + if (!Array.isArray(content)) { + continue; + } + + if (message.role === Role.ASSISTANT) { + // Find all tool_use blocks in this assistant message + const toolUseBlocks = content.filter( + (block) => block.type === MessageContentType.ToolUse, + ); + + if (toolUseBlocks.length === 0) { + continue; + } + + // Check if there's a next message + const nextMessage = messages[i + 1]; + + if (!nextMessage) { + // Last message in history has tool_use blocks without results + // This is the most common case - task was interrupted + const ids = toolUseBlocks + .map((block: any) => block.id) + .filter((id: string) => id); + orphanedToolUseIds.push(...ids); + errors.push( + `Message at index ${i} has ${toolUseBlocks.length} tool_use block(s) without results (end of history)`, + ); + continue; + } + + if (nextMessage.role !== Role.USER) { + // Next message is not a user message (should contain tool_results) + const ids = toolUseBlocks + .map((block: any) => block.id) + .filter((id: string) => id); + orphanedToolUseIds.push(...ids); + errors.push( + `Message at index ${i} has tool_use blocks but next message (index ${i + 1}) is not a user message`, + ); + continue; + } + + // Check that each tool_use has a corresponding tool_result + const nextContent = nextMessage.content as MessageContentBlock[]; + if (!Array.isArray(nextContent)) { + const ids = toolUseBlocks + .map((block: any) => block.id) + .filter((id: string) => id); + orphanedToolUseIds.push(...ids); + errors.push( + `Message at index ${i} has tool_use blocks but next message has invalid content`, + ); + continue; + } + + const toolResultIds = new Set( + nextContent + .filter((block) => block.type === MessageContentType.ToolResult) + .map((block: any) => block.tool_use_id), + ); + + for (const toolUse of toolUseBlocks) { + const toolUseId = (toolUse as any).id; + if (toolUseId && !toolResultIds.has(toolUseId)) { + orphanedToolUseIds.push(toolUseId); + errors.push( + `Tool use ${toolUseId} at message index ${i} has no corresponding tool_result`, + ); + } + } + } + } + + const isValid = errors.length === 0; + + if (!isValid && logger) { + logger.warn( + `Message history validation failed: ${errors.length} error(s) found`, + ); + for (const error of errors) { + logger.warn(` - ${error}`); + } + } + + return { + isValid, + errors, + orphanedToolUseIds, + }; +} + +/** + * Formats validation errors into a human-readable error message for task failure. + */ +export function formatValidationError(result: ValidationResult): string { + if (result.isValid) { + return ''; + } + + const errorCount = result.errors.length; + const orphanedCount = result.orphanedToolUseIds.length; + + let message = `Message history corrupted: ${errorCount} validation error(s) detected.`; + + if (orphanedCount > 0) { + message += ` Found ${orphanedCount} orphaned tool_use block(s) without tool_result responses.`; + } + + message += ' This may have occurred due to a worker crash or race condition during task processing.'; + message += ' Please create a new task to retry.'; + + return message; +} diff --git a/packages/bytebot-agent/src/agent/needs-help.spec.ts b/packages/bytebot-agent/src/agent/needs-help.spec.ts new file mode 100644 index 000000000..7a1413546 --- /dev/null +++ b/packages/bytebot-agent/src/agent/needs-help.spec.ts @@ -0,0 +1,37 @@ +import { buildNeedsHelpResult, parseNeedsHelpErrorCode } from './needs-help'; + +describe('needs-help helpers', () => { + it('uses default message when message is empty', () => { + const result = buildNeedsHelpResult({ + errorCode: 'LLM_EMPTY_RESPONSE', + message: ' ', + }); + + expect(result.errorCode).toBe('LLM_EMPTY_RESPONSE'); + expect(result.message).toBe('LLM returned an empty response.'); + }); + + it('passes through details when provided', () => { + const result = buildNeedsHelpResult({ + errorCode: 'DESKTOP_NOT_ALLOWED', + message: 'Custom', + details: { executionSurface: 'TEXT_ONLY' }, + }); + + expect(result).toEqual({ + errorCode: 'DESKTOP_NOT_ALLOWED', + message: 'Custom', + details: { executionSurface: 'TEXT_ONLY' }, + }); + }); + + it('parses known error codes and rejects unknowns', () => { + expect(parseNeedsHelpErrorCode('UI_BLOCKED_SIGNIN')).toBe('UI_BLOCKED_SIGNIN'); + expect(parseNeedsHelpErrorCode(' UI_BLOCKED_POPUP ')).toBe('UI_BLOCKED_POPUP'); + expect(parseNeedsHelpErrorCode('CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP')).toBe( + 'CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP', + ); + expect(parseNeedsHelpErrorCode('NOT_A_REAL_CODE')).toBeNull(); + expect(parseNeedsHelpErrorCode(null)).toBeNull(); + }); +}); diff --git a/packages/bytebot-agent/src/agent/needs-help.ts b/packages/bytebot-agent/src/agent/needs-help.ts new file mode 100644 index 000000000..ff3b64dcf --- /dev/null +++ b/packages/bytebot-agent/src/agent/needs-help.ts @@ -0,0 +1,114 @@ +export type NeedsHelpErrorCode = + | 'GOAL_INTAKE_REQUIRED' + | 'APPROVAL_REQUIRED' + | 'DESKTOP_TAKEOVER_REQUIRED' + | 'DISPATCHED_USER_PROMPT_STEP' + | 'DESKTOP_NOT_ALLOWED' + | 'LLM_EMPTY_RESPONSE' + | 'TOOL_CONTRACT_VIOLATION' + | 'LOOP_DETECTED_NO_PROGRESS' + | 'UI_OBSERVATION_FAILED' + | 'UI_BLOCKED_SIGNIN' + | 'UI_BLOCKED_POPUP' + | 'CAPABILITY_MISMATCH' + | 'LLM_PROXY_DOWN' + | 'MODEL_UNAVAILABLE' + | 'WAITING_PROVIDER' + | 'CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP' + | 'CONTRACT_VIOLATION_STRATEGY_AS_HELP' + // Deprecated: do not emit. Kept for backward compatibility with older models. + | 'AGENT_REQUESTED_HELP'; + +export type NeedsHelpResult = { + errorCode: NeedsHelpErrorCode; + message: string; + details?: Record; +}; + +const NEEDS_HELP_ERROR_CODE_SET = new Set([ + 'GOAL_INTAKE_REQUIRED', + 'APPROVAL_REQUIRED', + 'DESKTOP_TAKEOVER_REQUIRED', + 'DISPATCHED_USER_PROMPT_STEP', + 'DESKTOP_NOT_ALLOWED', + 'LLM_EMPTY_RESPONSE', + 'TOOL_CONTRACT_VIOLATION', + 'LOOP_DETECTED_NO_PROGRESS', + 'UI_OBSERVATION_FAILED', + 'UI_BLOCKED_SIGNIN', + 'UI_BLOCKED_POPUP', + 'CAPABILITY_MISMATCH', + 'LLM_PROXY_DOWN', + 'MODEL_UNAVAILABLE', + 'WAITING_PROVIDER', + 'CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP', + 'CONTRACT_VIOLATION_STRATEGY_AS_HELP', + 'AGENT_REQUESTED_HELP', +]); + +export function parseNeedsHelpErrorCode(value: unknown): NeedsHelpErrorCode | null { + if (typeof value !== 'string') return null; + const trimmed = value.trim(); + if (!trimmed) return null; + return NEEDS_HELP_ERROR_CODE_SET.has(trimmed as NeedsHelpErrorCode) + ? (trimmed as NeedsHelpErrorCode) + : null; +} + +const DEFAULT_MESSAGES: Record = { + GOAL_INTAKE_REQUIRED: + 'This task requires additional user-provided details before it can proceed.', + APPROVAL_REQUIRED: + 'This task requires explicit user approval before it can proceed.', + DESKTOP_TAKEOVER_REQUIRED: + 'This task requires human takeover to proceed (e.g., sign-in, MFA, CAPTCHA, or other UI blocker).', + DISPATCHED_USER_PROMPT_STEP: + 'This task requires user input and must not be executed by the agent.', + DESKTOP_NOT_ALLOWED: + 'Desktop tools were requested, but this task is configured for TEXT_ONLY execution.', + LLM_EMPTY_RESPONSE: 'LLM returned an empty response.', + TOOL_CONTRACT_VIOLATION: + 'The desktop automation tool contract was violated repeatedly; execution paused for safety.', + LOOP_DETECTED_NO_PROGRESS: + 'Desktop automation appears stuck in a no-progress loop; execution paused for safety.', + UI_OBSERVATION_FAILED: + 'Unable to reliably observe the desktop state (screenshots missing); execution paused for safety.', + UI_BLOCKED_SIGNIN: + 'The UI appears blocked by a sign-in flow that requires human action.', + UI_BLOCKED_POPUP: + 'The UI appears blocked by a popup/modal that requires human action.', + CAPABILITY_MISMATCH: + 'The task requested capabilities that are not available on this agent/runtime.', + LLM_PROXY_DOWN: + 'The LLM gateway/proxy is unreachable; execution paused while waiting for provider recovery.', + MODEL_UNAVAILABLE: + 'The requested model is unavailable; execution paused while waiting for capacity.', + WAITING_PROVIDER: + 'The required provider/model capacity is unavailable; execution paused while waiting.', + CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP: + 'Agent returned NEEDS_HELP without a valid errorCode; treating as a contract violation.', + CONTRACT_VIOLATION_STRATEGY_AS_HELP: + 'Agent attempted to ask a strategy question via NEEDS_HELP; treating as a contract violation.', + AGENT_REQUESTED_HELP: + 'Agent requested help (deprecated); treating as a contract violation.', +}; + +export function buildNeedsHelpResult(input: { + errorCode: NeedsHelpErrorCode; + message?: string | null; + details?: Record | null; +}): NeedsHelpResult { + const message = + typeof input.message === 'string' && input.message.trim().length > 0 + ? input.message.trim() + : DEFAULT_MESSAGES[input.errorCode]; + + const details = + input.details && typeof input.details === 'object' ? input.details : undefined; + + return { + errorCode: input.errorCode, + message, + ...(details ? { details } : {}), + }; +} diff --git a/packages/bytebot-agent/src/agent/tool-policy.ts b/packages/bytebot-agent/src/agent/tool-policy.ts new file mode 100644 index 000000000..92c78caaa --- /dev/null +++ b/packages/bytebot-agent/src/agent/tool-policy.ts @@ -0,0 +1,75 @@ +export type ExecutionSurface = 'TEXT_ONLY' | 'DESKTOP'; + +export interface ToolPolicyContext { + requiresDesktop?: boolean | null; + executionSurface?: ExecutionSurface | string | null; + gatewayToolsOnly?: boolean | null; + allowedTools?: string[] | null; +} + +const ALWAYS_ALLOWED_TOOL_NAMES = new Set(['set_task_status', 'create_task']); + +function normalizeSurface(value: unknown): ExecutionSurface { + return value === 'DESKTOP' ? 'DESKTOP' : 'TEXT_ONLY'; +} + +function normalizeAllowedTools(allowedTools?: string[] | null): string[] { + if (!Array.isArray(allowedTools)) return []; + return allowedTools + .filter((t): t is string => typeof t === 'string') + .map((t) => t.trim()) + .filter(Boolean); +} + +function isDesktopToolName(toolName: string): boolean { + return toolName.startsWith('computer_'); +} + +function isRecognizedToolToken(token: string): boolean { + return ( + token === 'computer' || + token === 'set_task_status' || + token === 'create_task' || + token.startsWith('computer_') + ); +} + +export function filterToolsByPolicy( + tools: T[], + getName: (tool: T) => string, + context?: ToolPolicyContext | null, +): T[] { + if (!context) return tools; + + const requiresDesktop = Boolean(context.requiresDesktop); + const executionSurface = normalizeSurface(context.executionSurface); + const gatewayToolsOnly = Boolean(context.gatewayToolsOnly); + + // Text-only means text-only: never expose desktop tools unless BOTH are true: + // - requiresDesktop=true + // - surface=DESKTOP + const allowDesktopTools = requiresDesktop && executionSurface === 'DESKTOP' && !gatewayToolsOnly; + + const allowedTools = normalizeAllowedTools(context.allowedTools); + const recognizedAllowList = allowedTools.filter(isRecognizedToolToken); + const enforceAllowList = recognizedAllowList.length > 0; + + return tools.filter((tool) => { + const name = getName(tool); + + if (ALWAYS_ALLOWED_TOOL_NAMES.has(name)) return true; + + const isDesktop = isDesktopToolName(name); + if (isDesktop) { + if (!allowDesktopTools) return false; + if (!enforceAllowList) return true; + return ( + recognizedAllowList.includes('computer') || recognizedAllowList.includes(name) + ); + } + + if (!enforceAllowList) return true; + return recognizedAllowList.includes(name); + }); +} + diff --git a/packages/bytebot-agent/src/anthropic/anthropic.service.ts b/packages/bytebot-agent/src/anthropic/anthropic.service.ts index 78f1b94e1..2e33e23b3 100644 --- a/packages/bytebot-agent/src/anthropic/anthropic.service.ts +++ b/packages/bytebot-agent/src/anthropic/anthropic.service.ts @@ -16,9 +16,11 @@ import { Message, Role } from '@prisma/client'; import { anthropicTools } from './anthropic.tools'; import { BytebotAgentService, + BytebotAgentGenerateMessageOptions, BytebotAgentInterrupt, BytebotAgentResponse, } from '../agent/agent.types'; +import { filterToolsByPolicy } from '../agent/tool-policy'; @Injectable() export class AnthropicService implements BytebotAgentService { @@ -34,28 +36,55 @@ export class AnthropicService implements BytebotAgentService { ); } + // v2.2.8: Re-enable SDK retries for transient error resilience. + // The SDK retries connection errors, 408, 409, 429, and 5xx with exponential backoff. + // Duplicate responses are handled by the idempotency check in MessagesService.create() + // which detects duplicate tool_use IDs before persisting to database. + // See: 2025-12-09-race-condition-duplicate-llm-calls-fix.md this.anthropic = new Anthropic({ apiKey: apiKey || 'dummy-key-for-initialization', + maxRetries: 2, // SDK default: 2 retries with exponential backoff }); + + this.logger.log('AnthropicService initialized with maxRetries: 2'); } async generateMessage( systemPrompt: string, messages: Message[], model: string = DEFAULT_MODEL.name, - useTools: boolean = true, - signal?: AbortSignal, + options: BytebotAgentGenerateMessageOptions = {}, ): Promise { + const useTools = options.useTools ?? true; + const signal = options.signal; + + // v2.2.7: Generate unique request ID for tracing + const requestId = `llm-${Date.now()}-${Math.random().toString(36).substring(7)}`; + const startTime = Date.now(); + try { const maxTokens = 8192; // Convert our message content blocks to Anthropic's expected format const anthropicMessages = this.formatMessagesForAnthropic(messages); - // add cache_control to last tool - anthropicTools[anthropicTools.length - 1].cache_control = { - type: 'ephemeral', - }; + const tools = useTools + ? filterToolsByPolicy( + anthropicTools.map((tool) => ({ ...tool })), + (tool) => tool.name, + options.toolPolicy, + ) + : []; + + // Add cache_control to the last tool (best-effort, do not mutate shared definitions) + if (tools.length > 0) { + (tools[tools.length - 1] as any).cache_control = { type: 'ephemeral' }; + } + + // v2.2.7: Log before LLM call for debugging duplicate call issues + this.logger.debug( + `[${requestId}] Starting LLM call: model=${model}, messages=${messages.length}, useTools=${useTools}`, + ); // Make the API call const response = await this.anthropic.messages.create( @@ -71,11 +100,23 @@ export class AnthropicService implements BytebotAgentService { }, ], messages: anthropicMessages, - tools: useTools ? anthropicTools : [], + tools, }, { signal }, ); + const elapsed = Date.now() - startTime; + + // v2.2.7: Log response details for debugging + // Extract tool_use IDs if present for tracking + const toolUseIds = response.content + .filter((block) => block.type === 'tool_use') + .map((block: any) => block.id); + + this.logger.debug( + `[${requestId}] LLM call completed: elapsed=${elapsed}ms, blocks=${response.content.length}, toolUseIds=[${toolUseIds.join(', ')}]`, + ); + // Convert Anthropic's response to our message content blocks format return { contentBlocks: this.formatAnthropicResponse(response.content), @@ -87,14 +128,17 @@ export class AnthropicService implements BytebotAgentService { }, }; } catch (error) { - this.logger.log(error); + const elapsed = Date.now() - startTime; + this.logger.warn( + `[${requestId}] LLM call failed after ${elapsed}ms: ${error.message}`, + ); if (error instanceof APIUserAbortError) { - this.logger.log('Anthropic API call aborted'); + this.logger.log(`[${requestId}] Anthropic API call aborted`); throw new BytebotAgentInterrupt(); } this.logger.error( - `Error sending message to Anthropic: ${error.message}`, + `[${requestId}] Error sending message to Anthropic: ${error.message}`, error.stack, ); throw error; diff --git a/packages/bytebot-agent/src/app.module.ts b/packages/bytebot-agent/src/app.module.ts index 95f84a442..50e925341 100644 --- a/packages/bytebot-agent/src/app.module.ts +++ b/packages/bytebot-agent/src/app.module.ts @@ -13,6 +13,11 @@ import { ScheduleModule } from '@nestjs/schedule'; import { EventEmitterModule } from '@nestjs/event-emitter'; import { SummariesModule } from './summaries/summaries.modue'; import { ProxyModule } from './proxy/proxy.module'; +import { LLMResilienceModule } from './llm-resilience/llm-resilience.module'; +import { WorkspaceModule } from './workspace/workspace.module'; +import { GatewayModule } from './gateway/gateway.module'; +import { ToolsModule } from './tools/tools.module'; +import { MetricsModule } from './metrics/metrics.module'; @Module({ imports: [ @@ -29,7 +34,12 @@ import { ProxyModule } from './proxy/proxy.module'; OpenAIModule, GoogleModule, ProxyModule, + LLMResilienceModule, // v2.5.0: Retry logic for LLM API calls PrismaModule, + WorkspaceModule, // v2.3.0 M4: Workspace-aware desktop resolution and locking + GatewayModule, // v2.3.0 M4: Butler Service Gateway integration + ToolsModule, // v2.3.0 M4: Unified tool execution and routing + MetricsModule, ], controllers: [AppController], providers: [AppService], diff --git a/packages/bytebot-agent/src/gateway/gateway.module.ts b/packages/bytebot-agent/src/gateway/gateway.module.ts new file mode 100644 index 000000000..1da744bc3 --- /dev/null +++ b/packages/bytebot-agent/src/gateway/gateway.module.ts @@ -0,0 +1,16 @@ +/** + * Gateway Module + * v2.3.0 M4: Provides Butler Service Gateway integration for non-desktop tools + */ + +import { Module, Global } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { GatewayService } from './gateway.service'; + +@Global() +@Module({ + imports: [ConfigModule], + providers: [GatewayService], + exports: [GatewayService], +}) +export class GatewayModule {} diff --git a/packages/bytebot-agent/src/gateway/gateway.service.ts b/packages/bytebot-agent/src/gateway/gateway.service.ts new file mode 100644 index 000000000..5efd226a2 --- /dev/null +++ b/packages/bytebot-agent/src/gateway/gateway.service.ts @@ -0,0 +1,572 @@ +/** + * Butler Service Gateway Client + * v2.3.0 M4: Integrates with Butler Service Gateway for non-desktop tools + * + * The Butler Service Gateway provides 25+ pre-integrated tools via unified REST API: + * - search_web_search: Web search + * - weather_get_current: Weather data + * - communications_send_email: Email (high-risk, requires approval) + * - communications_send_sms: SMS (high-risk, requires approval) + * - calendar_create_event: Calendar management + * - notes_create: Note taking + * - And more... + * + * For Product 2 Workflows, the agent can use these gateway tools without + * needing desktop access, allowing parallel execution across nodes. + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; + +/** + * Gateway tool execution request + */ +export interface GatewayToolRequest { + toolName: string; + parameters: Record; + taskId?: string; + nodeRunId?: string; + workspaceId?: string; +} + +/** + * Gateway tool execution result + */ +export interface GatewayToolResult { + success: boolean; + result?: any; + error?: string; + executionTimeMs: number; + requiresApproval?: boolean; + approvalRequestId?: string; +} + +/** + * Gateway tool definition interface (mutable version for filtering) + */ +export interface GatewayToolDefinition { + name: string; + description: string; + category: string; + highRisk: boolean; +} + +/** + * Available gateway tools (from Butler Service Gateway) + */ +export const GATEWAY_TOOLS = { + // Search tools + search_web_search: { + name: 'search_web_search', + description: 'Search the web for information', + category: 'search', + highRisk: false, + }, + search_news: { + name: 'search_news', + description: 'Search for news articles', + category: 'search', + highRisk: false, + }, + + // Weather tools + weather_get_current: { + name: 'weather_get_current', + description: 'Get current weather for a location', + category: 'weather', + highRisk: false, + }, + weather_get_forecast: { + name: 'weather_get_forecast', + description: 'Get weather forecast for a location', + category: 'weather', + highRisk: false, + }, + + // Communication tools (HIGH RISK - require approval) + communications_send_email: { + name: 'communications_send_email', + description: 'Send an email to a recipient', + category: 'communications', + highRisk: true, + }, + communications_send_sms: { + name: 'communications_send_sms', + description: 'Send an SMS message', + category: 'communications', + highRisk: true, + }, + + // Calendar tools + calendar_list_events: { + name: 'calendar_list_events', + description: 'List calendar events', + category: 'calendar', + highRisk: false, + }, + calendar_create_event: { + name: 'calendar_create_event', + description: 'Create a calendar event', + category: 'calendar', + highRisk: false, + }, + calendar_delete_event: { + name: 'calendar_delete_event', + description: 'Delete a calendar event', + category: 'calendar', + highRisk: true, + }, + + // Notes tools + notes_create: { + name: 'notes_create', + description: 'Create a new note', + category: 'notes', + highRisk: false, + }, + notes_list: { + name: 'notes_list', + description: 'List notes', + category: 'notes', + highRisk: false, + }, + notes_delete: { + name: 'notes_delete', + description: 'Delete a note', + category: 'notes', + highRisk: true, + }, + + // Document tools + document_parse: { + name: 'document_parse', + description: 'Parse and extract content from a document', + category: 'document', + highRisk: false, + }, + document_summarize: { + name: 'document_summarize', + description: 'Summarize a document', + category: 'document', + highRisk: false, + }, + + // Data tools + data_extract: { + name: 'data_extract', + description: 'Extract structured data from text', + category: 'data', + highRisk: false, + }, + data_transform: { + name: 'data_transform', + description: 'Transform data between formats', + category: 'data', + highRisk: false, + }, + + // File tools + file_read: { + name: 'file_read', + description: 'Read a file from workspace storage', + category: 'file', + highRisk: false, + }, + file_write: { + name: 'file_write', + description: 'Write a file to workspace storage', + category: 'file', + highRisk: false, + }, + file_list: { + name: 'file_list', + description: 'List files in workspace storage', + category: 'file', + highRisk: false, + }, + + // Integration tools + integration_webhook: { + name: 'integration_webhook', + description: 'Send data to a webhook', + category: 'integration', + highRisk: true, + }, + integration_api_call: { + name: 'integration_api_call', + description: 'Make an API call to an external service', + category: 'integration', + highRisk: true, + }, +} as const; + +export type GatewayToolName = keyof typeof GATEWAY_TOOLS; + +@Injectable() +export class GatewayService { + private readonly logger = new Logger(GatewayService.name); + private readonly gatewayUrl: string; + private readonly internalToken: string; + private readonly timeoutMs: number; + + constructor(private readonly configService: ConfigService) { + // Butler Service Gateway URL + this.gatewayUrl = this.configService.get( + 'BUTLER_GATEWAY_URL', + 'http://butler-gateway:8080', + ); + + // Internal service token for authenticated requests + this.internalToken = this.configService.get( + 'INTERNAL_SERVICE_TOKEN', + '', + ); + + // Tool execution timeout (default 30 seconds) + this.timeoutMs = parseInt( + this.configService.get('GATEWAY_TOOL_TIMEOUT_MS', '30000'), + 10, + ); + + this.logger.log(`Butler Gateway URL: ${this.gatewayUrl}`); + } + + /** + * Check if a tool is a gateway tool + */ + isGatewayTool(toolName: string): boolean { + return toolName in GATEWAY_TOOLS; + } + + /** + * Check if a tool is high-risk and requires approval + */ + isHighRiskTool(toolName: string): boolean { + const tool = GATEWAY_TOOLS[toolName as GatewayToolName]; + return tool?.highRisk ?? false; + } + + /** + * Get tool definition + */ + getToolDefinition(toolName: string): (typeof GATEWAY_TOOLS)[GatewayToolName] | undefined { + return GATEWAY_TOOLS[toolName as GatewayToolName]; + } + + /** + * Get all available gateway tools + */ + getAllTools(): (typeof GATEWAY_TOOLS)[GatewayToolName][] { + return Object.values(GATEWAY_TOOLS); + } + + /** + * Filter tools by allowed list and high-risk list + * Returns mutable GatewayToolDefinition[] since highRisk may be modified + */ + filterTools( + allowedTools?: string[], + highRiskTools?: string[], + ): GatewayToolDefinition[] { + // Convert to mutable array + let tools: GatewayToolDefinition[] = this.getAllTools().map((tool) => ({ + name: tool.name, + description: tool.description, + category: tool.category, + highRisk: tool.highRisk, + })); + + // Filter by allowed list if provided + if (allowedTools && allowedTools.length > 0) { + tools = tools.filter((tool) => allowedTools.includes(tool.name)); + } + + // Mark high-risk tools based on workflow configuration + if (highRiskTools && highRiskTools.length > 0) { + tools = tools.map((tool) => ({ + ...tool, + highRisk: tool.highRisk || highRiskTools.includes(tool.name), + })); + } + + return tools; + } + + /** + * Execute a gateway tool + */ + async executeTool(request: GatewayToolRequest): Promise { + const startTime = Date.now(); + + if (!this.isGatewayTool(request.toolName)) { + return { + success: false, + error: `Unknown gateway tool: ${request.toolName}`, + executionTimeMs: Date.now() - startTime, + }; + } + + try { + const url = `${this.gatewayUrl}/api/v1/tools/${request.toolName}/execute`; + this.logger.log(`Executing gateway tool: ${request.toolName}`); + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs); + + try { + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + ...(request.taskId && { 'X-Task-Id': request.taskId }), + ...(request.nodeRunId && { 'X-Node-Run-Id': request.nodeRunId }), + ...(request.workspaceId && { 'X-Workspace-Id': request.workspaceId }), + }, + body: JSON.stringify({ + parameters: request.parameters, + }), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + const executionTimeMs = Date.now() - startTime; + + if (!response.ok) { + const error = await response.json().catch(() => ({ message: 'Unknown error' })); + + // Handle approval-required response (HTTP 202) + if (response.status === 202) { + return { + success: false, + requiresApproval: true, + approvalRequestId: error.approvalRequestId, + error: error.message || 'This action requires human approval', + executionTimeMs, + }; + } + + return { + success: false, + error: error.message || `Gateway returned ${response.status}`, + executionTimeMs, + }; + } + + const result = await response.json(); + + this.logger.log( + `Gateway tool ${request.toolName} completed in ${executionTimeMs}ms`, + ); + + return { + success: true, + result: result.data, + executionTimeMs, + }; + } finally { + clearTimeout(timeoutId); + } + } catch (error: any) { + const executionTimeMs = Date.now() - startTime; + + if (error.name === 'AbortError') { + return { + success: false, + error: `Tool execution timed out after ${this.timeoutMs}ms`, + executionTimeMs, + }; + } + + this.logger.error( + `Gateway tool ${request.toolName} failed: ${error.message}`, + ); + + return { + success: false, + error: error.message, + executionTimeMs, + }; + } + } + + /** + * Check approval status for a pending action + * v2.3.0 M5: Updated to use orchestrator's approval API + */ + async checkApprovalStatus(approvalRequestId: string): Promise<{ + status: 'pending' | 'approved' | 'rejected' | 'expired'; + approvedAt?: string; + rejectedAt?: string; + reason?: string; + }> { + try { + // Use orchestrator URL for approval checks + const orchestratorUrl = this.configService.get( + 'WORKFLOW_ORCHESTRATOR_URL', + '', + ); + + if (!orchestratorUrl) { + this.logger.warn('WORKFLOW_ORCHESTRATOR_URL not set, cannot check approval'); + return { status: 'pending' }; + } + + const url = `${orchestratorUrl}/api/v1/approvals/${approvalRequestId}`; + + const response = await fetch(url, { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + }, + }); + + if (!response.ok) { + throw new Error(`Failed to check approval status: ${response.status}`); + } + + const data = await response.json(); + const approval = data.approval; + + // Map status to expected format + const statusMap: Record = { + 'PENDING': 'pending', + 'APPROVED': 'approved', + 'REJECTED': 'rejected', + 'EXPIRED': 'expired', + }; + + return { + status: statusMap[approval.status] || 'pending', + approvedAt: approval.decision?.at, + rejectedAt: approval.decision?.at, + reason: approval.decision?.reason, + }; + } catch (error: any) { + this.logger.error(`Failed to check approval status: ${error.message}`); + return { status: 'pending' }; + } + } + + /** + * Request approval for a high-risk action + * v2.3.0 M5: Creates approval request in orchestrator + */ + async requestApproval(request: { + nodeRunId: string; + workspaceId: string; + tenantId: string; + toolName: string; + toolParams: Record; + currentUrl?: string; + aiReasoning?: string; + }): Promise<{ + approvalRequestId: string; + message: string; + expiresAt: string; + } | null> { + try { + const orchestratorUrl = this.configService.get( + 'WORKFLOW_ORCHESTRATOR_URL', + '', + ); + + if (!orchestratorUrl) { + this.logger.warn('WORKFLOW_ORCHESTRATOR_URL not set, cannot request approval'); + return null; + } + + const url = `${orchestratorUrl}/api/v1/approvals/request`; + + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + }, + body: JSON.stringify({ + nodeRunId: request.nodeRunId, + workspaceId: request.workspaceId, + tenantId: request.tenantId, + toolName: request.toolName, + toolParams: request.toolParams, + currentUrl: request.currentUrl, + aiReasoning: request.aiReasoning, + }), + }); + + if (!response.ok) { + const error = await response.json().catch(() => ({ message: 'Unknown error' })); + throw new Error(error.message || `Request failed: ${response.status}`); + } + + const data = await response.json(); + + this.logger.log(`Created approval request: ${data.approval.id} for ${request.toolName}`); + + return { + approvalRequestId: data.approval.id, + message: 'Action requires human approval', + expiresAt: data.approval.expiresAt, + }; + } catch (error: any) { + this.logger.error(`Failed to request approval: ${error.message}`); + return null; + } + } + + /** + * Execute a tool that has been approved + */ + async executeApprovedTool( + approvalRequestId: string, + request: GatewayToolRequest, + ): Promise { + const startTime = Date.now(); + + try { + const url = `${this.gatewayUrl}/api/v1/tools/${request.toolName}/execute`; + this.logger.log(`Executing approved tool: ${request.toolName}`); + + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + 'X-Approval-Id': approvalRequestId, // Pass approval ID for audit + }, + body: JSON.stringify({ + parameters: request.parameters, + }), + }); + + const executionTimeMs = Date.now() - startTime; + + if (!response.ok) { + const error = await response.json().catch(() => ({ message: 'Unknown error' })); + return { + success: false, + error: error.message || `Execution failed: ${response.status}`, + executionTimeMs, + }; + } + + const result = await response.json(); + + return { + success: true, + result: result.data, + executionTimeMs, + }; + } catch (error: any) { + const executionTimeMs = Date.now() - startTime; + return { + success: false, + error: error.message, + executionTimeMs, + }; + } + } +} diff --git a/packages/bytebot-agent/src/google/google.service.ts b/packages/bytebot-agent/src/google/google.service.ts index 42c806cfd..e46750a56 100644 --- a/packages/bytebot-agent/src/google/google.service.ts +++ b/packages/bytebot-agent/src/google/google.service.ts @@ -12,9 +12,11 @@ import { } from '@bytebot/shared'; import { BytebotAgentService, + BytebotAgentGenerateMessageOptions, BytebotAgentInterrupt, BytebotAgentResponse, } from '../agent/agent.types'; +import { filterToolsByPolicy } from '../agent/tool-policy'; import { Message, Role } from '@prisma/client'; import { googleTools } from './google.tools'; import { @@ -49,15 +51,30 @@ export class GoogleService implements BytebotAgentService { systemPrompt: string, messages: Message[], model: string = DEFAULT_MODEL.name, - useTools: boolean = true, - signal?: AbortSignal, + options: BytebotAgentGenerateMessageOptions = {}, ): Promise { + const useTools = options.useTools ?? true; + const signal = options.signal; + try { const maxTokens = 8192; // Convert our message content blocks to Anthropic's expected format const googleMessages = this.formatMessagesForGoogle(messages); + const googleToolsWithNames = googleTools.filter( + (tool): tool is (typeof googleTools)[number] & { name: string } => + typeof tool.name === 'string' && tool.name.trim().length > 0, + ); + + const functionDeclarations = useTools + ? filterToolsByPolicy( + googleToolsWithNames, + (tool) => tool.name, + options.toolPolicy, + ) + : []; + const response: GenerateContentResponse = await this.google.models.generateContent({ model, @@ -71,7 +88,7 @@ export class GoogleService implements BytebotAgentService { tools: useTools ? [ { - functionDeclarations: googleTools, + functionDeclarations, }, ] : [], diff --git a/packages/bytebot-agent/src/llm-resilience/llm-resilience.module.ts b/packages/bytebot-agent/src/llm-resilience/llm-resilience.module.ts new file mode 100644 index 000000000..c485bab1a --- /dev/null +++ b/packages/bytebot-agent/src/llm-resilience/llm-resilience.module.ts @@ -0,0 +1,19 @@ +/** + * LLM Resilience Module + * + * Provides retry logic, circuit breaker, and error classification + * for all LLM API calls in the bytebot-agent. + */ + +import { Module, Global } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { EventEmitterModule } from '@nestjs/event-emitter'; +import { LLMResilienceService } from './llm-resilience.service'; + +@Global() +@Module({ + imports: [ConfigModule, EventEmitterModule], + providers: [LLMResilienceService], + exports: [LLMResilienceService], +}) +export class LLMResilienceModule {} diff --git a/packages/bytebot-agent/src/llm-resilience/llm-resilience.service.spec.ts b/packages/bytebot-agent/src/llm-resilience/llm-resilience.service.spec.ts new file mode 100644 index 000000000..e9bdcb839 --- /dev/null +++ b/packages/bytebot-agent/src/llm-resilience/llm-resilience.service.spec.ts @@ -0,0 +1,92 @@ +import { LLMResilienceService, LLMErrorType } from './llm-resilience.service'; + +describe('LLMResilienceService', () => { + const makeService = (overrides: Record = {}) => { + const configService = { + get: jest.fn((key: string, fallback: string) => { + const map: Record = { + LLM_MAX_RETRIES: '5', + LLM_BASE_DELAY_MS: '1', + LLM_MAX_DELAY_MS: '10', + LLM_JITTER_FACTOR: '0', + LLM_CIRCUIT_BREAKER_THRESHOLD: '2', + LLM_CIRCUIT_BREAKER_RESET_MS: '60000', + ...overrides, + }; + return map[key] ?? fallback; + }), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const service = new LLMResilienceService(configService, eventEmitter); + return { service, configService, eventEmitter }; + }; + + it('opens circuit breaker after threshold and fails fast', async () => { + const { service } = makeService({ LLM_CIRCUIT_BREAKER_THRESHOLD: '2' }); + + const operation = jest.fn(async () => { + const error = new Error('connect ECONNREFUSED 10.0.0.1:4000'); + (error as any).code = 'ECONNREFUSED'; + throw error; + }); + + const r1 = await service.executeWithRetry(operation, 'litellm', { maxRetries: 0 }); + expect(r1.success).toBe(false); + expect(r1.attempts).toBe(1); + expect(service.getCircuitBreakerStatus('litellm')?.state).toBe('CLOSED'); + + const r2 = await service.executeWithRetry(operation, 'litellm', { maxRetries: 0 }); + expect(r2.success).toBe(false); + expect(r2.attempts).toBe(1); + expect(service.getCircuitBreakerStatus('litellm')?.state).toBe('OPEN'); + + const r3 = await service.executeWithRetry(operation, 'litellm', { maxRetries: 0 }); + expect(r3.success).toBe(false); + expect(r3.attempts).toBe(0); + expect(r3.error?.message).toMatch(/Circuit breaker open/i); + + expect(operation).toHaveBeenCalledTimes(2); + }); + + it('treats auth errors as non-retryable', async () => { + const { service } = makeService(); + + const operation = jest.fn(async () => { + const error = new Error('401 Unauthorized'); + (error as any).status = 401; + throw error; + }); + + const result = await service.executeWithRetry(operation, 'litellm', { maxRetries: 5 }); + + expect(result.success).toBe(false); + expect(result.attempts).toBe(1); + expect(result.error?.type).toBe(LLMErrorType.AUTH_ERROR); + expect(result.error?.retryable).toBe(false); + }); + + it('can be manually tripped open for fast failover', async () => { + const { service } = makeService({ LLM_CIRCUIT_BREAKER_THRESHOLD: '5' }); + + service.openCircuit('litellm', { + type: LLMErrorType.NETWORK, + message: 'connect ECONNREFUSED 10.0.0.1:4000', + retryable: true, + }); + + expect(service.getCircuitBreakerStatus('litellm')?.state).toBe('OPEN'); + + const operation = jest.fn(async () => { + throw new Error('should not be called'); + }); + + const result = await service.executeWithRetry(operation, 'litellm', { maxRetries: 0 }); + expect(result.success).toBe(false); + expect(result.attempts).toBe(0); + expect(operation).not.toHaveBeenCalled(); + }); +}); diff --git a/packages/bytebot-agent/src/llm-resilience/llm-resilience.service.ts b/packages/bytebot-agent/src/llm-resilience/llm-resilience.service.ts new file mode 100644 index 000000000..b274308cc --- /dev/null +++ b/packages/bytebot-agent/src/llm-resilience/llm-resilience.service.ts @@ -0,0 +1,587 @@ +/** + * LLM Resilience Service + * v1.0.0: Industry-standard retry logic for LLM API calls + * + * Implements patterns from: + * - OpenAI: Exponential backoff with jitter + * - Anthropic: Retry-after header respect + * - Manus: "Diagnose → Retry → Pivot" error handling + * - Google SRE: Circuit breaker pattern + * + * This service provides: + * - Exponential backoff with jitter (prevents thundering herd) + * - Configurable retry attempts (default: 5 for transient failures) + * - Error classification (transient vs. permanent) + * - Retry-after header support (HTTP 429, 503) + * - Circuit breaker for cascading failure prevention + * - Observability metrics for monitoring + * + * @see https://platform.openai.com/docs/guides/rate-limits + * @see https://docs.anthropic.com/en/api/errors + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; + +// ============================================================================= +// Error Classification Types +// ============================================================================= + +export enum LLMErrorType { + // Transient errors - retry with backoff + RATE_LIMIT = 'RATE_LIMIT', // 429: Rate limit exceeded + SERVER_ERROR = 'SERVER_ERROR', // 5xx: Server-side error + TIMEOUT = 'TIMEOUT', // Request timeout + NETWORK = 'NETWORK', // Connection errors + OVERLOADED = 'OVERLOADED', // 529: Overloaded (Anthropic) + + // Permanent errors - do not retry + AUTH_ERROR = 'AUTH_ERROR', // 401/403: Authentication/authorization + BAD_REQUEST = 'BAD_REQUEST', // 400: Invalid request + NOT_FOUND = 'NOT_FOUND', // 404: Model/resource not found + CONTENT_FILTER = 'CONTENT_FILTER', // Content moderation triggered + CONTEXT_LENGTH = 'CONTEXT_LENGTH', // Context length exceeded + + // Unknown + UNKNOWN = 'UNKNOWN', +} + +export interface LLMError { + type: LLMErrorType; + message: string; + statusCode?: number; + retryable: boolean; + retryAfterMs?: number; + originalError?: Error; +} + +export interface RetryConfig { + maxRetries: number; + baseDelayMs: number; + maxDelayMs: number; + jitterFactor: number; // 0-1, percentage of delay to add as jitter +} + +export interface RetryResult { + success: boolean; + result?: T; + error?: LLMError; + attempts: number; + totalDurationMs: number; +} + +// ============================================================================= +// Circuit Breaker State +// ============================================================================= + +interface CircuitBreakerState { + failures: number; + lastFailure?: Date; + state: 'CLOSED' | 'OPEN' | 'HALF_OPEN'; + openedAt?: Date; +} + +// ============================================================================= +// Service Implementation +// ============================================================================= + +@Injectable() +export class LLMResilienceService { + private readonly logger = new Logger(LLMResilienceService.name); + + // Circuit breaker per endpoint/provider + private readonly circuitBreakers = new Map(); + + // Default configuration + private readonly defaultConfig: RetryConfig; + private readonly circuitBreakerThreshold: number; + private readonly circuitBreakerResetMs: number; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.defaultConfig = { + maxRetries: parseInt(this.configService.get('LLM_MAX_RETRIES', '5'), 10), + baseDelayMs: parseInt(this.configService.get('LLM_BASE_DELAY_MS', '1000'), 10), + maxDelayMs: parseInt(this.configService.get('LLM_MAX_DELAY_MS', '60000'), 10), + jitterFactor: parseFloat(this.configService.get('LLM_JITTER_FACTOR', '0.3')), + }; + + this.circuitBreakerThreshold = parseInt( + this.configService.get('LLM_CIRCUIT_BREAKER_THRESHOLD', '5'), + 10, + ); + this.circuitBreakerResetMs = parseInt( + // Default to 2 minutes to reduce hammering during upstream outages. + this.configService.get('LLM_CIRCUIT_BREAKER_RESET_MS', '120000'), + 10, + ); + + this.logger.log( + `LLMResilienceService initialized (maxRetries: ${this.defaultConfig.maxRetries}, ` + + `baseDelay: ${this.defaultConfig.baseDelayMs}ms, ` + + `circuitBreaker: ${this.circuitBreakerThreshold} failures)`, + ); + } + + /** + * Execute an LLM API call with retry logic + * + * @param operation - Async function that makes the LLM API call + * @param endpoint - Identifier for circuit breaker tracking (e.g., 'litellm', 'openai') + * @param config - Optional retry configuration override + * @returns Result with success status, result/error, and metrics + */ + async executeWithRetry( + operation: () => Promise, + endpoint: string = 'default', + config?: Partial, + ): Promise> { + const retryConfig = { ...this.defaultConfig, ...config }; + const startTime = Date.now(); + let attempts = 0; + let lastError: LLMError | undefined; + + // Check circuit breaker before attempting + if (this.isCircuitOpen(endpoint)) { + this.logger.warn(`Circuit breaker OPEN for ${endpoint}, failing fast`); + return { + success: false, + error: { + type: LLMErrorType.SERVER_ERROR, + message: `Circuit breaker open for ${endpoint}`, + retryable: false, + }, + attempts: 0, + totalDurationMs: Date.now() - startTime, + }; + } + + while (attempts <= retryConfig.maxRetries) { + attempts++; + + try { + this.logger.debug( + `LLM call attempt ${attempts}/${retryConfig.maxRetries + 1} to ${endpoint}`, + ); + + const result = await operation(); + + // Success - reset circuit breaker + this.recordSuccess(endpoint); + + // Emit success metric + this.eventEmitter.emit('llm.call.success', { + endpoint, + attempts, + durationMs: Date.now() - startTime, + }); + + return { + success: true, + result, + attempts, + totalDurationMs: Date.now() - startTime, + }; + } catch (error: any) { + lastError = this.classifyError(error); + + this.logger.warn( + `LLM call attempt ${attempts} failed: ${lastError.type} - ${lastError.message}`, + ); + + // Record failure for circuit breaker (only for retryable/transient failures) + if (lastError.retryable) { + this.recordFailure(endpoint, lastError); + } + + // If error is not retryable, fail immediately + if (!lastError.retryable) { + this.logger.error( + `LLM call failed with non-retryable error: ${lastError.type}`, + ); + break; + } + + // If we've exhausted retries, fail + if (attempts > retryConfig.maxRetries) { + this.logger.error( + `LLM call failed after ${attempts} attempts, exhausted retries`, + ); + break; + } + + // Calculate delay with exponential backoff and jitter + const delay = this.calculateDelay( + attempts, + retryConfig, + lastError.retryAfterMs, + ); + + this.logger.log( + `Retrying LLM call in ${Math.round(delay)}ms ` + + `(attempt ${attempts + 1}/${retryConfig.maxRetries + 1})`, + ); + + await this.sleep(delay); + } + } + + // Emit failure metric + this.eventEmitter.emit('llm.call.failed', { + endpoint, + attempts, + durationMs: Date.now() - startTime, + errorType: lastError?.type, + }); + + return { + success: false, + error: lastError, + attempts, + totalDurationMs: Date.now() - startTime, + }; + } + + /** + * Classify an error to determine if it's retryable + */ + classifyError(error: any): LLMError { + const message = error?.message || String(error); + const statusCode = error?.status || error?.statusCode || error?.code; + + // Rate limit (429) + if (statusCode === 429 || message.toLowerCase().includes('rate limit')) { + const retryAfterMs = this.parseRetryAfter(error); + return { + type: LLMErrorType.RATE_LIMIT, + message: message, + statusCode: 429, + retryable: true, + retryAfterMs, + originalError: error, + }; + } + + // Overloaded (529 - Anthropic specific) + if (statusCode === 529 || message.toLowerCase().includes('overloaded')) { + return { + type: LLMErrorType.OVERLOADED, + message: message, + statusCode: 529, + retryable: true, + retryAfterMs: 5000, // Default 5s for overload + originalError: error, + }; + } + + // Server errors (5xx) + if (statusCode >= 500 && statusCode < 600) { + return { + type: LLMErrorType.SERVER_ERROR, + message: message, + statusCode, + retryable: true, + originalError: error, + }; + } + + // Timeout + if ( + message.toLowerCase().includes('timeout') || + message.toLowerCase().includes('etimedout') || + error?.code === 'ETIMEDOUT' || + error?.code === 'ESOCKETTIMEDOUT' + ) { + return { + type: LLMErrorType.TIMEOUT, + message: message, + retryable: true, + originalError: error, + }; + } + + // Network errors + if ( + message.toLowerCase().includes('econnrefused') || + message.toLowerCase().includes('enotfound') || + message.toLowerCase().includes('socket hang up') || + message.toLowerCase().includes('network') || + message.toLowerCase().includes('connection') || + error?.code === 'ECONNREFUSED' || + error?.code === 'ENOTFOUND' || + error?.code === 'ECONNRESET' + ) { + return { + type: LLMErrorType.NETWORK, + message: message, + retryable: true, + originalError: error, + }; + } + + // Authentication errors (401, 403) + if (statusCode === 401 || statusCode === 403) { + return { + type: LLMErrorType.AUTH_ERROR, + message: message, + statusCode, + retryable: false, + originalError: error, + }; + } + + // Bad request (400) + if (statusCode === 400) { + return { + type: LLMErrorType.BAD_REQUEST, + message: message, + statusCode: 400, + retryable: false, + originalError: error, + }; + } + + // Not found (404) + if (statusCode === 404) { + return { + type: LLMErrorType.NOT_FOUND, + message: message, + statusCode: 404, + retryable: false, + originalError: error, + }; + } + + // Content filter / moderation + if ( + message.toLowerCase().includes('content') && + (message.toLowerCase().includes('filter') || + message.toLowerCase().includes('moderation') || + message.toLowerCase().includes('policy')) + ) { + return { + type: LLMErrorType.CONTENT_FILTER, + message: message, + retryable: false, + originalError: error, + }; + } + + // Context length exceeded + if ( + message.toLowerCase().includes('context') || + message.toLowerCase().includes('token') || + message.toLowerCase().includes('length') + ) { + return { + type: LLMErrorType.CONTEXT_LENGTH, + message: message, + retryable: false, + originalError: error, + }; + } + + // Unknown - default to retryable for safety + return { + type: LLMErrorType.UNKNOWN, + message: message, + retryable: true, // Conservative: retry unknown errors + originalError: error, + }; + } + + /** + * Calculate delay with exponential backoff and jitter + * + * Formula: min(maxDelay, baseDelay * 2^attempt) + random_jitter + */ + private calculateDelay( + attempt: number, + config: RetryConfig, + retryAfterMs?: number, + ): number { + // If server specified retry-after, use it (with bounds) + if (retryAfterMs && retryAfterMs > 0) { + return Math.min(retryAfterMs, config.maxDelayMs); + } + + // Exponential backoff: baseDelay * 2^(attempt-1) + const exponentialDelay = config.baseDelayMs * Math.pow(2, attempt - 1); + const boundedDelay = Math.min(exponentialDelay, config.maxDelayMs); + + // Add jitter: random value between 0 and jitterFactor * delay + const jitter = Math.random() * config.jitterFactor * boundedDelay; + + return boundedDelay + jitter; + } + + /** + * Parse retry-after header from error response + */ + private parseRetryAfter(error: any): number | undefined { + // Check for retry-after in headers + const retryAfter = + error?.headers?.['retry-after'] || + error?.response?.headers?.['retry-after'] || + error?.error?.headers?.['retry-after']; + + if (!retryAfter) return undefined; + + // Could be seconds (number) or HTTP date + const seconds = parseInt(retryAfter, 10); + if (!isNaN(seconds)) { + return seconds * 1000; // Convert to ms + } + + // Try parsing as HTTP date + const date = new Date(retryAfter); + if (!isNaN(date.getTime())) { + return Math.max(0, date.getTime() - Date.now()); + } + + return undefined; + } + + /** + * Check if circuit breaker is open for an endpoint + */ + private isCircuitOpen(endpoint: string): boolean { + const state = this.circuitBreakers.get(endpoint); + if (!state || state.state === 'CLOSED') return false; + + if (state.state === 'OPEN') { + // Check if it's time to try half-open + const timeSinceOpen = Date.now() - (state.openedAt?.getTime() || 0); + if (timeSinceOpen >= this.circuitBreakerResetMs) { + state.state = 'HALF_OPEN'; + this.logger.log(`Circuit breaker for ${endpoint} moving to HALF_OPEN`); + this.eventEmitter.emit('llm.circuit.state-changed', { + endpoint, + state: 'HALF_OPEN', + }); + return false; // Allow one request through + } + return true; + } + + return false; // HALF_OPEN allows requests + } + + /** + * Record a successful call (resets circuit breaker) + */ + private recordSuccess(endpoint: string): void { + const state = this.circuitBreakers.get(endpoint); + if (state) { + const previousState = state.state; + if (state.state === 'HALF_OPEN') { + this.logger.log(`Circuit breaker for ${endpoint} reset to CLOSED`); + } + state.failures = 0; + state.state = 'CLOSED'; + state.openedAt = undefined; + + if (previousState !== 'CLOSED') { + this.eventEmitter.emit('llm.circuit.state-changed', { + endpoint, + state: 'CLOSED', + }); + } + } + } + + /** + * Record a failed call (may trip circuit breaker) + */ + private recordFailure(endpoint: string, error: LLMError): void { + let state = this.circuitBreakers.get(endpoint); + if (!state) { + state = { failures: 0, state: 'CLOSED' }; + this.circuitBreakers.set(endpoint, state); + } + + state.failures++; + state.lastFailure = new Date(); + + // Check if we should open the circuit + if (state.failures >= this.circuitBreakerThreshold && state.state !== 'OPEN') { + state.state = 'OPEN'; + state.openedAt = new Date(); + this.logger.warn( + `Circuit breaker for ${endpoint} OPENED after ${state.failures} failures`, + ); + + this.eventEmitter.emit('llm.circuit.state-changed', { + endpoint, + state: 'OPEN', + }); + + // Emit circuit breaker event + this.eventEmitter.emit('llm.circuit.opened', { + endpoint, + failures: state.failures, + lastError: error.type, + }); + } + } + + /** + * Get circuit breaker status for an endpoint + */ + getCircuitBreakerStatus(endpoint: string): CircuitBreakerState | null { + return this.circuitBreakers.get(endpoint) || null; + } + + /** + * Force-open the circuit breaker for an endpoint. + * + * Used by higher-level routing logic to fail over quickly when an endpoint + * is known-bad (e.g., connection refused/timeouts during a gateway restart). + */ + openCircuit(endpoint: string, error?: LLMError): void { + let state = this.circuitBreakers.get(endpoint); + if (!state) { + state = { failures: 0, state: 'CLOSED' }; + this.circuitBreakers.set(endpoint, state); + } + + if (state.state === 'OPEN') return; + + state.failures = Math.max(state.failures, this.circuitBreakerThreshold); + state.lastFailure = new Date(); + state.state = 'OPEN'; + state.openedAt = new Date(); + + this.logger.warn(`Circuit breaker for ${endpoint} OPENED (manual trip)`); + + this.eventEmitter.emit('llm.circuit.state-changed', { + endpoint, + state: 'OPEN', + }); + + this.eventEmitter.emit('llm.circuit.opened', { + endpoint, + failures: state.failures, + lastError: error?.type ?? LLMErrorType.UNKNOWN, + manual: true, + }); + } + + /** + * Reset circuit breaker for an endpoint (for manual intervention) + */ + resetCircuitBreaker(endpoint: string): void { + this.circuitBreakers.delete(endpoint); + this.logger.log(`Circuit breaker for ${endpoint} manually reset`); + } + + /** + * Sleep helper + */ + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} diff --git a/packages/bytebot-agent/src/main.ts b/packages/bytebot-agent/src/main.ts index d9eef248e..f9ed59e01 100644 --- a/packages/bytebot-agent/src/main.ts +++ b/packages/bytebot-agent/src/main.ts @@ -2,17 +2,118 @@ import { NestFactory } from '@nestjs/core'; import { AppModule } from './app.module'; import { webcrypto } from 'crypto'; import { json, urlencoded } from 'express'; +import { Logger, INestApplication } from '@nestjs/common'; +import { RedisIoAdapter } from './adapters/redis-io.adapter'; // Polyfill for crypto global (required by @nestjs/schedule) if (!globalThis.crypto) { globalThis.crypto = webcrypto as any; } +const logger = new Logger('Bootstrap'); +let app: INestApplication; + +/** + * Global error handlers for unhandled promise rejections and uncaught exceptions. + * v2.0.28: Added to catch async errors that escape normal error handling. + * + * Best practice: Log the error with full context, then exit the process. + * Use Kubernetes/PM2 to automatically restart the application. + */ +function setupGlobalErrorHandlers(): void { + // Handle unhandled promise rejections (async errors not caught) + process.on('unhandledRejection', (reason: Error | unknown, promise: Promise) => { + const timestamp = new Date().toISOString(); + const errorMessage = reason instanceof Error ? reason.message : String(reason); + const errorStack = reason instanceof Error ? reason.stack : undefined; + + logger.error({ + event: 'unhandledRejection', + timestamp, + error: errorMessage, + stack: errorStack, + promise: String(promise), + }); + + // In production, we log and let the process continue but track these + // Kubernetes will restart the pod if it becomes unhealthy + logger.error( + `[${timestamp}] UNHANDLED REJECTION: ${errorMessage}\n${errorStack || 'No stack trace'}`, + ); + }); + + // Handle uncaught exceptions (sync errors not handled) + process.on('uncaughtException', (error: Error, origin: string) => { + const timestamp = new Date().toISOString(); + + logger.error({ + event: 'uncaughtException', + timestamp, + error: error.message, + stack: error.stack, + origin, + }); + + logger.error( + `[${timestamp}] UNCAUGHT EXCEPTION (${origin}): ${error.message}\n${error.stack || 'No stack trace'}`, + ); + + // For uncaught exceptions, we must exit - the process is in an undefined state + // Kubernetes will automatically restart the pod + process.exit(1); + }); + + // Handle graceful shutdown signals + process.on('SIGTERM', async () => { + logger.log('SIGTERM signal received: starting graceful shutdown'); + await gracefulShutdown('SIGTERM'); + }); + + process.on('SIGINT', async () => { + logger.log('SIGINT signal received: starting graceful shutdown'); + await gracefulShutdown('SIGINT'); + }); +} + +/** + * Graceful shutdown handler - closes connections cleanly before exit + */ +async function gracefulShutdown(signal: string): Promise { + const shutdownTimeout = setTimeout(() => { + logger.error('Forcing shutdown after timeout'); + process.exit(1); + }, 10000); // 10 second timeout + + try { + if (app) { + await app.close(); + logger.log('Application closed successfully'); + } + clearTimeout(shutdownTimeout); + process.exit(0); + } catch (error) { + logger.error(`Error during ${signal} shutdown:`, error); + clearTimeout(shutdownTimeout); + process.exit(1); + } +} + async function bootstrap() { - console.log('Starting bytebot-agent application...'); + logger.log('Starting bytebot-agent application (v2.2.14)...'); + + // Set up global error handlers BEFORE creating the app + setupGlobalErrorHandlers(); try { - const app = await NestFactory.create(AppModule); + app = await NestFactory.create(AppModule); + + // v2.2.12: Configure Redis adapter for WebSocket cross-replica events + // This ensures task updates are broadcast to all connected clients + // regardless of which replica they're connected to + const redisIoAdapter = new RedisIoAdapter(app); + await redisIoAdapter.connectToRedis(); + app.useWebSocketAdapter(redisIoAdapter); + logger.log('WebSocket Redis adapter configured'); // Configure body parser with increased payload size limit (50MB) app.use(json({ limit: '50mb' })); @@ -24,9 +125,13 @@ async function bootstrap() { methods: ['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH'], }); - await app.listen(process.env.PORT ?? 9991); + const port = process.env.PORT ?? 9991; + await app.listen(port); + logger.log(`Application listening on port ${port}`); + logger.log('Global error handlers initialized'); } catch (error) { - console.error('Error starting application:', error); + logger.error('Error starting application:', error); + process.exit(1); } } bootstrap(); diff --git a/packages/bytebot-agent/src/messages/messages.service.ts b/packages/bytebot-agent/src/messages/messages.service.ts index fd2e79e55..84661b00a 100644 --- a/packages/bytebot-agent/src/messages/messages.service.ts +++ b/packages/bytebot-agent/src/messages/messages.service.ts @@ -3,6 +3,7 @@ import { NotFoundException, Inject, forwardRef, + Logger, } from '@nestjs/common'; import { PrismaService } from '../prisma/prisma.service'; import { Message, Role, Prisma } from '@prisma/client'; @@ -11,6 +12,7 @@ import { isComputerToolUseContentBlock, isToolResultContentBlock, isUserActionContentBlock, + isToolUseContentBlock, } from '@bytebot/shared'; import { TasksGateway } from '../tasks/tasks.gateway'; @@ -27,6 +29,8 @@ export interface GroupedMessages { @Injectable() export class MessagesService { + private readonly logger = new Logger(MessagesService.name); + constructor( private prisma: PrismaService, @Inject(forwardRef(() => TasksGateway)) @@ -38,6 +42,53 @@ export class MessagesService { role: Role; taskId: string; }): Promise { + // v2.2.7: Idempotency check for ASSISTANT messages + // Prevents duplicate messages from race conditions or SDK retries + // See: 2025-12-09-race-condition-duplicate-llm-calls-fix.md + if (data.role === Role.ASSISTANT) { + const toolUseIds = data.content + .filter((block) => isToolUseContentBlock(block)) + .map((block: any) => block.id); + + if (toolUseIds.length > 0) { + // Check if any of these tool_use IDs already exist in previous messages + const existingMessages = await this.prisma.message.findMany({ + where: { taskId: data.taskId }, + orderBy: { createdAt: 'desc' }, + take: 10, // Check last 10 messages for efficiency + }); + + const existingToolUseIds = new Set(); + for (const msg of existingMessages) { + const content = msg.content as MessageContentBlock[]; + for (const block of content) { + if (isToolUseContentBlock(block)) { + existingToolUseIds.add((block as any).id); + } + } + } + + const duplicateIds = toolUseIds.filter((id) => existingToolUseIds.has(id)); + if (duplicateIds.length > 0) { + this.logger.warn( + `[Idempotency] Skipping duplicate ASSISTANT message for task ${data.taskId}: ` + + `tool_use IDs ${duplicateIds.join(', ')} already exist in message history`, + ); + // Return a minimal message object without actually creating it + // The caller can continue without error, but no duplicate is created + const existingMsg = existingMessages.find((msg) => { + const content = msg.content as MessageContentBlock[]; + return content.some( + (block) => isToolUseContentBlock(block) && duplicateIds.includes((block as any).id), + ); + }); + if (existingMsg) { + return existingMsg; + } + } + } + } + const message = await this.prisma.message.create({ data: { content: data.content as Prisma.InputJsonValue, diff --git a/packages/bytebot-agent/src/metrics/metrics.controller.ts b/packages/bytebot-agent/src/metrics/metrics.controller.ts new file mode 100644 index 000000000..66730bc73 --- /dev/null +++ b/packages/bytebot-agent/src/metrics/metrics.controller.ts @@ -0,0 +1,15 @@ +import { Controller, Get, Res } from '@nestjs/common'; +import { Response } from 'express'; +import { MetricsService } from './metrics.service'; + +@Controller() +export class MetricsController { + constructor(private readonly metricsService: MetricsService) {} + + @Get('metrics') + async getMetrics(@Res() res: Response): Promise { + res.setHeader('Content-Type', this.metricsService.contentType); + res.status(200).send(await this.metricsService.getMetrics()); + } +} + diff --git a/packages/bytebot-agent/src/metrics/metrics.module.ts b/packages/bytebot-agent/src/metrics/metrics.module.ts new file mode 100644 index 000000000..f6e4eab62 --- /dev/null +++ b/packages/bytebot-agent/src/metrics/metrics.module.ts @@ -0,0 +1,10 @@ +import { Module } from '@nestjs/common'; +import { MetricsController } from './metrics.controller'; +import { MetricsService } from './metrics.service'; + +@Module({ + controllers: [MetricsController], + providers: [MetricsService], +}) +export class MetricsModule {} + diff --git a/packages/bytebot-agent/src/metrics/metrics.service.ts b/packages/bytebot-agent/src/metrics/metrics.service.ts new file mode 100644 index 000000000..730e2536e --- /dev/null +++ b/packages/bytebot-agent/src/metrics/metrics.service.ts @@ -0,0 +1,229 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { OnEvent } from '@nestjs/event-emitter'; +import { + Counter, + Gauge, + Histogram, + Registry, + collectDefaultMetrics, +} from 'prom-client'; + +type LlmFailoverEvent = { + endpoint: string; + requestedModel: string; + usedModel: string; +}; + +type LlmCircuitStateChangedEvent = { + endpoint: string; + state: 'CLOSED' | 'OPEN' | 'HALF_OPEN'; +}; + +type LlmEndpointFailoverEvent = { + fromEndpoint: string; + toEndpoint: string; + reason: string; + requestedModel: string; +}; + +type LlmEndpointCallEvent = { + endpoint: string; + requestedModel: string; + durationMs: number; +}; + +type DesktopUiRepairResultEvent = { + attempt: string; + outcome: 'success' | 'failure_no_change' | 'failure_invalid' | 'unknown'; + trigger?: string; +}; + +@Injectable() +export class MetricsService { + private readonly logger = new Logger(MetricsService.name); + private readonly registry = new Registry(); + + private readonly llmFailoverTotal = new Counter({ + name: 'bytebot_llm_failover_total', + help: 'Total LLM failovers (requested_model != used_model)', + labelNames: ['endpoint', 'requested_model', 'used_model'] as const, + registers: [this.registry], + }); + + private readonly llmCircuitOpenedTotal = new Counter({ + name: 'bytebot_llm_circuit_opened_total', + help: 'Total LLM circuit breaker openings', + labelNames: ['endpoint'] as const, + registers: [this.registry], + }); + + private readonly llmCircuitOpen = new Gauge({ + name: 'bytebot_llm_circuit_open', + help: 'LLM circuit breaker open state (1=open, 0=closed)', + labelNames: ['endpoint'] as const, + registers: [this.registry], + }); + + private readonly llmEndpointFailoverTotal = new Counter({ + name: 'bytebot_llm_endpoint_failover_total', + help: 'Total endpoint-level failovers (primary endpoint -> fallback endpoint)', + labelNames: [ + 'from_endpoint', + 'to_endpoint', + 'reason', + 'requested_model', + ] as const, + registers: [this.registry], + }); + + private readonly llmEndpointCallDurationSeconds = new Histogram({ + name: 'bytebot_llm_endpoint_call_duration_seconds', + help: 'LLM call duration by endpoint (seconds)', + labelNames: ['endpoint', 'requested_model'] as const, + buckets: [0.25, 0.5, 1, 2, 5, 10, 20, 30, 60, 120], + registers: [this.registry], + }); + + private readonly desktopKeydownRewrittenTotal = new Counter({ + name: 'bytebot_desktop_keydown_rewritten_total', + help: 'Total illegal key-down requests rewritten into safe taps', + labelNames: ['key', 'reason'] as const, + registers: [this.registry], + }); + + private readonly desktopInterruptsTotal = new Counter({ + name: 'bytebot_desktop_interrupts_total', + help: 'Total desktop automation interrupts raised for safety', + labelNames: ['reason_code'] as const, + registers: [this.registry], + }); + + private readonly desktopLoopDetectedTotal = new Counter({ + name: 'bytebot_desktop_loop_detected_total', + help: 'Total desktop loop detections (no progress)', + labelNames: ['rule'] as const, + registers: [this.registry], + }); + + private readonly desktopForcedScreenshotTotal = new Counter({ + name: 'bytebot_desktop_forced_screenshot_total', + help: 'Total forced desktop screenshots injected to enforce observation', + labelNames: ['reason'] as const, + registers: [this.registry], + }); + + private readonly desktopUiRepairAttemptTotal = new Counter({ + name: 'bytebot_desktop_ui_repair_attempt_total', + help: 'Total desktop UI repair attempts, labeled by attempt and outcome', + labelNames: ['attempt', 'outcome'] as const, + registers: [this.registry], + }); + + private readonly desktopUiRepairSuccessTotal = new Counter({ + name: 'bytebot_desktop_ui_repair_success_total', + help: 'Total successful desktop UI repair attempts', + labelNames: ['attempt'] as const, + registers: [this.registry], + }); + + constructor() { + collectDefaultMetrics({ register: this.registry }); + } + + get contentType(): string { + return this.registry.contentType; + } + + async getMetrics(): Promise { + return await this.registry.metrics(); + } + + @OnEvent('llm.failover') + onLlmFailover(event: LlmFailoverEvent): void { + this.llmFailoverTotal.inc({ + endpoint: event.endpoint, + requested_model: event.requestedModel, + used_model: event.usedModel, + }); + } + + @OnEvent('llm.circuit.opened') + onCircuitOpened(event: { endpoint: string }): void { + this.llmCircuitOpenedTotal.inc({ endpoint: event.endpoint }); + this.llmCircuitOpen.set({ endpoint: event.endpoint }, 1); + } + + @OnEvent('llm.circuit.state-changed') + onCircuitStateChanged(event: LlmCircuitStateChangedEvent): void { + this.llmCircuitOpen.set( + { endpoint: event.endpoint }, + event.state === 'OPEN' ? 1 : 0, + ); + this.logger.debug( + `Circuit state changed for ${event.endpoint}: ${event.state}`, + ); + } + + @OnEvent('llm.endpoint.failover') + onEndpointFailover(event: LlmEndpointFailoverEvent): void { + this.llmEndpointFailoverTotal.inc({ + from_endpoint: event.fromEndpoint, + to_endpoint: event.toEndpoint, + reason: event.reason, + requested_model: event.requestedModel, + }); + } + + @OnEvent('llm.endpoint.call') + onEndpointCall(event: LlmEndpointCallEvent): void { + this.llmEndpointCallDurationSeconds.observe( + { endpoint: event.endpoint, requested_model: event.requestedModel }, + event.durationMs / 1000, + ); + } + + @OnEvent('desktop.keydown.rewritten') + onDesktopKeydownRewritten(event: { key: string; reason: string }): void { + this.desktopKeydownRewrittenTotal.inc({ + key: event.key, + reason: event.reason, + }); + } + + @OnEvent('desktop.interrupt') + onDesktopInterrupt(event: { reasonCode: string }): void { + this.desktopInterruptsTotal.inc({ reason_code: event.reasonCode }); + } + + @OnEvent('desktop.loop.detected') + onDesktopLoopDetected(event: { rule: string }): void { + this.desktopLoopDetectedTotal.inc({ rule: event.rule }); + } + + @OnEvent('desktop.forced_screenshot') + onDesktopForcedScreenshot(event: { reason: string }): void { + this.desktopForcedScreenshotTotal.inc({ reason: event.reason }); + } + + @OnEvent('desktop.ui_repair.result') + onDesktopUiRepairResult(event: DesktopUiRepairResultEvent): void { + const attemptRaw = String(event.attempt || '').trim(); + const attempt = + attemptRaw === 'esc' || attemptRaw === 'close_click' + ? attemptRaw + : 'unknown'; + + const outcomeRaw = String(event.outcome || '').trim(); + const outcome = + outcomeRaw === 'success' || + outcomeRaw === 'failure_no_change' || + outcomeRaw === 'failure_invalid' + ? outcomeRaw + : 'unknown'; + + this.desktopUiRepairAttemptTotal.inc({ attempt, outcome }); + if (outcome === 'success') { + this.desktopUiRepairSuccessTotal.inc({ attempt }); + } + } +} diff --git a/packages/bytebot-agent/src/openai/openai.service.ts b/packages/bytebot-agent/src/openai/openai.service.ts index f78e7b1b0..090dc2515 100644 --- a/packages/bytebot-agent/src/openai/openai.service.ts +++ b/packages/bytebot-agent/src/openai/openai.service.ts @@ -17,9 +17,11 @@ import { Message, Role } from '@prisma/client'; import { openaiTools } from './openai.tools'; import { BytebotAgentService, + BytebotAgentGenerateMessageOptions, BytebotAgentInterrupt, BytebotAgentResponse, } from '../agent/agent.types'; +import { filterToolsByPolicy } from '../agent/tool-policy'; @Injectable() export class OpenAIService implements BytebotAgentService { @@ -44,13 +46,19 @@ export class OpenAIService implements BytebotAgentService { systemPrompt: string, messages: Message[], model: string = DEFAULT_MODEL.name, - useTools: boolean = true, - signal?: AbortSignal, + options: BytebotAgentGenerateMessageOptions = {}, ): Promise { + const useTools = options.useTools ?? true; + const signal = options.signal; + const isReasoning = model.startsWith('o'); try { const openaiMessages = this.formatMessagesForOpenAI(messages); + const tools = useTools + ? filterToolsByPolicy(openaiTools, (tool) => tool.name, options.toolPolicy) + : []; + const maxTokens = 8192; const response = await this.openai.responses.create( { @@ -58,7 +66,7 @@ export class OpenAIService implements BytebotAgentService { max_output_tokens: maxTokens, input: openaiMessages, instructions: systemPrompt, - tools: useTools ? openaiTools : [], + tools, reasoning: isReasoning ? { effort: 'medium' } : null, store: false, include: isReasoning ? ['reasoning.encrypted_content'] : [], diff --git a/packages/bytebot-agent/src/prisma/prisma.service.ts b/packages/bytebot-agent/src/prisma/prisma.service.ts index 33fd9383f..d6dbd4672 100644 --- a/packages/bytebot-agent/src/prisma/prisma.service.ts +++ b/packages/bytebot-agent/src/prisma/prisma.service.ts @@ -1,13 +1,197 @@ -import { Injectable, OnModuleInit } from '@nestjs/common'; +import { Injectable, OnModuleInit, Logger } from '@nestjs/common'; import { PrismaClient } from '@prisma/client'; +/** + * v2.2.6: Maximum retry attempts for transient database errors + * Handles PgBouncer prepared statement errors and connection issues + */ +const MAX_DB_RETRIES = 3; + +/** + * v2.2.6: Base delay for exponential backoff (in milliseconds) + */ +const BASE_RETRY_DELAY_MS = 100; + +/** + * v2.2.19: Build datasource URL with connection pool parameters + * Prevents database connection exhaustion in multi-pod deployments + */ +function buildDatasourceUrl(): string { + const baseUrl = process.env.DATABASE_URL || ''; + const connectionLimit = parseInt(process.env.DB_CONNECTION_LIMIT || '2', 10); + const poolTimeout = parseInt(process.env.DB_POOL_TIMEOUT || '30', 10); + + // Parse and append pool parameters + try { + const url = new URL(baseUrl); + url.searchParams.set('connection_limit', connectionLimit.toString()); + url.searchParams.set('pool_timeout', poolTimeout.toString()); + return url.toString(); + } catch { + // If URL parsing fails, return base URL (will use Prisma defaults) + return baseUrl; + } +} + +/** + * v2.2.6: Check if an error is a transient database error that should be retried + * + * Handles: + * - PgBouncer prepared statement errors (code 26000) + * - Connection errors (P1001, P1002, P1017) + * - Network timeout errors + */ +function isRetryableError(error: any): boolean { + // PostgreSQL prepared statement error (PgBouncer restart) + if (error?.code === '26000' || error?.message?.includes('prepared statement')) { + return true; + } + + // Prisma connection errors + const retryablePrismaCodes = ['P1001', 'P1002', 'P1017', 'P2024']; + if (retryablePrismaCodes.includes(error?.code)) { + return true; + } + + // Connection-related error messages + const connectionErrorPatterns = [ + 'connection', + 'ECONNRESET', + 'ECONNREFUSED', + 'ETIMEDOUT', + 'socket hang up', + 'server closed', + ]; + + if (error?.message && connectionErrorPatterns.some(p => + error.message.toLowerCase().includes(p.toLowerCase()) + )) { + return true; + } + + return false; +} + +/** + * v2.2.6: Sleep for a given number of milliseconds + */ +function sleep(ms: number): Promise { + return new Promise(resolve => setTimeout(resolve, ms)); +} + @Injectable() export class PrismaService extends PrismaClient implements OnModuleInit { + private readonly logger = new Logger(PrismaService.name); + constructor() { - super(); + const datasourceUrl = buildDatasourceUrl(); + super({ + datasourceUrl, + log: [ + { level: 'error', emit: 'stdout' }, + { level: 'warn', emit: 'stdout' }, + ], + }); + + // Log connection pool configuration + const connectionLimit = process.env.DB_CONNECTION_LIMIT || '2'; + const poolTimeout = process.env.DB_POOL_TIMEOUT || '30'; + this.logger.log( + `PrismaService initialized with connection_limit=${connectionLimit}, pool_timeout=${poolTimeout}, retry support enabled`, + ); } async onModuleInit() { + // Connect to the database await this.$connect(); + this.logger.log('Connected to PostgreSQL database'); + } + + /** + * v2.2.6: Execute a database operation with retry logic + * Use this wrapper for operations that may encounter transient errors + * + * @param operation - The database operation to execute + * @param operationName - A descriptive name for logging + * @returns The result of the operation + */ + async executeWithRetry( + operation: () => Promise, + operationName: string = 'operation' + ): Promise { + let lastError: Error | null = null; + + for (let attempt = 1; attempt <= MAX_DB_RETRIES; attempt++) { + try { + return await operation(); + } catch (error: any) { + lastError = error; + + if (isRetryableError(error) && attempt < MAX_DB_RETRIES) { + const delayMs = BASE_RETRY_DELAY_MS * Math.pow(2, attempt - 1); + this.logger.warn( + `[Retry ${attempt}/${MAX_DB_RETRIES}] Transient DB error in ${operationName}: ${error.message}. Retrying in ${delayMs}ms...` + ); + await sleep(delayMs); + continue; + } + + throw error; + } + } + + throw lastError; + } + + /** + * v2.2.6: Execute a transaction with retry logic + * Wraps Prisma's $transaction with automatic retry on transient errors + * + * @param fn - The transaction function + * @param operationName - A descriptive name for logging + * @returns The result of the transaction + */ + async transactionWithRetry( + fn: (tx: Omit) => Promise, + operationName: string = 'transaction' + ): Promise { + return this.executeWithRetry( + () => this.$transaction(fn), + operationName + ); + } + + /** + * v2.2.6: Execute a raw query with retry logic + * + * @param query - The raw SQL query + * @param operationName - A descriptive name for logging + * @returns The query result + */ + async queryRawWithRetry( + query: Parameters[0], + operationName: string = 'queryRaw' + ): Promise { + return this.executeWithRetry( + () => this.$queryRaw(query) as Promise, + operationName + ); + } + + /** + * v2.2.6: Execute a raw command with retry logic + * + * @param query - The raw SQL command + * @param operationName - A descriptive name for logging + * @returns The number of affected rows + */ + async executeRawWithRetry( + query: Parameters[0], + operationName: string = 'executeRaw' + ): Promise { + return this.executeWithRetry( + () => this.$executeRaw(query), + operationName + ); } } diff --git a/packages/bytebot-agent/src/proxy/proxy.service.spec.ts b/packages/bytebot-agent/src/proxy/proxy.service.spec.ts new file mode 100644 index 000000000..a60769ac3 --- /dev/null +++ b/packages/bytebot-agent/src/proxy/proxy.service.spec.ts @@ -0,0 +1,907 @@ +import { ProxyService } from './proxy.service'; +import { LLMResilienceService } from '../llm-resilience/llm-resilience.service'; +import { MessageContentType } from '@bytebot/shared'; +import { Role } from '@prisma/client'; + +describe('ProxyService endpoint failover', () => { + const makeResilience = (eventEmitter: { emit: jest.Mock }) => { + const configService = { + get: jest.fn((key: string, fallback: string) => { + const map: Record = { + LLM_MAX_RETRIES: '0', + LLM_BASE_DELAY_MS: '1', + LLM_MAX_DELAY_MS: '1', + LLM_JITTER_FACTOR: '0', + LLM_CIRCUIT_BREAKER_THRESHOLD: '5', + LLM_CIRCUIT_BREAKER_RESET_MS: '60000', + }; + return map[key] ?? fallback; + }), + } as any; + + return new LLMResilienceService(configService, eventEmitter as any); + }; + + it('fails over to the next endpoint on NETWORK errors', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://local-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + const localCreate = jest.fn(async () => { + const error = new Error('connect ECONNREFUSED 10.0.0.1:4000'); + (error as any).code = 'ECONNREFUSED'; + throw error; + }); + const globalCreate = jest.fn(async () => { + return { + model: 'desktop-vision', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(baseURL: string): any { + if (baseURL.includes('local-proxy')) { + return { chat: { completions: { create: localCreate } } }; + } + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + const response = await service.generateMessage('system', messages, 'desktop-vision', { + useTools: false, + }); + expect(response.contentBlocks[0]).toEqual({ type: MessageContentType.Text, text: 'ok' }); + + expect(localCreate).toHaveBeenCalledTimes(1); + expect(globalCreate).toHaveBeenCalledTimes(1); + + expect(eventEmitter.emit).toHaveBeenCalledWith( + 'llm.endpoint.failover', + expect.objectContaining({ + reason: 'NETWORK', + requestedModel: 'desktop-vision', + }), + ); + }); + + it('fails over to the next endpoint on invalid/empty LLM responses', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://local-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + const localCreate = jest.fn(async () => { + return { + model: 'desktop-vision', + // Invalid/empty message (no content/tool_calls) + choices: [{ message: { content: null } }], + usage: { prompt_tokens: 1, completion_tokens: 0, total_tokens: 1 }, + }; + }); + + const globalCreate = jest.fn(async () => { + return { + model: 'desktop-vision', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(baseURL: string): any { + if (baseURL.includes('local-proxy')) { + return { chat: { completions: { create: localCreate } } }; + } + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + const response = await service.generateMessage('system', messages, 'desktop-vision', { + useTools: false, + }); + expect(response.contentBlocks[0]).toEqual({ + type: MessageContentType.Text, + text: 'ok', + }); + + expect(localCreate).toHaveBeenCalledTimes(1); + expect(globalCreate).toHaveBeenCalledTimes(1); + + expect(eventEmitter.emit).toHaveBeenCalledWith( + 'llm.endpoint.failover', + expect.objectContaining({ + reason: 'SERVER_ERROR', + requestedModel: 'desktop-vision', + }), + ); + }); + + it('treats openai/qwen3-vl-32b as desktop-vision for endpoint ordering', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://local-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + // Desktop-vision ordering is global-first + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: + 'http://global-proxy:4000,http://local-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + const localCreate = jest.fn(async () => { + const error = new Error('connect ECONNREFUSED 10.0.0.1:4000'); + (error as any).code = 'ECONNREFUSED'; + throw error; + }); + const globalCreate = jest.fn(async () => { + return { + model: 'qwen3-vl-32b', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(baseURL: string): any { + if (baseURL.includes('local-proxy')) { + return { chat: { completions: { create: localCreate } } }; + } + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + await service.generateMessage( + 'system', + messages, + 'openai/qwen3-vl-32b', + { useTools: false }, + ); + + expect(globalCreate).toHaveBeenCalledTimes(1); + expect(localCreate).toHaveBeenCalledTimes(0); + }); + + it('treats claude-sonnet-4-5 as desktop-vision for endpoint ordering', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://local-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + // Desktop-vision ordering is global-first + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: + 'http://global-proxy:4000,http://local-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + const localCreate = jest.fn(async () => { + const error = new Error('connect ECONNREFUSED 10.0.0.1:4000'); + (error as any).code = 'ECONNREFUSED'; + throw error; + }); + const globalCreate = jest.fn(async () => { + return { + model: 'claude-sonnet-4-5', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(baseURL: string): any { + if (baseURL.includes('local-proxy')) { + return { chat: { completions: { create: localCreate } } }; + } + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + await service.generateMessage( + 'system', + messages, + 'claude-sonnet-4-5', + { useTools: false }, + ); + + expect(globalCreate).toHaveBeenCalledTimes(1); + expect(localCreate).toHaveBeenCalledTimes(0); + }); + + it('disables LiteLLM caching for desktop-vision model requests', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: 'http://proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: 'http://proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + const create = jest.fn(async (request: any) => { + return { + model: 'qwen3-vl-32b', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + __request: request, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(): any { + return { chat: { completions: { create } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + await service.generateMessage('system', messages, 'openai/qwen3-vl-32b', { + useTools: false, + }); + + expect(create).toHaveBeenCalledTimes(1); + expect(create.mock.calls[0][0].cache).toEqual({ 'no-cache': true }); + }); + + it('disables LiteLLM caching for claude-sonnet-4-5 desktop-vision requests', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: 'http://proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: 'http://proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + const create = jest.fn(async (request: any) => { + return { + model: 'claude-sonnet-4-5', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + __request: request, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(): any { + return { chat: { completions: { create } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + await service.generateMessage('system', messages, 'claude-sonnet-4-5', { + useTools: false, + }); + + expect(create).toHaveBeenCalledTimes(1); + expect(create.mock.calls[0][0].cache).toEqual({ 'no-cache': true }); + }); + + it('does not replay Thinking blocks into Chat Completions history', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + let capturedRequest: any | undefined; + const globalCreate = jest.fn(async (req: any) => { + capturedRequest = req; + return { + model: 'desktop-vision', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(): any { + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.ASSISTANT, + content: [ + { + type: MessageContentType.Thinking, + thinking: 'secret thinking', + signature: 'sig', + }, + ], + }, + ] as any; + + await service.generateMessage('system', messages, 'desktop-vision', { useTools: false }); + + expect(globalCreate).toHaveBeenCalledTimes(1); + expect(capturedRequest?.messages).toEqual([ + { role: 'system', content: 'system' }, + ]); + }); + + it('does not expose desktop tools when executionSurface=TEXT_ONLY', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + let capturedRequest: any | undefined; + const globalCreate = jest.fn(async (req: any) => { + capturedRequest = req; + return { + model: 'desktop-vision', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(): any { + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + await service.generateMessage('system', messages, 'desktop-vision', { + useTools: true, + toolPolicy: { requiresDesktop: false, executionSurface: 'TEXT_ONLY' }, + }); + + expect(globalCreate).toHaveBeenCalledTimes(1); + + const toolNames = (capturedRequest?.tools || []).map((t: any) => t.function?.name); + expect(toolNames).toContain('set_task_status'); + expect(toolNames.some((name: string) => name?.startsWith('computer_'))).toBe(false); + }); + + it('limits historical screenshots sent to the model (desktop-vision)', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_MAX_IMAGE_BLOCKS: '10', + BYTEBOT_LLM_MAX_IMAGE_BLOCKS_DESKTOP_VISION: '1', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + let capturedRequest: any | undefined; + const globalCreate = jest.fn(async (req: any) => { + capturedRequest = req; + return { + model: 'qwen3-vl-32b', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(): any { + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const screenshot1 = { + type: MessageContentType.ToolResult, + tool_use_id: 'tool-1', + content: [ + { + type: MessageContentType.Image, + source: { type: 'base64', media_type: 'image/png', data: 'AAA' }, + }, + ], + }; + + const screenshot2 = { + type: MessageContentType.ToolResult, + tool_use_id: 'tool-2', + content: [ + { + type: MessageContentType.Image, + source: { type: 'base64', media_type: 'image/png', data: 'BBB' }, + }, + ], + }; + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [screenshot1], + }, + { + id: 'm2', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [screenshot2], + }, + ] as any; + + await service.generateMessage('system', messages, 'desktop-vision', { useTools: false }); + + const msgJson = JSON.stringify(capturedRequest?.messages || []); + expect(msgJson).toContain('BBB'); + expect(msgJson).not.toContain('AAA'); + }); + + it('fails over without calling the endpoint when preflight fails', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://local-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: + 'http://local-proxy:4000,http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'true', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_TTL_MS: '0', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_TIMEOUT_MS: '1', + }; + return map[key] ?? ''; + }), + } as any; + + const localCreate = jest.fn(async () => { + return { + model: 'desktop-vision', + choices: [{ message: { content: 'bad' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + const globalCreate = jest.fn(async () => { + return { + model: 'desktop-vision', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override async preflightEndpoint(baseUrl: string): Promise { + return !baseUrl.includes('local-proxy'); + } + protected override createOpenAIClient(baseURL: string): any { + if (baseURL.includes('local-proxy')) { + return { chat: { completions: { create: localCreate } } }; + } + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + const response = await service.generateMessage('system', messages, 'desktop-vision', { + useTools: false, + }); + expect(response.contentBlocks[0]).toEqual({ type: MessageContentType.Text, text: 'ok' }); + + expect(localCreate).toHaveBeenCalledTimes(0); + expect(globalCreate).toHaveBeenCalledTimes(1); + + expect(eventEmitter.emit).toHaveBeenCalledWith( + 'llm.endpoint.failover', + expect.objectContaining({ + reason: 'NETWORK', + requestedModel: 'desktop-vision', + }), + ); + }); + + it('drops invalid assistant/tool messages before sending to LiteLLM', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + let capturedRequest: any | undefined; + const globalCreate = jest.fn(async (req: any) => { + capturedRequest = req; + return { + model: 'gpt-oss-120b', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(): any { + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + // Invalid assistant message: empty/whitespace-only content + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.ASSISTANT, + content: [{ type: MessageContentType.Text, text: ' ' }], + }, + // Orphan tool result: no matching assistant tool call + { + id: 'm2', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [ + { + type: MessageContentType.ToolResult, + tool_use_id: 'tool-1', + content: [{ type: MessageContentType.Text, text: 'result' }], + }, + ], + }, + { + id: 'm3', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.USER, + content: [{ type: MessageContentType.Text, text: 'hello' }], + }, + ] as any; + + await service.generateMessage('system', messages, 'gpt-oss-120b', { useTools: false }); + expect(globalCreate).toHaveBeenCalledTimes(1); + + const roles = (capturedRequest?.messages || []).map((m: any) => m.role); + expect(roles).toContain('system'); + expect(roles).toContain('user'); + expect(roles).not.toContain('tool'); + + expect(capturedRequest?.messages).not.toEqual( + expect.arrayContaining([ + expect.objectContaining({ role: 'assistant', content: ' ' }), + ]), + ); + }); + + it('coalesces consecutive assistant messages to satisfy strict OpenAI-compatible validators', async () => { + const eventEmitter = { emit: jest.fn() }; + const llmResilienceService = makeResilience(eventEmitter); + + const configService = { + get: jest.fn((key: string) => { + const map: Record = { + BYTEBOT_LLM_PROXY_URL: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS: 'http://global-proxy:4000', + BYTEBOT_LLM_PROXY_API_KEY: 'dummy', + BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED: 'false', + }; + return map[key] ?? ''; + }), + } as any; + + let capturedRequest: any | undefined; + const globalCreate = jest.fn(async (req: any) => { + capturedRequest = req; + return { + model: 'gpt-oss-120b', + choices: [{ message: { content: 'ok' } }], + usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 }, + }; + }); + + class TestProxyService extends ProxyService { + protected override createOpenAIClient(): any { + return { chat: { completions: { create: globalCreate } } }; + } + } + + const service = new TestProxyService( + configService, + llmResilienceService, + eventEmitter as any, + ); + + const messages = [ + { + id: 'm1', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.ASSISTANT, + content: [{ type: MessageContentType.Text, text: 'first' }], + }, + { + id: 'm2', + createdAt: new Date(), + updatedAt: new Date(), + taskId: 't1', + summaryId: null, + role: Role.ASSISTANT, + content: [{ type: MessageContentType.Text, text: 'second' }], + }, + ] as any; + + await service.generateMessage('system', messages, 'gpt-oss-120b', { useTools: false }); + expect(globalCreate).toHaveBeenCalledTimes(1); + + const roles = (capturedRequest?.messages || []).map((m: any) => m.role); + // Only one assistant message should remain. + expect(roles.filter((r: string) => r === 'assistant')).toHaveLength(1); + + const assistant = (capturedRequest?.messages || []).find((m: any) => m.role === 'assistant'); + expect(assistant?.content).toContain('first'); + expect(assistant?.content).toContain('second'); + }); +}); diff --git a/packages/bytebot-agent/src/proxy/proxy.service.ts b/packages/bytebot-agent/src/proxy/proxy.service.ts index 30e843f81..16be93272 100644 --- a/packages/bytebot-agent/src/proxy/proxy.service.ts +++ b/packages/bytebot-agent/src/proxy/proxy.service.ts @@ -1,6 +1,22 @@ +/** + * Proxy Service + * v2.5.0: Added LLM resilience with retry logic and circuit breaker + * + * This service proxies LLM requests through LiteLLM and now includes: + * - Exponential backoff with jitter for transient failures + * - Circuit breaker to prevent cascading failures + * - Error classification (retryable vs. permanent) + * - Retry-after header support for rate limits + * + * @see LLMResilienceService for retry implementation details + */ + import { Injectable, Logger } from '@nestjs/common'; import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; import OpenAI, { APIUserAbortError } from 'openai'; +import { LLMResilienceService } from '../llm-resilience/llm-resilience.service'; +import { LLMErrorType } from '../llm-resilience/llm-resilience.service'; import { ChatCompletionMessageParam, ChatCompletionContentPart, @@ -15,97 +31,621 @@ import { isUserActionContentBlock, isComputerToolUseContentBlock, isImageContentBlock, - ThinkingContentBlock, } from '@bytebot/shared'; import { Message, Role } from '@prisma/client'; import { proxyTools } from './proxy.tools'; import { BytebotAgentService, + BytebotAgentGenerateMessageOptions, BytebotAgentInterrupt, BytebotAgentResponse, } from '../agent/agent.types'; +import { filterToolsByPolicy } from '../agent/tool-policy'; @Injectable() export class ProxyService implements BytebotAgentService { - private readonly openai: OpenAI; private readonly logger = new Logger(ProxyService.name); + private readonly proxyApiKey: string; + private readonly defaultProxyEndpoints: string[]; + private readonly desktopVisionProxyEndpoints: string[]; + private readonly defaultMaxImageBlocks: number; + private readonly desktopVisionMaxImageBlocks: number; + private readonly endpointPreflightEnabled: boolean; + private readonly endpointPreflightTimeoutMs: number; + private readonly endpointPreflightTtlMs: number; + private readonly endpointPreflightCache = new Map< + string, + { ok: boolean; checkedAt: number } + >(); + private readonly openaiClientsByBaseUrl = new Map(); + + constructor( + private readonly configService: ConfigService, + private readonly llmResilienceService: LLMResilienceService, + private readonly eventEmitter: EventEmitter2, + ) { + const primaryProxyUrl = (this.configService.get('BYTEBOT_LLM_PROXY_URL') || '').trim(); + this.proxyApiKey = (this.configService.get('BYTEBOT_LLM_PROXY_API_KEY') || '').trim(); + + const endpointsRaw = (this.configService.get('BYTEBOT_LLM_PROXY_ENDPOINTS') || '').trim(); + const desktopVisionEndpointsRaw = (this.configService.get('BYTEBOT_LLM_PROXY_DESKTOP_VISION_ENDPOINTS') || '').trim(); - constructor(private readonly configService: ConfigService) { - const proxyUrl = this.configService.get('BYTEBOT_LLM_PROXY_URL'); + this.defaultProxyEndpoints = this.parseProxyEndpoints(endpointsRaw) || + (primaryProxyUrl ? [this.normalizeBaseUrl(primaryProxyUrl)] : []); + this.desktopVisionProxyEndpoints = this.parseProxyEndpoints(desktopVisionEndpointsRaw) || + this.defaultProxyEndpoints; + + this.defaultMaxImageBlocks = Math.max( + 0, + parseInt( + (this.configService.get('BYTEBOT_LLM_MAX_IMAGE_BLOCKS') || '8').trim(), + 10, + ) || 0, + ); + this.desktopVisionMaxImageBlocks = Math.max( + 0, + parseInt( + (this.configService.get('BYTEBOT_LLM_MAX_IMAGE_BLOCKS_DESKTOP_VISION') || '2').trim(), + 10, + ) || 0, + ); - if (!proxyUrl) { + this.endpointPreflightEnabled = this.parseBooleanEnv( + this.configService.get('BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_ENABLED'), + true, + ); + this.endpointPreflightTimeoutMs = Math.max( + 100, + parseInt( + ( + this.configService.get( + 'BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_TIMEOUT_MS', + ) || '2000' + ).trim(), + 10, + ) || 0, + ); + this.endpointPreflightTtlMs = Math.max( + 0, + parseInt( + ( + this.configService.get( + 'BYTEBOT_LLM_PROXY_ENDPOINT_PREFLIGHT_TTL_MS', + ) || '5000' + ).trim(), + 10, + ) || 0, + ); + + if (this.defaultProxyEndpoints.length === 0) { this.logger.warn( 'BYTEBOT_LLM_PROXY_URL is not set. ProxyService will not work properly.', ); } - // Initialize OpenAI client with proxy configuration - this.openai = new OpenAI({ - apiKey: 'dummy-key-for-proxy', - baseURL: proxyUrl, + const primaryForLog = this.defaultProxyEndpoints[0] ?? primaryProxyUrl ?? ''; + this.logger.log( + `ProxyService initialized with LLM resilience (primary proxy: ${primaryForLog || 'unset'})`, + ); + } + + private parseProxyEndpoints(raw: string): string[] | null { + if (!raw) return null; + const endpoints = raw + .split(',') + .map((s) => s.trim()) + .filter(Boolean) + .map((s) => this.normalizeBaseUrl(s)); + const unique = Array.from(new Set(endpoints)); + return unique.length > 0 ? unique : null; + } + + private parseBooleanEnv(value: string | undefined, defaultValue: boolean): boolean { + if (value == null) return defaultValue; + const normalized = value.trim().toLowerCase(); + if (normalized === '') return defaultValue; + if (['1', 'true', 'yes', 'y', 'on'].includes(normalized)) return true; + if (['0', 'false', 'no', 'n', 'off'].includes(normalized)) return false; + return defaultValue; + } + + private normalizeBaseUrl(baseUrl: string): string { + return baseUrl.replace(/\/+$/, ''); + } + + protected async preflightEndpoint( + baseUrl: string, + signal?: AbortSignal, + ): Promise { + if (!this.endpointPreflightEnabled) return true; + + const normalized = this.normalizeBaseUrl(baseUrl); + const key = this.endpointKey(normalized); + const now = Date.now(); + + const cached = this.endpointPreflightCache.get(key); + if (cached && now - cached.checkedAt < this.endpointPreflightTtlMs) { + return cached.ok; + } + + // Some callers set baseUrl to ".../v1". Health endpoints live on the root. + const healthBase = normalized.replace(/\/v1$/, ''); + const url = `${healthBase}/health/readiness`; + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.endpointPreflightTimeoutMs); + + const abortFromUpstream = () => controller.abort(); + signal?.addEventListener('abort', abortFromUpstream, { once: true }); + + try { + const response = await fetch(url, { + method: 'GET', + headers: this.proxyApiKey + ? { Authorization: `Bearer ${this.proxyApiKey}` } + : undefined, + signal: controller.signal, + }); + const ok = response.ok; + this.endpointPreflightCache.set(key, { ok, checkedAt: now }); + return ok; + } catch { + this.endpointPreflightCache.set(key, { ok: false, checkedAt: now }); + return false; + } finally { + clearTimeout(timeoutId); + signal?.removeEventListener('abort', abortFromUpstream); + } + } + + private sanitizeChatMessages( + messages: ChatCompletionMessageParam[], + ): ChatCompletionMessageParam[] { + const sanitized: ChatCompletionMessageParam[] = []; + const seenToolCallIds = new Set(); + + for (const message of messages) { + if (message.role === 'assistant') { + const maybeToolCalls = (message as any).tool_calls as unknown; + const toolCalls = Array.isArray(maybeToolCalls) ? maybeToolCalls : []; + + for (const toolCall of toolCalls) { + if (toolCall && typeof toolCall.id === 'string' && toolCall.id.trim() !== '') { + seenToolCallIds.add(toolCall.id); + } + } + + const content = (message as any).content as unknown; + const hasContent = + typeof content === 'string' + ? content.trim().length > 0 + : Array.isArray(content) + ? content.length > 0 + : content != null; + const hasToolCalls = toolCalls.length > 0; + + if (!hasContent && !hasToolCalls) continue; + sanitized.push(message); + continue; + } + + if (message.role === 'tool') { + const toolCallId = (message as any).tool_call_id as unknown; + if ( + typeof toolCallId === 'string' && + toolCallId.trim() !== '' && + !seenToolCallIds.has(toolCallId) + ) { + continue; + } + sanitized.push(message); + continue; + } + + sanitized.push(message); + } + + return sanitized; + } + + private coalesceAssistantMessages( + messages: ChatCompletionMessageParam[], + ): ChatCompletionMessageParam[] { + const coalesced: ChatCompletionMessageParam[] = []; + + for (const message of messages) { + const previous = coalesced[coalesced.length - 1]; + if (message.role !== 'assistant' || previous?.role !== 'assistant') { + coalesced.push(message); + continue; + } + + const prev = previous as any; + const next = message as any; + + const prevToolCalls = Array.isArray(prev.tool_calls) ? prev.tool_calls : []; + const nextToolCalls = Array.isArray(next.tool_calls) ? next.tool_calls : []; + const toolCalls = [...prevToolCalls, ...nextToolCalls]; + + const prevContent = prev.content; + const nextContent = next.content; + + const mergedContent = this.mergeAssistantContent(prevContent, nextContent); + + coalesced[coalesced.length - 1] = { + ...previous, + ...message, + ...(mergedContent != null && { content: mergedContent }), + ...(toolCalls.length > 0 && { tool_calls: toolCalls }), + }; + } + + return coalesced; + } + + private mergeAssistantContent( + left: unknown, + right: unknown, + ): string | null { + const leftValue = this.normalizeAssistantContent(left); + const rightValue = this.normalizeAssistantContent(right); + + if (leftValue == null) return rightValue; + if (rightValue == null) return leftValue; + + const leftTrimmed = leftValue.trim(); + const rightTrimmed = rightValue.trim(); + if (!leftTrimmed) return rightValue; + if (!rightTrimmed) return leftValue; + return `${leftValue}\n\n${rightValue}`; + } + + private normalizeAssistantContent( + value: unknown, + ): string | null { + if (value == null) return null; + if (typeof value === 'string') return value; + if (Array.isArray(value)) { + const parts = value + .map((part: any) => { + if (!part || typeof part !== 'object') return null; + if (part.type === 'text' && typeof part.text === 'string') return part.text; + if (typeof part.refusal === 'string') return part.refusal; + return null; + }) + .filter((text): text is string => typeof text === 'string'); + + const combined = parts.join('\n').trim(); + return combined ? combined : null; + } + return null; + } + + private isDesktopVisionRequestedModel(model: string): boolean { + const trimmed = (model || '').trim(); + return ( + trimmed === 'desktop-vision' || + trimmed === 'claude-sonnet-4-5' || + trimmed === 'claude-opus-4-5' || + trimmed === 'qwen3-vl-32b' || + trimmed.endsWith('/qwen3-vl-32b') || + trimmed.endsWith('/claude-sonnet-4-5-20250929') || + trimmed.endsWith('/claude-opus-4-5-20251101') + ); + } + + private getProxyEndpointsForModel(model: string): string[] { + // Scoped stop-the-bleed: desktop-vision can use a different endpoint ordering (e.g., global-first). + if (this.isDesktopVisionRequestedModel(model)) return this.desktopVisionProxyEndpoints; + return this.defaultProxyEndpoints; + } + + private endpointKey(baseUrl: string): string { + try { + const url = new URL(baseUrl); + return `litellm:${url.host}`; + } catch { + return `litellm:${baseUrl}`; + } + } + + protected createOpenAIClient(baseURL: string): OpenAI { + // Use proxy auth key when required (e.g., LiteLLM master_key enabled). + // Keep dummy fallback for backwards compatibility with unauthenticated proxies. + return new OpenAI({ + apiKey: this.proxyApiKey || 'dummy-key-for-proxy', + baseURL, + // v2.5.0: Increase default timeout to allow for retry logic + timeout: 120000, // 2 minutes (increased from default 10 minutes) }); } + private getOpenAIClient(baseUrl: string): OpenAI { + const normalized = this.normalizeBaseUrl(baseUrl); + const existing = this.openaiClientsByBaseUrl.get(normalized); + if (existing) return existing; + const client = this.createOpenAIClient(normalized); + this.openaiClientsByBaseUrl.set(normalized, client); + return client; + } + + /** + * Check if a model is an OpenAI o-series reasoning model + * These models support the reasoning_effort parameter + */ + private isReasoningModel(model: string): boolean { + // Extract model name from potential namespace (e.g., "openai/o3-..." -> "o3-...") + const modelName = model.includes('/') ? model.split('/').pop() || model : model; + // O-series models start with 'o' followed by a digit (o1, o3, etc.) + return /^o\d/.test(modelName); + } + /** * Main method to generate messages using the Chat Completions API + * + * v2.5.0: Now includes retry logic with exponential backoff + * - 5 retries for transient failures (timeout, rate limit, server errors) + * - Circuit breaker to prevent cascading failures + * - Abort signal still respected for user-initiated cancellation */ async generateMessage( systemPrompt: string, messages: Message[], model: string, - useTools: boolean = true, - signal?: AbortSignal, + options: BytebotAgentGenerateMessageOptions = {}, ): Promise { + const useTools = options.useTools ?? true; + const signal = options.signal; + // Convert messages to Chat Completion format - const chatMessages = this.formatMessagesForChatCompletion( - systemPrompt, - messages, + const chatMessages = this.coalesceAssistantMessages( + this.sanitizeChatMessages( + this.formatMessagesForChatCompletion(systemPrompt, messages, model), + ), ); - try { - // Prepare the Chat Completion request - const completionRequest: OpenAI.Chat.ChatCompletionCreateParams = { - model, - messages: chatMessages, - max_tokens: 8192, - ...(useTools && { tools: proxyTools }), - reasoning_effort: 'high', - }; - // Make the API call - const completion = await this.openai.chat.completions.create( - completionRequest, - { signal }, - ); + // Determine if model supports reasoning_effort parameter + // Only OpenAI o-series models (o1, o3, etc.) support this + // Anthropic models use 'thinking' parameter instead (disabled via anthropic.service.ts) + const isReasoning = this.isReasoningModel(model); + + // Prepare the Chat Completion request + const tools = useTools + ? filterToolsByPolicy( + proxyTools, + (tool) => tool.function.name, + options.toolPolicy, + ) + : []; + + const completionRequest: OpenAI.Chat.ChatCompletionCreateParams & { + cache?: { 'no-cache': boolean }; + } = { + model, + messages: chatMessages, + max_tokens: 8192, + ...(tools.length > 0 && { tools }), + // Only include reasoning_effort for o-series models + // Non-reasoning models (Claude, GPT-4, etc.) don't support this parameter + ...(isReasoning && { reasoning_effort: 'high' }), + }; + + // Desktop/Vision requests must be deterministic and must not be served from semantic cache. + // This prevents a cached fallback (e.g., gpt-4o-mini) from masking a healthy qwen3-vl-32b backend. + if (this.isDesktopVisionRequestedModel(model)) { + completionRequest.cache = { 'no-cache': true }; + } + + const endpoints = this.getProxyEndpointsForModel(model); + const attemptedEndpointKeys: string[] = []; + let lastFailure: { errorType: string; errorMessage: string; attempts: number; durationMs: number } | null = null; + + for (let i = 0; i < endpoints.length; i++) { + const baseUrl = endpoints[i]; + const key = this.endpointKey(baseUrl); + attemptedEndpointKeys.push(key); - // Process the response - const choice = completion.choices[0]; - if (!choice || !choice.message) { - throw new Error('No valid response from Chat Completion API'); + const callStart = Date.now(); + const hasFallbackEndpoint = i < endpoints.length - 1; + + // Fast endpoint preflight to avoid OS-level TCP connect hangs (50s+ blackholes). + // If the endpoint is not reachable/readiness-failing, fail over immediately. + if (endpoints.length > 1) { + const ok = await this.preflightEndpoint(baseUrl, signal); + if (!ok) { + const errorType = LLMErrorType.NETWORK; + const errorMessage = `Endpoint preflight failed for ${key}`; + + lastFailure = { + errorType, + errorMessage, + attempts: 0, + durationMs: Date.now() - callStart, + }; + + this.llmResilienceService.openCircuit(key, { + type: errorType, + message: errorMessage, + retryable: true, + originalError: new Error(errorMessage), + }); + + if (hasFallbackEndpoint) { + const nextKey = this.endpointKey(endpoints[i + 1]); + this.eventEmitter.emit('llm.endpoint.failover', { + fromEndpoint: key, + toEndpoint: nextKey, + reason: errorType, + requestedModel: model, + }); + this.logger.warn( + `LLM endpoint preflight failed (${key}); failing over to ${nextKey}`, + ); + continue; + } + + break; + } } - // Convert response to MessageContentBlocks - const contentBlocks = this.formatChatCompletionResponse(choice.message); + const openai = this.getOpenAIClient(baseUrl); - return { - contentBlocks, - tokenUsage: { - inputTokens: completion.usage?.prompt_tokens || 0, - outputTokens: completion.usage?.completion_tokens || 0, - totalTokens: completion.usage?.total_tokens || 0, + // v2.5.0+: Wrap API call with retry logic (maxRetries=0 per-endpoint; failover happens at the endpoint layer). + const result = await this.llmResilienceService.executeWithRetry( + async () => { + // Check if aborted before making the call + if (signal?.aborted) { + throw new APIUserAbortError(); + } + + return await openai.chat.completions.create(completionRequest, { signal }); }, - }; - } catch (error: any) { - if (error instanceof APIUserAbortError) { - this.logger.log('Chat Completion API call aborted'); + key, + // Connection errors are INFRA: fail fast and fail over to next endpoint when configured. + { maxRetries: 0 }, + ); + + // Handle abort separately (not a retryable error) + if (!result.success && result.error?.originalError instanceof APIUserAbortError) { + this.logger.log('Chat Completion API call aborted by user'); throw new BytebotAgentInterrupt(); } - this.logger.error( - `Error sending message to proxy: ${error.message}`, - error.stack, - ); - throw error; + if (result.success) { + const completion = result.result!; + this.eventEmitter.emit('llm.endpoint.call', { + endpoint: key, + requestedModel: model, + durationMs: Date.now() - callStart, + }); + + // Emit failover event when LiteLLM routed to a different underlying model. + // This is expected when model-group fallbacks are configured. + if (completion.model && completion.model !== model) { + this.eventEmitter.emit('llm.failover', { + endpoint: key, + requestedModel: model, + usedModel: completion.model, + }); + } + + // Process the response + try { + const choice = completion.choices?.[0]; + if (!choice || !choice.message) { + throw new Error('No valid message in Chat Completion response'); + } + + // Convert response to MessageContentBlocks + const contentBlocks = this.formatChatCompletionResponse(choice.message); + if (contentBlocks.length === 0) { + throw new Error('Chat Completion response contained no usable content blocks'); + } + + return { + contentBlocks, + tokenUsage: { + inputTokens: completion.usage?.prompt_tokens || 0, + outputTokens: completion.usage?.completion_tokens || 0, + totalTokens: completion.usage?.total_tokens || 0, + }, + }; + } catch (error: any) { + const errorType = LLMErrorType.SERVER_ERROR; + const errorMessage = error?.message || 'Invalid Chat Completion response'; + + lastFailure = { + errorType, + errorMessage, + attempts: result.attempts, + durationMs: result.totalDurationMs, + }; + + const hasNextEndpoint = i < endpoints.length - 1; + if (hasNextEndpoint) { + // Treat invalid/empty responses as INFRA: open circuit and fail over to next endpoint. + this.llmResilienceService.openCircuit(key, { + type: errorType, + message: errorMessage, + retryable: true, + originalError: error instanceof Error ? error : undefined, + }); + + const nextKey = this.endpointKey(endpoints[i + 1]); + this.eventEmitter.emit('llm.endpoint.failover', { + fromEndpoint: key, + toEndpoint: nextKey, + reason: errorType, + requestedModel: model, + }); + + this.logger.warn( + `LLM endpoint returned invalid response [${errorType}] (${key}); failing over to ${nextKey}`, + ); + continue; + } + + // No fallback endpoint available + break; + } + } + + const errorType = result.error?.type || 'UNKNOWN'; + const errorMessage = result.error?.message || 'Unknown error'; + + lastFailure = { + errorType, + errorMessage, + attempts: result.attempts, + durationMs: result.totalDurationMs, + }; + + // Endpoint-level failover decision: only for infrastructure failures. + const isInfra = + errorType === LLMErrorType.NETWORK || + errorType === LLMErrorType.TIMEOUT || + errorType === LLMErrorType.SERVER_ERROR || + errorType === LLMErrorType.OVERLOADED || + errorType === LLMErrorType.RATE_LIMIT; + + const hasNextEndpoint = i < endpoints.length - 1; + if (isInfra && hasNextEndpoint) { + // Open circuit immediately to prevent hammering a broken gateway. + this.llmResilienceService.openCircuit(key, result.error || undefined); + + const nextKey = this.endpointKey(endpoints[i + 1]); + this.eventEmitter.emit('llm.endpoint.failover', { + fromEndpoint: key, + toEndpoint: nextKey, + reason: errorType, + requestedModel: model, + }); + + this.logger.warn( + `LLM endpoint failed [${errorType}] (${key}); failing over to ${nextKey}`, + ); + continue; + } + + // Non-infra failure (or no fallback endpoint): stop here. + break; } + + const errorType = lastFailure?.errorType || 'UNKNOWN'; + const errorMessage = lastFailure?.errorMessage || 'Unknown error'; + const attempts = lastFailure?.attempts || 0; + const durationMs = lastFailure?.durationMs || 0; + + this.logger.error( + `LLM call failed after ${attempts} attempts: [${errorType}] ${errorMessage}`, + ); + + const error = new Error(`LLM API error after ${attempts} attempts: ${errorMessage}`); + (error as any).llmErrorType = errorType; + (error as any).attempts = attempts; + (error as any).durationMs = durationMs; + (error as any).attemptedEndpoints = attemptedEndpointKeys; + throw error; } /** @@ -114,9 +654,15 @@ export class ProxyService implements BytebotAgentService { private formatMessagesForChatCompletion( systemPrompt: string, messages: Message[], + requestedModel: string, ): ChatCompletionMessageParam[] { const chatMessages: ChatCompletionMessageParam[] = []; + const maxImages = this.isDesktopVisionRequestedModel(requestedModel) + ? this.desktopVisionMaxImageBlocks + : this.defaultMaxImageBlocks; + const includedImageRefs = this.getIncludedImageRefs(messages, maxImages); + // Add system message chatMessages.push({ role: 'system', @@ -146,6 +692,7 @@ export class ProxyService implements BytebotAgentService { )}`, }); } else if (isImageContentBlock(block)) { + if (!includedImageRefs.has(block)) continue; chatMessages.push({ role: 'user', content: [ @@ -172,6 +719,7 @@ export class ProxyService implements BytebotAgentService { } case MessageContentType.Image: { const imageBlock = block as ImageContentBlock; + if (!includedImageRefs.has(imageBlock)) break; chatMessages.push({ role: 'user', content: [ @@ -204,13 +752,8 @@ export class ProxyService implements BytebotAgentService { break; } case MessageContentType.Thinking: { - const thinkingBlock = block as ThinkingContentBlock; - const message: ChatCompletionMessageParam = { - role: 'assistant', - content: null, - }; - message['reasoning_content'] = thinkingBlock.thinking; - chatMessages.push(message); + // Do not replay thinking-only blocks back into the LLM context. + // Some OpenAI-compatible servers reject assistant messages without `content` or `tool_calls`. break; } case MessageContentType.ToolResult: { @@ -238,6 +781,7 @@ export class ProxyService implements BytebotAgentService { } if (content.type === MessageContentType.Image) { + if (!includedImageRefs.has(content)) return; chatMessages.push({ role: 'user', content: [ @@ -266,6 +810,47 @@ export class ProxyService implements BytebotAgentService { return chatMessages; } + private getIncludedImageRefs(messages: Message[], maxImages: number): Set { + if (maxImages <= 0) return new Set(); + + const imageRefs: unknown[] = []; + for (const message of messages) { + const messageContentBlocks = message.content as MessageContentBlock[]; + + if (messageContentBlocks.every((block) => isUserActionContentBlock(block))) { + const userActionBlocks = messageContentBlocks.flatMap((block) => block.content); + for (const block of userActionBlocks) { + if (isImageContentBlock(block)) { + imageRefs.push(block); + } + } + continue; + } + + for (const block of messageContentBlocks) { + if (block.type === MessageContentType.Image) { + imageRefs.push(block); + continue; + } + + if (block.type === MessageContentType.ToolResult) { + const toolResultBlock = block as ToolResultContentBlock; + for (const content of toolResultBlock.content) { + if (content.type === MessageContentType.Image) { + imageRefs.push(content); + } + } + } + } + } + + const include = new Set(); + for (let i = imageRefs.length - 1; i >= 0 && include.size < maxImages; i--) { + include.add(imageRefs[i]); + } + return include; + } + /** * Convert Chat Completion response to MessageContentBlocks */ @@ -282,14 +867,6 @@ export class ProxyService implements BytebotAgentService { } as TextContentBlock); } - if (message['reasoning_content']) { - contentBlocks.push({ - type: MessageContentType.Thinking, - thinking: message['reasoning_content'], - signature: message['reasoning_content'], - } as ThinkingContentBlock); - } - // Handle tool calls if (message.tool_calls && message.tool_calls.length > 0) { for (const toolCall of message.tool_calls) { diff --git a/packages/bytebot-agent/src/task-controller/task-controller.module.ts b/packages/bytebot-agent/src/task-controller/task-controller.module.ts new file mode 100644 index 000000000..7871830e9 --- /dev/null +++ b/packages/bytebot-agent/src/task-controller/task-controller.module.ts @@ -0,0 +1,20 @@ +/** + * Task Controller Client Module + * Phase 6.4: Agent Integration + * + * Provides communication with the Task Controller service for: + * - Per-task credential fetching + * - Desktop URL discovery + * - Heartbeat reporting + */ + +import { Module } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { TaskControllerService } from './task-controller.service'; + +@Module({ + imports: [ConfigModule], + providers: [TaskControllerService], + exports: [TaskControllerService], +}) +export class TaskControllerModule {} diff --git a/packages/bytebot-agent/src/task-controller/task-controller.service.ts b/packages/bytebot-agent/src/task-controller/task-controller.service.ts new file mode 100644 index 000000000..0b73e497f --- /dev/null +++ b/packages/bytebot-agent/src/task-controller/task-controller.service.ts @@ -0,0 +1,747 @@ +/** + * Task Controller Client Service + * Phase 6.4: Agent Integration + * + * Fetches per-task credentials and desktop URLs from the Task Controller. + * Replaces global BYTEBOT_DESKTOP_BASE_URL with dynamic per-task URLs. + */ + +import { Injectable, Logger, OnModuleDestroy } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; + +/** + * Task info returned from the task controller + */ +export interface TaskInfo { + taskId: string; + tenantId: string; + phase: string; + desktopEndpoint: string | null; + vncEndpoint: string | null; + routerUrl: string; + credentials: { + apiToken: string; + vncPassword: string; + expiresAt: string; + } | null; + timeoutAt: string | null; + startedAt: string | null; +} + +export type DesktopTakeoverNotAvailableReason = + | 'LEASE_EXPIRED' + | 'DESKTOP_TIMED_OUT'; + +export type DesktopTakeoverAvailability = + | { ok: true; desktopUrl: string; timeoutAt: string | null } + | { + ok: false; + reason: DesktopTakeoverNotAvailableReason; + details?: Record; + }; + +/** + * Heartbeat response from the task controller + * + * v2.2.1: Added `ready`, `phase`, and `message` fields to support proper + * wait-for-desktop mechanism. The agent should wait until `ready=true` + * before attempting to execute actions. + * + * - `shouldContinue`: true if agent should stay alive (false for terminal phases) + * - `ready`: true only when desktop is ready for execution (phase=Running) + * - `phase`: current TaskDesktop phase for debugging + * - `message`: human-readable status message + * + * v2.2.11: Added `estimatedWaitTime` for WaitingForCapacity phase to dynamically + * extend agent timeout when waiting for overflow pool capacity. + */ +export interface HeartbeatResponse { + acknowledged: boolean; + shouldContinue: boolean; + ready: boolean; // v2.2.1: true only when desktop is ready for execution + phase: string; // v2.2.1: current phase for debugging + message: string; // v2.2.1: human-readable status message + timeRemaining: number; // seconds + warningThreshold: boolean; // true if <5 min remaining + estimatedWaitTime?: number; // v2.2.11: seconds remaining in capacity wait (only for WaitingForCapacity) +} + +/** + * Cached task info with expiry + */ +interface CachedTaskInfo { + info: TaskInfo; + fetchedAt: number; + expiresAt: number; +} + +@Injectable() +export class TaskControllerService implements OnModuleDestroy { + private readonly logger = new Logger(TaskControllerService.name); + private readonly controllerUrl: string; + private readonly fallbackDesktopUrl: string; + private readonly cacheTtlMs: number = 30000; // 30 seconds cache + private readonly heartbeatIntervalMs: number; + private readonly taskCache: Map = new Map(); + private heartbeatIntervals: Map = new Map(); + // v2.2.22: 404 grace window for transient task creation delays + private readonly notFound404GraceMs: number = 30000; // 30 seconds grace for initial 404s + private readonly task404GraceStart: Map = new Map(); // Track when 404 grace started per task + + constructor(private readonly configService: ConfigService) { + // Task controller URL - falls back to empty if not configured (Phase 6 not deployed) + this.controllerUrl = this.configService.get( + 'TASK_CONTROLLER_URL', + '', + ); + + // Fallback to legacy desktop URL if task controller not available + this.fallbackDesktopUrl = this.configService.get( + 'BYTEBOT_DESKTOP_BASE_URL', + 'http://bytebot-desktop:9990', + ); + + // Heartbeat interval (default 15 seconds) + this.heartbeatIntervalMs = parseInt( + this.configService.get('HEARTBEAT_INTERVAL_MS', '15000'), + 10, + ); + + if (!this.controllerUrl) { + this.logger.warn( + 'TASK_CONTROLLER_URL not set - using fallback desktop URL for all tasks (Phase 6 compatibility mode)', + ); + } else { + this.logger.log(`Task Controller URL: ${this.controllerUrl}`); + } + } + + onModuleDestroy() { + // Clear all heartbeat intervals + for (const [taskId, interval] of this.heartbeatIntervals) { + clearInterval(interval); + this.logger.debug(`Cleared heartbeat interval for task ${taskId}`); + } + this.heartbeatIntervals.clear(); + this.taskCache.clear(); + this.task404GraceStart.clear(); // v2.2.22 + } + + /** + * Check if Phase 6 task controller is available + */ + isPhase6Enabled(): boolean { + return !!this.controllerUrl; + } + + /** + * Get task info including desktop URL and credentials + * Uses caching to reduce API calls + */ + async getTaskInfo(taskId: string): Promise { + // Check cache first + const cached = this.taskCache.get(taskId); + if (cached && Date.now() < cached.expiresAt) { + this.logger.debug(`Using cached task info for ${taskId}`); + return cached.info; + } + + // If Phase 6 not enabled, return fallback info + if (!this.isPhase6Enabled()) { + return this.getFallbackTaskInfo(taskId); + } + + try { + const url = `${this.controllerUrl}/api/v1/tasks/${taskId}`; + this.logger.debug(`Fetching task info from ${url}`); + + const response = await fetch(url, { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + }, + }); + + if (!response.ok) { + if (response.status === 404) { + this.logger.warn(`Task ${taskId} not found in controller`); + // Fall back to legacy mode + return this.getFallbackTaskInfo(taskId); + } + throw new Error(`Task controller returned ${response.status}`); + } + + const taskInfo: TaskInfo = await response.json(); + + // Cache the result + const expiresAt = Date.now() + this.cacheTtlMs; + this.taskCache.set(taskId, { + info: taskInfo, + fetchedAt: Date.now(), + expiresAt, + }); + + this.logger.log( + `Fetched task info for ${taskId}: phase=${taskInfo.phase}, desktop=${taskInfo.desktopEndpoint}`, + ); + + return taskInfo; + } catch (error: any) { + this.logger.error( + `Failed to fetch task info for ${taskId}: ${error.message}`, + ); + // Fall back to legacy mode on error + return this.getFallbackTaskInfo(taskId); + } + } + + /** + * Strictly determines whether a desktop takeover is available. + * + * Important: When Phase 6 is enabled, we must not fall back to legacy mode + * on 404 or controller errors, otherwise we can incorrectly offer takeover + * for a desktop session that has already expired. + * + * This method is intentionally fail-closed: if we cannot verify the session, + * we treat takeover as not available and avoid mutating task state. + */ + async getDesktopTakeoverAvailability( + taskId: string, + ): Promise { + if (!this.isPhase6Enabled()) { + return { ok: true, desktopUrl: this.fallbackDesktopUrl, timeoutAt: null }; + } + + try { + const url = `${this.controllerUrl}/api/v1/tasks/${taskId}`; + this.logger.debug(`Checking takeover availability via ${url}`); + + const response = await fetch(url, { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + }, + }); + + if (!response.ok) { + if (response.status === 404) { + return { + ok: false, + reason: 'LEASE_EXPIRED', + details: { status: 404 }, + }; + } + + return { + ok: false, + reason: 'LEASE_EXPIRED', + details: { status: response.status }, + }; + } + + const taskInfo: TaskInfo = await response.json(); + + if ( + typeof taskInfo.timeoutAt === 'string' && + taskInfo.timeoutAt.trim().length > 0 + ) { + const timeoutMs = Date.parse(taskInfo.timeoutAt); + if (!Number.isFinite(timeoutMs)) { + this.logger.warn( + `Task ${taskId} returned an invalid timeoutAt; failing takeover closed: ${taskInfo.timeoutAt}`, + ); + return { + ok: false, + reason: 'LEASE_EXPIRED', + details: { + timeoutAt: taskInfo.timeoutAt, + phase: taskInfo.phase, + invalid: true, + }, + }; + } + + if (timeoutMs <= Date.now()) { + return { + ok: false, + reason: 'DESKTOP_TIMED_OUT', + details: { timeoutAt: taskInfo.timeoutAt, phase: taskInfo.phase }, + }; + } + } + + if (!taskInfo.desktopEndpoint) { + return { + ok: false, + reason: 'LEASE_EXPIRED', + details: { phase: taskInfo.phase }, + }; + } + + return { + ok: true, + desktopUrl: taskInfo.desktopEndpoint, + timeoutAt: taskInfo.timeoutAt ?? null, + }; + } catch (error: any) { + this.logger.error( + `Failed to check takeover availability for ${taskId}: ${error.message}`, + ); + return { + ok: false, + reason: 'LEASE_EXPIRED', + details: { error: error.message }, + }; + } + } + + /** + * Get desktop URL for a task + * Primary method for agent to get the correct desktop endpoint + * + * v2.2.1: This method now only returns the URL if the desktop is ready. + * For waiting until ready, use `waitForDesktop()` instead. + */ + async getDesktopUrl(taskId: string): Promise { + const taskInfo = await this.getTaskInfo(taskId); + + if (taskInfo?.desktopEndpoint) { + return taskInfo.desktopEndpoint; + } + + // v2.2.1: Only fall back to legacy URL if Phase 6 is not enabled + // When Phase 6 is enabled, we should wait for the desktop to be ready + if (!this.isPhase6Enabled()) { + this.logger.debug( + `No desktop endpoint for ${taskId}, using fallback: ${this.fallbackDesktopUrl}`, + ); + return this.fallbackDesktopUrl; + } + + // Phase 6 is enabled but no desktop endpoint yet - this shouldn't happen + // after calling waitForDesktop(), but log a warning + this.logger.warn( + `Phase 6 enabled but no desktop endpoint for ${taskId} - task may not be ready`, + ); + throw new Error(`Desktop not ready for task ${taskId}`); + } + + /** + * Wait for desktop to be ready for a task + * + * v2.2.1: New method that polls the heartbeat endpoint until the desktop + * is ready (phase=Running). Uses exponential backoff with jitter. + * + * v2.2.11: Enhanced to dynamically extend timeout when in WaitingForCapacity + * phase. This prevents premature timeouts when waiting for overflow pool + * capacity to be provisioned (which can take 30-90 seconds). + * + * @param taskId - The task ID to wait for + * @param options - Wait options + * @returns The desktop URL once ready + * @throws Error if task times out, is cancelled, or enters a terminal state + */ + async waitForDesktop( + taskId: string, + options: { + timeoutMs?: number; + initialDelayMs?: number; + maxDelayMs?: number; + capacityWaitExtensionMs?: number; // v2.2.11: Additional time to wait during capacity wait + } = {}, + ): Promise { + const { + timeoutMs = 60000, // 60 second default timeout + initialDelayMs = 500, // Start with 500ms delay + maxDelayMs = 5000, // Cap at 5 second delay + capacityWaitExtensionMs = 180000, // v2.2.11: 3 minute extension for capacity wait + } = options; + + // In legacy mode, desktop is always ready + if (!this.isPhase6Enabled()) { + this.logger.debug( + `waitForDesktop: Legacy mode, returning fallback URL immediately`, + ); + return this.fallbackDesktopUrl; + } + + const startTime = Date.now(); + let currentDelay = initialDelayMs; + let attempts = 0; + // v2.2.11: Track effective timeout which can be extended during capacity wait + let effectiveTimeoutMs = timeoutMs; + let capacityWaitLogged = false; + + this.logger.log( + `waitForDesktop: Waiting for desktop to be ready for task ${taskId} (initial timeout: ${timeoutMs}ms)`, + ); + + while (Date.now() - startTime < effectiveTimeoutMs) { + attempts++; + + // Post heartbeat with 'waiting' status + const heartbeat = await this.postHeartbeat(taskId, 'waiting_for_desktop'); + + // Check if we should stop waiting + if (!heartbeat.shouldContinue) { + this.logger.error( + `waitForDesktop: Task ${taskId} signaled to stop: phase=${heartbeat.phase}, message=${heartbeat.message}`, + ); + throw new Error( + `Task ${taskId} terminated while waiting for desktop: ${heartbeat.phase} - ${heartbeat.message}`, + ); + } + + // v2.2.11: If in WaitingForCapacity phase, extend timeout to accommodate capacity provisioning + if (heartbeat.phase === 'WaitingForCapacity') { + // Calculate new effective timeout based on server-provided estimatedWaitTime or extension + if ( + heartbeat.estimatedWaitTime !== undefined && + heartbeat.estimatedWaitTime > 0 + ) { + // Server knows how long capacity wait will last + const serverSuggestedTimeout = + Date.now() - startTime + heartbeat.estimatedWaitTime * 1000 + 30000; // +30s buffer + if (serverSuggestedTimeout > effectiveTimeoutMs) { + effectiveTimeoutMs = serverSuggestedTimeout; + if (!capacityWaitLogged) { + this.logger.log( + `waitForDesktop: Task ${taskId} in WaitingForCapacity phase, extended timeout to ${Math.round(effectiveTimeoutMs / 1000)}s ` + + `(server estimated wait: ${heartbeat.estimatedWaitTime}s)`, + ); + capacityWaitLogged = true; + } + } + } else { + // Use default extension + const extendedTimeout = + Date.now() - startTime + capacityWaitExtensionMs; + if (extendedTimeout > effectiveTimeoutMs) { + effectiveTimeoutMs = extendedTimeout; + if (!capacityWaitLogged) { + this.logger.log( + `waitForDesktop: Task ${taskId} in WaitingForCapacity phase, extended timeout to ${Math.round(effectiveTimeoutMs / 1000)}s`, + ); + capacityWaitLogged = true; + } + } + } + } + + // Check if desktop is ready + if (heartbeat.ready) { + this.logger.log( + `waitForDesktop: Desktop ready for task ${taskId} after ${attempts} attempts (${Date.now() - startTime}ms)`, + ); + + // Invalidate cache to get fresh task info with desktop endpoint + this.invalidateCache(taskId); + + // Get the desktop URL now that it's ready + const taskInfo = await this.getTaskInfo(taskId); + if (taskInfo?.desktopEndpoint) { + return taskInfo.desktopEndpoint; + } + + // Desktop is ready but no endpoint - shouldn't happen, throw error + throw new Error( + `Desktop ready but no endpoint available for task ${taskId}`, + ); + } + + // Log waiting status + this.logger.debug( + `waitForDesktop: Task ${taskId} not ready (attempt ${attempts}): phase=${heartbeat.phase}, message=${heartbeat.message}` + + (heartbeat.estimatedWaitTime !== undefined + ? `, estimatedWait=${heartbeat.estimatedWaitTime}s` + : ''), + ); + + // Wait before next attempt with exponential backoff + jitter + const jitter = Math.random() * 200; // 0-200ms jitter + await this.sleep(Math.min(currentDelay + jitter, maxDelayMs)); + currentDelay = Math.min(currentDelay * 1.5, maxDelayMs); + } + + // Timeout reached + const elapsed = Date.now() - startTime; + this.logger.error( + `waitForDesktop: Timeout waiting for desktop for task ${taskId} after ${attempts} attempts (${elapsed}ms)`, + ); + throw new Error( + `Timeout waiting for desktop for task ${taskId} after ${elapsed}ms`, + ); + } + + /** + * Helper to sleep for a given number of milliseconds + */ + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } + + /** + * Get task credentials + */ + async getCredentials( + taskId: string, + ): Promise<{ apiToken: string; vncPassword: string } | null> { + const taskInfo = await this.getTaskInfo(taskId); + return taskInfo?.credentials || null; + } + + /** + * Post heartbeat to task controller + * Returns whether the task should continue processing + * + * v2.2.1: Now includes `ready` field to indicate when desktop is ready + * for execution. Agent should check `ready` before executing actions. + */ + async postHeartbeat( + taskId: string, + status?: string, + currentStep?: string, + ): Promise { + if (!this.isPhase6Enabled()) { + // In legacy mode, always continue and always ready + return { + acknowledged: true, + shouldContinue: true, + ready: true, + phase: 'Running', + message: 'Legacy mode - no task controller', + timeRemaining: 3600, + warningThreshold: false, + }; + } + + try { + const url = `${this.controllerUrl}/api/v1/tasks/${taskId}/heartbeat`; + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + taskId, + status, + currentStep, + }), + }); + + if (!response.ok) { + // v2.2.22: Handle 404 with grace window for transient task creation delays + // This prevents race conditions where heartbeat arrives before TaskDesktop CR is created + if (response.status === 404) { + const now = Date.now(); + const graceStart = this.task404GraceStart.get(taskId); + + if (!graceStart) { + // First 404 for this task - start grace window + this.task404GraceStart.set(taskId, now); + this.logger.warn( + `Heartbeat 404 for ${taskId}: task not found, starting ${this.notFound404GraceMs}ms grace window`, + ); + // Return shouldContinue: true during grace to allow task controller to create CR + return { + acknowledged: false, + shouldContinue: true, + ready: false, + phase: 'Initializing', + message: `Task ${taskId} not found yet, waiting for creation (grace window started)`, + timeRemaining: Math.floor(this.notFound404GraceMs / 1000), + warningThreshold: false, + }; + } + + const elapsed = now - graceStart; + if (elapsed < this.notFound404GraceMs) { + // Still within grace window + const remaining = this.notFound404GraceMs - elapsed; + this.logger.debug( + `Heartbeat 404 for ${taskId}: still in grace window (${Math.round(remaining / 1000)}s remaining)`, + ); + return { + acknowledged: false, + shouldContinue: true, + ready: false, + phase: 'Initializing', + message: `Task ${taskId} not found yet, waiting for creation (${Math.round(remaining / 1000)}s grace remaining)`, + timeRemaining: Math.floor(remaining / 1000), + warningThreshold: false, + }; + } + + // Grace window expired - now treat as permanent failure + this.logger.error( + `Heartbeat 404 for ${taskId}: grace window expired after ${Math.round(elapsed / 1000)}s, stopping heartbeat`, + ); + this.task404GraceStart.delete(taskId); // Clean up + return { + acknowledged: false, + shouldContinue: false, // Signal to stop processing - task really doesn't exist + ready: false, + phase: 'NotFound', + message: `Task ${taskId} not found in task controller after ${Math.round(elapsed / 1000)}s grace window`, + timeRemaining: 0, + warningThreshold: false, + }; + } + + this.logger.warn(`Heartbeat failed for ${taskId}: ${response.status}`); + // Assume continue on other heartbeat failures, but not ready (unknown state) + return { + acknowledged: false, + shouldContinue: true, + ready: false, + phase: 'Unknown', + message: `Heartbeat failed with status ${response.status}`, + timeRemaining: 0, + warningThreshold: false, + }; + } + + // v2.2.22: Clear 404 grace window on successful response + if (this.task404GraceStart.has(taskId)) { + this.logger.log( + `Heartbeat succeeded for ${taskId}, clearing 404 grace window`, + ); + this.task404GraceStart.delete(taskId); + } + + const result = await response.json(); + + // v2.2.1: Handle backward compatibility with older task controllers + const heartbeatResponse: HeartbeatResponse = { + acknowledged: result.acknowledged ?? false, + shouldContinue: result.shouldContinue ?? true, + ready: result.ready ?? result.shouldContinue === true, // fallback for older controllers + phase: result.phase ?? 'Unknown', + message: result.message ?? '', + timeRemaining: result.timeRemaining ?? 0, + warningThreshold: result.warningThreshold ?? false, + }; + + if (heartbeatResponse.warningThreshold) { + this.logger.warn( + `Task ${taskId} approaching timeout: ${heartbeatResponse.timeRemaining}s remaining`, + ); + } + + if (!heartbeatResponse.shouldContinue) { + this.logger.log( + `Task ${taskId} signaled to stop: shouldContinue=false, phase=${heartbeatResponse.phase}`, + ); + } + + // v2.2.1: Log ready state for debugging + if (!heartbeatResponse.ready && heartbeatResponse.shouldContinue) { + this.logger.debug( + `Task ${taskId} not ready yet: phase=${heartbeatResponse.phase}, message=${heartbeatResponse.message}`, + ); + } + + return heartbeatResponse; + } catch (error: any) { + this.logger.error(`Heartbeat error for ${taskId}: ${error.message}`); + // Assume continue on error, but not ready + return { + acknowledged: false, + shouldContinue: true, + ready: false, + phase: 'Unknown', + message: `Heartbeat error: ${error.message}`, + timeRemaining: 0, + warningThreshold: false, + }; + } + } + + /** + * Start automatic heartbeat for a task + */ + startHeartbeat(taskId: string): void { + // Clear any existing heartbeat for this task + this.stopHeartbeat(taskId); + + if (!this.isPhase6Enabled()) { + this.logger.debug( + `Heartbeat not started for ${taskId} - Phase 6 not enabled`, + ); + return; + } + + this.logger.log( + `Starting heartbeat for ${taskId} every ${this.heartbeatIntervalMs}ms`, + ); + + const interval = setInterval(async () => { + try { + const result = await this.postHeartbeat(taskId, 'processing'); + + if (!result.shouldContinue) { + this.logger.warn(`Heartbeat indicates task ${taskId} should stop`); + this.stopHeartbeat(taskId); + } + } catch (error: any) { + this.logger.error( + `Heartbeat interval error for ${taskId}: ${error.message}`, + ); + } + }, this.heartbeatIntervalMs); + + this.heartbeatIntervals.set(taskId, interval); + + // Send initial heartbeat immediately + this.postHeartbeat(taskId, 'started').catch((error) => { + this.logger.error( + `Initial heartbeat failed for ${taskId}: ${error.message}`, + ); + }); + } + + /** + * Stop automatic heartbeat for a task + */ + stopHeartbeat(taskId: string): void { + const interval = this.heartbeatIntervals.get(taskId); + if (interval) { + clearInterval(interval); + this.heartbeatIntervals.delete(taskId); + this.logger.debug(`Stopped heartbeat for ${taskId}`); + } + } + + /** + * Invalidate cached task info + */ + invalidateCache(taskId: string): void { + this.taskCache.delete(taskId); + this.logger.debug(`Invalidated cache for ${taskId}`); + } + + /** + * Get router URL for a task (for VNC connections and action logging) + */ + async getRouterUrl(taskId: string): Promise { + const taskInfo = await this.getTaskInfo(taskId); + return taskInfo?.routerUrl || null; + } + + /** + * Fallback task info for legacy mode (Phase 6 not deployed) + */ + private getFallbackTaskInfo(taskId: string): TaskInfo { + return { + taskId, + tenantId: 'default', + phase: 'Running', + desktopEndpoint: this.fallbackDesktopUrl, + vncEndpoint: null, + routerUrl: '', + credentials: null, + timeoutAt: null, + startedAt: null, + }; + } +} diff --git a/packages/bytebot-agent/src/tasks/dto/create-task.dto.ts b/packages/bytebot-agent/src/tasks/dto/create-task.dto.ts index ad12e2282..2ad1482f6 100644 --- a/packages/bytebot-agent/src/tasks/dto/create-task.dto.ts +++ b/packages/bytebot-agent/src/tasks/dto/create-task.dto.ts @@ -1,14 +1,16 @@ import { IsArray, + IsBoolean, IsDate, IsNotEmpty, IsNumber, IsOptional, IsString, + IsEnum, ValidateNested, } from 'class-validator'; import { Type } from 'class-transformer'; -import { Role, TaskPriority, TaskType } from '@prisma/client'; +import { ExecutionSurface, Role, TaskPriority, TaskType } from '@prisma/client'; export class TaskFileDto { @IsNotEmpty() @@ -33,6 +35,11 @@ export class CreateTaskDto { @IsString() description: string; + // v2.2.16: Optional title - if not provided, will be AI-generated + @IsOptional() + @IsString() + title?: string; + @IsOptional() @IsString() type?: TaskType; @@ -57,4 +64,59 @@ export class CreateTaskDto { @ValidateNested({ each: true }) @Type(() => TaskFileDto) files?: TaskFileDto[]; + + // v2.3.0 M4: Workflow context (null for Product 1 Tasks, set for Product 2 Workflows) + // These are passed by the workflow orchestrator when dispatching a node run + + @IsOptional() + @IsString() + workspaceId?: string; + + @IsOptional() + @IsString() + nodeRunId?: string; + + // v2.3.0 M4: Tool configuration from workflow node definition + // Controls which tools are available for this task + + @IsOptional() + @IsArray() + @IsString({ each: true }) + allowedTools?: string[]; + + @IsOptional() + @IsBoolean() + gatewayToolsOnly?: boolean; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + highRiskTools?: string[]; + + // Phase 4: Execution surface constraint - task requires desktop environment + // If true, task MUST have a valid desktop pod before executing desktop tools + @IsOptional() + @IsBoolean() + requiresDesktop?: boolean; + + // PR5: Explicit execution surface (TEXT_ONLY vs DESKTOP) + @IsOptional() + @IsEnum(ExecutionSurface) + executionSurface?: ExecutionSurface; + + // v2.4.0: Context propagation for autonomous operation + // When task is part of a multi-step goal, these fields provide context + // to help the AI agent understand the broader context and proceed autonomously + + // Original goal/objective that this task is part of + // Helps agent understand the overall intent without asking for clarification + @IsOptional() + @IsString() + goalContext?: string; + + // Summary of what previous steps accomplished + // Provides continuity between steps in a multi-step goal + @IsOptional() + @IsString() + previousStepResults?: string; } diff --git a/packages/bytebot-agent/src/tasks/dto/update-task.dto.ts b/packages/bytebot-agent/src/tasks/dto/update-task.dto.ts index be988d773..163a7dbc4 100644 --- a/packages/bytebot-agent/src/tasks/dto/update-task.dto.ts +++ b/packages/bytebot-agent/src/tasks/dto/update-task.dto.ts @@ -1,4 +1,4 @@ -import { IsEnum, IsOptional } from 'class-validator'; +import { IsEnum, IsOptional, IsString } from 'class-validator'; import { TaskPriority, TaskStatus } from '@prisma/client'; export class UpdateTaskDto { @@ -18,4 +18,14 @@ export class UpdateTaskDto { @IsOptional() completedAt?: Date; + + // v2.2.5: Error message for failed tasks + @IsOptional() + @IsString() + error?: string; + + // v2.4.1: Task result/outcome data (JSON) + // Stores the outcome description from the AI agent when task completes + @IsOptional() + result?: Record; } diff --git a/packages/bytebot-agent/src/tasks/tasks.controller.ts b/packages/bytebot-agent/src/tasks/tasks.controller.ts index 982c4a4f1..884870f80 100644 --- a/packages/bytebot-agent/src/tasks/tasks.controller.ts +++ b/packages/bytebot-agent/src/tasks/tasks.controller.ts @@ -25,6 +25,8 @@ const anthropicApiKey = process.env.ANTHROPIC_API_KEY; const openaiApiKey = process.env.OPENAI_API_KEY; const proxyUrl = process.env.BYTEBOT_LLM_PROXY_URL; +// v2.2.14: Add API key for LiteLLM proxy authentication +const proxyApiKey = process.env.BYTEBOT_LLM_PROXY_API_KEY; const models = [ ...(anthropicApiKey ? ANTHROPIC_MODELS : []), @@ -66,18 +68,61 @@ export class TasksController { return this.tasksService.findAll(pageNum, limitNum, statusFilter); } + /** + * v2.2.17: Backfill AI-generated titles for existing tasks. + * + * This endpoint generates titles for all tasks that don't have one. + * It's idempotent - safe to call multiple times. + * + * Query parameters: + * - batchSize: Number of tasks to process per batch (default: 10) + * - delayMs: Milliseconds to wait between batches (default: 2000) + * - limit: Maximum number of tasks to process (default: 0 = all) + * - dryRun: If true, simulates without making changes (default: false) + */ + @Post('backfill-titles') + @HttpCode(HttpStatus.OK) + async backfillTitles( + @Query('batchSize') batchSize?: string, + @Query('delayMs') delayMs?: string, + @Query('limit') limit?: string, + @Query('dryRun') dryRun?: string, + ) { + const options = { + batchSize: batchSize ? parseInt(batchSize, 10) : 10, + delayMs: delayMs ? parseInt(delayMs, 10) : 2000, + limit: limit ? parseInt(limit, 10) : 0, + dryRun: dryRun === 'true', + }; + + return this.tasksService.backfillTitles(options); + } + @Get('models') async getModels() { if (proxyUrl) { try { + // v2.2.14: Include Authorization header if API key is configured + const headers: Record = { + 'Content-Type': 'application/json', + }; + if (proxyApiKey) { + headers['Authorization'] = `Bearer ${proxyApiKey}`; + } + const response = await fetch(`${proxyUrl}/model/info`, { method: 'GET', - headers: { - 'Content-Type': 'application/json', - }, + headers, }); if (!response.ok) { + // v2.2.14: Provide specific error message for 401 to aid debugging + if (response.status === 401) { + throw new HttpException( + 'LiteLLM proxy authentication failed. Check BYTEBOT_LLM_PROXY_API_KEY configuration.', + HttpStatus.UNAUTHORIZED, + ); + } throw new HttpException( `Failed to fetch models from proxy: ${response.statusText}`, HttpStatus.BAD_GATEWAY, diff --git a/packages/bytebot-agent/src/tasks/tasks.module.ts b/packages/bytebot-agent/src/tasks/tasks.module.ts index fdad46c9e..abc743cc2 100644 --- a/packages/bytebot-agent/src/tasks/tasks.module.ts +++ b/packages/bytebot-agent/src/tasks/tasks.module.ts @@ -2,13 +2,15 @@ import { Module } from '@nestjs/common'; import { TasksController } from './tasks.controller'; import { TasksService } from './tasks.service'; import { TasksGateway } from './tasks.gateway'; +import { TitleGenerationService } from './title-generation.service'; import { PrismaModule } from '../prisma/prisma.module'; import { MessagesModule } from '../messages/messages.module'; +import { TaskControllerModule } from '../task-controller/task-controller.module'; @Module({ - imports: [PrismaModule, MessagesModule], + imports: [PrismaModule, MessagesModule, TaskControllerModule], controllers: [TasksController], - providers: [TasksService, TasksGateway], - exports: [TasksService, TasksGateway], + providers: [TasksService, TasksGateway, TitleGenerationService], + exports: [TasksService, TasksGateway, TitleGenerationService], }) export class TasksModule {} diff --git a/packages/bytebot-agent/src/tasks/tasks.service.takeover.spec.ts b/packages/bytebot-agent/src/tasks/tasks.service.takeover.spec.ts new file mode 100644 index 000000000..4bafaf3fe --- /dev/null +++ b/packages/bytebot-agent/src/tasks/tasks.service.takeover.spec.ts @@ -0,0 +1,130 @@ +import { ConflictException } from '@nestjs/common'; +import { Role, TaskPriority, TaskStatus, TaskType } from '@prisma/client'; +import { TasksService } from './tasks.service'; + +describe('TasksService.takeOver', () => { + const makeTask = (overrides: Partial = {}) => { + return { + id: 't-1', + description: 'test', + title: null, + type: TaskType.IMMEDIATE, + status: TaskStatus.RUNNING, + priority: TaskPriority.MEDIUM, + control: Role.ASSISTANT, + createdAt: new Date(), + createdBy: Role.USER, + scheduledFor: null, + updatedAt: new Date(), + executedAt: null, + completedAt: null, + queuedAt: null, + error: null, + result: null, + model: { provider: 'openai', name: 'desktop-vision' }, + version: 0, + claimedBy: null, + leaseExpiresAt: null, + workspaceId: null, + nodeRunId: null, + allowedTools: [], + gatewayToolsOnly: false, + highRiskTools: [], + requiresDesktop: true, + executionSurface: null, + files: [], + summaries: [], + messages: [], + artifacts: [], + actionLogs: [], + ...overrides, + }; + }; + + const makeService = ( + overrides: { + prisma?: any; + taskControllerService?: any; + tasksGateway?: any; + eventEmitter?: any; + } = {}, + ) => { + const prisma = + overrides.prisma || + ({ + task: { + findUnique: jest.fn(async () => makeTask()), + update: jest.fn(async () => makeTask({ control: Role.USER })), + }, + } as any); + + const tasksGateway = + overrides.tasksGateway || ({ emitTaskUpdate: jest.fn() } as any); + + const configService = { get: jest.fn(() => '') } as any; + const eventEmitter = overrides.eventEmitter || ({ emit: jest.fn() } as any); + + const taskControllerService = + overrides.taskControllerService || + ({ + isPhase6Enabled: jest.fn(() => false), + getDesktopUrl: jest.fn(async () => 'http://desktop'), + } as any); + + const titleGenerationService = {} as any; + + return new TasksService( + prisma, + tasksGateway, + configService, + eventEmitter, + taskControllerService, + titleGenerationService, + ); + }; + + it('returns typed 409 and does not mutate task state when takeover is not available', async () => { + const prisma = { + task: { + findUnique: jest.fn(async () => makeTask()), + update: jest.fn(), + }, + } as any; + + const taskControllerService = { + isPhase6Enabled: jest.fn(() => true), + getDesktopTakeoverAvailability: jest.fn(async () => ({ + ok: false, + reason: 'DESKTOP_TIMED_OUT', + details: { timeoutAt: '2026-01-22T00:00:00.000Z' }, + })), + } as any; + + const tasksGateway = { emitTaskUpdate: jest.fn() } as any; + const eventEmitter = { emit: jest.fn() } as any; + + const service = makeService({ + prisma, + taskControllerService, + tasksGateway, + eventEmitter, + }); + + let thrown: any = null; + try { + await service.takeOver('t-1'); + } catch (error) { + thrown = error; + } + + expect(thrown).toBeInstanceOf(ConflictException); + expect(thrown.getResponse()).toMatchObject({ + errorCode: 'TAKEOVER_NOT_AVAILABLE', + details: { reason: 'DESKTOP_TIMED_OUT' }, + }); + + expect(prisma.task.update).not.toHaveBeenCalled(); + expect(tasksGateway.emitTaskUpdate).not.toHaveBeenCalled(); + expect(eventEmitter.emit).not.toHaveBeenCalled(); + }); +}); diff --git a/packages/bytebot-agent/src/tasks/tasks.service.ts b/packages/bytebot-agent/src/tasks/tasks.service.ts index 8f7995833..da7797cec 100644 --- a/packages/bytebot-agent/src/tasks/tasks.service.ts +++ b/packages/bytebot-agent/src/tasks/tasks.service.ts @@ -3,6 +3,7 @@ import { NotFoundException, Logger, BadRequestException, + ConflictException, Inject, forwardRef, } from '@nestjs/common'; @@ -17,11 +18,26 @@ import { TaskType, TaskPriority, File, + ExecutionSurface, } from '@prisma/client'; import { AddTaskMessageDto } from './dto/add-task-message.dto'; import { TasksGateway } from './tasks.gateway'; import { ConfigService } from '@nestjs/config'; import { EventEmitter2 } from '@nestjs/event-emitter'; +import { TaskControllerService } from '../task-controller/task-controller.service'; +import { TitleGenerationService } from './title-generation.service'; + +/** + * v2.0.28: Maximum retry attempts for optimistic locking conflicts + */ +const MAX_UPDATE_RETRIES = 3; + +/** + * v2.2.5: Lease timeout for orphaned task recovery (in seconds) + * Tasks running longer than this without renewal are considered orphaned + * Default: 5 minutes = 300 seconds + */ +const TASK_LEASE_TIMEOUT_SECONDS = 300; @Injectable() export class TasksService { @@ -33,6 +49,8 @@ export class TasksService { private readonly tasksGateway: TasksGateway, private readonly configService: ConfigService, private readonly eventEmitter: EventEmitter2, + private readonly taskControllerService: TaskControllerService, + private readonly titleGenerationService: TitleGenerationService, ) { this.logger.log('TasksService initialized'); } @@ -42,12 +60,24 @@ export class TasksService { `Creating new task with description: ${createTaskDto.description}`, ); + // PR5: Keep execution surface + requiresDesktop consistent. + // If a surface is explicitly provided, it wins; otherwise fall back to requiresDesktop. + const requestedSurface = createTaskDto.executionSurface; + const requiresDesktop = + requestedSurface === ExecutionSurface.DESKTOP + ? true + : requestedSurface === ExecutionSurface.TEXT_ONLY + ? false + : createTaskDto.requiresDesktop || false; + const task = await this.prisma.$transaction(async (prisma) => { // Create the task first this.logger.debug('Creating task record in database'); const task = await prisma.task.create({ data: { description: createTaskDto.description, + // v2.2.16: Include title if provided, otherwise will be generated async + title: createTaskDto.title || null, type: createTaskDto.type || TaskType.IMMEDIATE, priority: createTaskDto.priority || TaskPriority.MEDIUM, status: TaskStatus.PENDING, @@ -56,9 +86,25 @@ export class TasksService { ...(createTaskDto.scheduledFor ? { scheduledFor: createTaskDto.scheduledFor } : {}), + // v2.3.0 M4: Workflow context (null for Product 1 Tasks) + workspaceId: createTaskDto.workspaceId || null, + nodeRunId: createTaskDto.nodeRunId || null, + // v2.3.0 M4: Tool configuration from workflow node definition + allowedTools: createTaskDto.allowedTools || [], + gatewayToolsOnly: createTaskDto.gatewayToolsOnly || false, + highRiskTools: createTaskDto.highRiskTools || [], + // Phase 4: Execution surface constraint + requiresDesktop, + // PR5: Explicit execution surface (nullable for backwards compatibility) + executionSurface: requestedSurface || null, }, }); - this.logger.log(`Task created successfully with ID: ${task.id}`); + this.logger.log( + `Task created successfully with ID: ${task.id}` + + (task.workspaceId ? ` (workspace: ${task.workspaceId})` : '') + + (task.requiresDesktop ? ' [desktop required]' : '') + + (task.executionSurface ? ` [surface: ${task.executionSurface}]` : ''), + ); let filesDescription = ''; @@ -93,13 +139,57 @@ export class TasksService { } // Create the initial system message + // v2.4.0: Include goal context and previous step results for autonomous operation this.logger.debug(`Creating initial message for task ID: ${task.id}`); + + // Build enhanced message with context for multi-step goals + let messageText = createTaskDto.description; + + // Prepend context if this is part of a larger goal + if (createTaskDto.goalContext || createTaskDto.previousStepResults) { + const contextParts: string[] = []; + + if (createTaskDto.goalContext) { + contextParts.push(`**Overall Goal:** ${createTaskDto.goalContext}`); + } + + if (createTaskDto.previousStepResults) { + contextParts.push( + `**Previous Steps Completed:**\n${createTaskDto.previousStepResults}`, + ); + } + + contextParts.push(`**Current Task:** ${createTaskDto.description}`); + + // Build the full message with context + messageText = contextParts.join('\n\n'); + + // v2.4.1: Structured logging for context propagation + this.logger.log({ + message: 'Task created with goal context', + taskId: task.id, + hasGoalContext: !!createTaskDto.goalContext, + goalContextLength: createTaskDto.goalContext?.length || 0, + hasPreviousStepResults: !!createTaskDto.previousStepResults, + previousStepResultsLength: + createTaskDto.previousStepResults?.length || 0, + enhancedMessageLength: messageText.length, + nodeRunId: createTaskDto.nodeRunId || null, + workspaceId: createTaskDto.workspaceId || null, + }); + } + + // Append file description if present + if (filesDescription) { + messageText = `${messageText} ${filesDescription}`; + } + await prisma.message.create({ data: { content: [ { type: 'text', - text: `${createTaskDto.description} ${filesDescription}`, + text: messageText, }, ] as Prisma.InputJsonValue, role: Role.USER, @@ -113,9 +203,46 @@ export class TasksService { this.tasksGateway.emitTaskCreated(task); + // v2.2.16: Generate title asynchronously if not provided + // This runs in the background and doesn't block task creation + if (!createTaskDto.title) { + this.generateTitleAsync(task.id, createTaskDto.description); + } + return task; } + /** + * v2.2.16: Generates a title for a task asynchronously. + * Updates the task with the generated title and emits an update event. + * Errors are logged but don't affect the task. + */ + private async generateTitleAsync( + taskId: string, + description: string, + ): Promise { + try { + const title = + await this.titleGenerationService.generateTitle(description); + + // Update the task with the generated title + const updatedTask = await this.prisma.task.update({ + where: { id: taskId }, + data: { title }, + }); + + this.logger.log(`Generated title for task ${taskId}: "${title}"`); + + // Emit update so UI can refresh + this.tasksGateway.emitTaskUpdate(taskId, updatedTask); + } catch (error: any) { + // Log but don't fail - title generation is non-critical + this.logger.warn( + `Failed to generate title for task ${taskId}: ${error.message}`, + ); + } + } + async findScheduledTasks(): Promise { return this.prisma.task.findMany({ where: { @@ -155,6 +282,118 @@ export class TasksService { return task; } + /** + * v2.2.4: Atomically claim the next available PENDING task using SELECT FOR UPDATE SKIP LOCKED. + * + * This method prevents race conditions when multiple agent pods compete for tasks. + * It uses PostgreSQL's FOR UPDATE SKIP LOCKED to: + * 1. Lock the selected task row (preventing other transactions from modifying it) + * 2. Skip any already-locked rows (allowing concurrent agents to claim different tasks) + * 3. Atomically update the task status to RUNNING within the same transaction + * + * IMPORTANT (v2.2.4): Only claims PENDING tasks, not RUNNING tasks. + * The previous implementation (v2.2.3) included RUNNING tasks in the query, which caused + * a race condition: after pod-A claimed a task and its transaction committed, pod-B could + * query and find the same RUNNING task (since the lock was released), leading to both + * pods processing the same task simultaneously. + * + * Orphaned RUNNING tasks (from crashed pods) should be handled by a separate recovery + * mechanism, not by the normal task claiming process. + * + * @returns The claimed task with files, or null if no task is available + */ + async claimNextTask(): Promise<(Task & { files: File[] }) | null> { + const podName = process.env.POD_NAME || 'unknown'; + const timestamp = new Date().toISOString(); + + this.logger.log( + `[${timestamp}] [${podName}] Attempting to claim next task`, + ); + + try { + const claimedTask = await this.prisma.$transaction( + async (tx) => { + // Step 1: Select and lock the next available PENDING task using FOR UPDATE SKIP LOCKED + // v2.2.4: Only claim PENDING tasks - RUNNING tasks should not be re-claimed here + // This prevents the race condition where multiple pods claim the same task + const tasks = await tx.$queryRaw` + SELECT * FROM "Task" + WHERE status = 'PENDING' + ORDER BY priority DESC, "queuedAt" ASC NULLS LAST, "createdAt" ASC + LIMIT 1 + FOR UPDATE SKIP LOCKED + `; + + if (tasks.length === 0) { + this.logger.debug( + `[${timestamp}] [${podName}] No available tasks to claim`, + ); + return null; + } + + const task = tasks[0]; + this.logger.log( + `[${timestamp}] [${podName}] Locked task ${task.id} (status: ${task.status}), claiming it`, + ); + + // Step 2: Atomically update the task status to RUNNING with lease info + // v2.2.5: Set claimedBy and leaseExpiresAt for orphaned task recovery + const leaseExpiresAt = new Date( + Date.now() + TASK_LEASE_TIMEOUT_SECONDS * 1000, + ); + await tx.$executeRaw` + UPDATE "Task" + SET status = 'RUNNING', + "executedAt" = NOW(), + "claimedBy" = ${podName}, + "leaseExpiresAt" = ${leaseExpiresAt}, + version = version + 1 + WHERE id = ${task.id} + `; + + this.logger.log( + `[${timestamp}] [${podName}] Successfully claimed task ${task.id}`, + ); + + // Step 3: Fetch the complete task with files + const fetchedTask = await tx.task.findUnique({ + where: { id: task.id }, + include: { files: true }, + }); + + return fetchedTask; + }, + { + // Use SERIALIZABLE isolation for maximum safety + isolationLevel: Prisma.TransactionIsolationLevel.Serializable, + // Timeout after 10 seconds if lock cannot be acquired + timeout: 10000, + }, + ); + + // v2.2.13: Emit WebSocket event AFTER transaction completes successfully + // This notifies connected clients that the task status changed from PENDING to RUNNING + // The event is emitted outside the transaction to: + // 1. Not hold the DB transaction open during WebSocket broadcast + // 2. Only emit if the transaction committed successfully + // 3. Ensure data consistency - we emit what was actually committed + if (claimedTask) { + this.logger.log( + `[${timestamp}] [${podName}] Emitting task_updated event for task ${claimedTask.id} (status: RUNNING)`, + ); + this.tasksGateway.emitTaskUpdate(claimedTask.id, claimedTask); + } + + return claimedTask; + } catch (error: any) { + this.logger.error( + `[${timestamp}] [${podName}] Error claiming task: ${error.message}`, + ); + // Return null on error to allow retry on next cron cycle + return null; + } + } + async findAll( page = 1, limit = 10, @@ -214,36 +453,122 @@ export class TasksService { } } + /** + * Updates a task with optimistic locking to prevent race conditions. + * + * v2.0.28: Implemented optimistic locking using version field. + * Uses updateMany with version check to atomically detect concurrent modifications. + * Retries up to MAX_UPDATE_RETRIES times on conflict. + * + * @param id - Task ID to update + * @param updateTaskDto - Fields to update + * @returns Updated task + * @throws NotFoundException if task not found + * @throws ConflictException if concurrent modification detected after retries + */ async update(id: string, updateTaskDto: UpdateTaskDto): Promise { - this.logger.log(`Updating task with ID: ${id}`); - this.logger.debug(`Update data: ${JSON.stringify(updateTaskDto)}`); + const startTime = Date.now(); + const timestamp = new Date().toISOString(); - const existingTask = await this.findById(id); + this.logger.log( + `[${timestamp}] Updating task with ID: ${id} (status: ${updateTaskDto.status || 'unchanged'})`, + ); + this.logger.debug( + `[${timestamp}] Update data: ${JSON.stringify(updateTaskDto)}`, + ); - if (!existingTask) { - this.logger.warn(`Task with ID: ${id} not found for update`); - throw new NotFoundException(`Task with ID ${id} not found`); - } + let retries = 0; - let updatedTask = await this.prisma.task.update({ - where: { id }, - data: updateTaskDto, - }); + while (retries < MAX_UPDATE_RETRIES) { + // Step 1: Read current task with version + const existingTask = await this.findById(id); - if (updateTaskDto.status === TaskStatus.COMPLETED) { - this.eventEmitter.emit('task.completed', { taskId: id }); - } else if (updateTaskDto.status === TaskStatus.NEEDS_HELP) { - updatedTask = await this.takeOver(id); - } else if (updateTaskDto.status === TaskStatus.FAILED) { - this.eventEmitter.emit('task.failed', { taskId: id }); - } + if (!existingTask) { + this.logger.warn( + `[${timestamp}] Task with ID: ${id} not found for update`, + ); + throw new NotFoundException(`Task with ID ${id} not found`); + } - this.logger.log(`Successfully updated task ID: ${id}`); - this.logger.debug(`Updated task: ${JSON.stringify(updatedTask)}`); + const currentVersion = + (existingTask as Task & { version?: number }).version ?? 0; - this.tasksGateway.emitTaskUpdate(id, updatedTask); + this.logger.debug( + `[${new Date().toISOString()}] Task ${id} current state: status=${existingTask.status}, version=${currentVersion}`, + ); - return updatedTask; + // Step 2: Atomically update only if version matches + const result = await this.prisma.task.updateMany({ + where: { + id, + version: currentVersion, // Only update if version hasn't changed + }, + data: { + ...updateTaskDto, + version: { increment: 1 }, // Atomically increment version + }, + }); + + // Step 3: Check if update succeeded + if (result.count === 0) { + retries++; + this.logger.warn( + `[${new Date().toISOString()}] Concurrent modification detected for task ${id}, retry ${retries}/${MAX_UPDATE_RETRIES}`, + ); + + if (retries >= MAX_UPDATE_RETRIES) { + const errorMsg = `Task ${id} was modified by another process. Please retry.`; + this.logger.error( + `[${new Date().toISOString()}] ${errorMsg} (exhausted retries)`, + ); + throw new ConflictException(errorMsg); + } + + // Brief delay before retry (exponential backoff) + await new Promise((resolve) => + setTimeout(resolve, Math.pow(2, retries) * 50), + ); + continue; + } + + // Step 4: Fetch the updated task to return + let updatedTask = await this.prisma.task.findUnique({ where: { id } }); + + if (!updatedTask) { + throw new NotFoundException( + `Task with ID ${id} not found after update`, + ); + } + + const endTime = Date.now(); + this.logger.log( + `[${new Date().toISOString()}] Successfully updated task ID: ${id} (status: ${updatedTask.status}, version: ${(updatedTask as Task & { version?: number }).version}, took ${endTime - startTime}ms)`, + ); + this.logger.debug( + `[${new Date().toISOString()}] Updated task: ${JSON.stringify(updatedTask)}`, + ); + + // Step 5: Handle status-specific side effects + if (updateTaskDto.status === TaskStatus.COMPLETED) { + this.eventEmitter.emit('task.completed', { taskId: id }); + } else if (updateTaskDto.status === TaskStatus.NEEDS_HELP) { + updatedTask = await this.takeOver(id); + } else if (updateTaskDto.status === TaskStatus.FAILED) { + this.logger.warn( + `[${new Date().toISOString()}] Task ${id} marked as FAILED`, + ); + this.eventEmitter.emit('task.failed', { taskId: id }); + } + + this.tasksGateway.emitTaskUpdate(id, updatedTask); + + return updatedTask; + } + + // This should never be reached, but TypeScript needs it + throw new ConflictException( + `Task ${id} update failed after ${MAX_UPDATE_RETRIES} retries`, + ); } async delete(id: string): Promise { @@ -299,11 +624,13 @@ export class TasksService { }, }); + // Use per-task desktop endpoint from Task Controller (Phase 6) try { - await fetch( - `${this.configService.get('BYTEBOT_DESKTOP_BASE_URL')}/input-tracking/stop`, - { method: 'POST' }, + const desktopUrl = await this.taskControllerService.getDesktopUrl(taskId); + this.logger.log( + `Stopping input tracking for task ${taskId} at ${desktopUrl}`, ); + await fetch(`${desktopUrl}/input-tracking/stop`, { method: 'POST' }); } catch (error) { this.logger.error('Failed to stop input tracking', error); } @@ -321,9 +648,6 @@ export class TasksService { this.logger.log(`Taking over control for task ID: ${taskId}`); const task = await this.findById(taskId); - if (!task) { - throw new NotFoundException(`Task with ID ${taskId} not found`); - } if (task.control !== Role.ASSISTANT) { throw new BadRequestException( @@ -331,6 +655,26 @@ export class TasksService { ); } + // Guardrail: do not mutate task state if the desktop lease/session is no longer valid. + // When Phase 6 is enabled, the task controller is the source of truth for per-task desktop + // session existence and timeout. + let desktopUrlForTakeover: string | null = null; + if (this.taskControllerService.isPhase6Enabled()) { + const availability = + await this.taskControllerService.getDesktopTakeoverAvailability(taskId); + if (!availability.ok) { + throw new ConflictException({ + errorCode: 'TAKEOVER_NOT_AVAILABLE', + message: 'Desktop takeover is not available (session expired)', + details: { + reason: availability.reason, + ...(availability.details || {}), + }, + }); + } + desktopUrlForTakeover = availability.desktopUrl; + } + const updatedTask = await this.prisma.task.update({ where: { id: taskId }, data: { @@ -338,11 +682,15 @@ export class TasksService { }, }); + // Use per-task desktop endpoint from Task Controller (Phase 6) try { - await fetch( - `${this.configService.get('BYTEBOT_DESKTOP_BASE_URL')}/input-tracking/start`, - { method: 'POST' }, + const desktopUrl = + desktopUrlForTakeover || + (await this.taskControllerService.getDesktopUrl(taskId)); + this.logger.log( + `Starting input tracking for task ${taskId} at ${desktopUrl}`, ); + await fetch(`${desktopUrl}/input-tracking/start`, { method: 'POST' }); } catch (error) { this.logger.error('Failed to start input tracking', error); } @@ -389,4 +737,363 @@ export class TasksService { return updatedTask; } + + /** + * v2.2.5: Renew the lease for a running task. + * Called periodically by the agent processor to indicate the task is still being worked on. + * Extends the lease by TASK_LEASE_TIMEOUT_SECONDS from now. + * + * @param taskId - Task ID to renew lease for + * @param claimedBy - Pod name that should own the lease (for validation) + * @returns Updated task or null if lease renewal failed (task not owned by this pod) + */ + async renewLease(taskId: string, claimedBy: string): Promise { + const timestamp = new Date().toISOString(); + this.logger.debug( + `[${timestamp}] [${claimedBy}] Renewing lease for task ${taskId}`, + ); + + try { + const newLeaseExpiry = new Date( + Date.now() + TASK_LEASE_TIMEOUT_SECONDS * 1000, + ); + + // Only renew if this pod owns the lease and task is still RUNNING + const result = await this.prisma.task.updateMany({ + where: { + id: taskId, + status: TaskStatus.RUNNING, + claimedBy: claimedBy, + }, + data: { + leaseExpiresAt: newLeaseExpiry, + }, + }); + + if (result.count === 0) { + this.logger.warn( + `[${timestamp}] [${claimedBy}] Failed to renew lease for task ${taskId} - not owned by this pod or not running`, + ); + return null; + } + + this.logger.debug( + `[${timestamp}] [${claimedBy}] Lease renewed for task ${taskId} until ${newLeaseExpiry.toISOString()}`, + ); + return await this.prisma.task.findUnique({ where: { id: taskId } }); + } catch (error: any) { + this.logger.error( + `[${timestamp}] [${claimedBy}] Error renewing lease: ${error.message}`, + ); + return null; + } + } + + /** + * v2.2.5: Recover orphaned tasks that have expired leases. + * Called by a cron job to find RUNNING tasks with expired leases and mark them as FAILED. + * + * These tasks were likely being processed by a pod that crashed or became unresponsive. + * Marking as FAILED (rather than PENDING) is safer because: + * - The task may have partially corrupted state + * - Message history may be inconsistent + * - User should review before retrying + * + * @returns Array of recovered task IDs + */ + async recoverOrphanedTasks(): Promise { + const timestamp = new Date().toISOString(); + const podName = process.env.POD_NAME || 'unknown'; + this.logger.log(`[${timestamp}] [${podName}] Checking for orphaned tasks`); + + try { + // Find all RUNNING tasks with expired leases + const orphanedTasks = await this.prisma.task.findMany({ + where: { + status: TaskStatus.RUNNING, + leaseExpiresAt: { + lt: new Date(), // Lease has expired + }, + }, + select: { + id: true, + claimedBy: true, + leaseExpiresAt: true, + executedAt: true, + }, + }); + + if (orphanedTasks.length === 0) { + this.logger.debug( + `[${timestamp}] [${podName}] No orphaned tasks found`, + ); + return []; + } + + this.logger.warn( + `[${timestamp}] [${podName}] Found ${orphanedTasks.length} orphaned task(s)`, + ); + + const recoveredIds: string[] = []; + + for (const task of orphanedTasks) { + const leaseDuration = task.leaseExpiresAt + ? Math.round((Date.now() - task.leaseExpiresAt.getTime()) / 1000) + : 'unknown'; + + this.logger.warn( + `[${timestamp}] [${podName}] Recovering orphaned task ${task.id} ` + + `(claimed by: ${task.claimedBy}, lease expired ${leaseDuration}s ago)`, + ); + + try { + // Mark as FAILED with descriptive error + await this.prisma.task.update({ + where: { id: task.id }, + data: { + status: TaskStatus.FAILED, + error: `Task orphaned - worker ${task.claimedBy} stopped responding. Lease expired at ${task.leaseExpiresAt?.toISOString()}`, + claimedBy: null, + leaseExpiresAt: null, + }, + }); + + recoveredIds.push(task.id); + + // Emit event for frontend notification + this.eventEmitter.emit('task.failed', { taskId: task.id }); + this.tasksGateway.emitTaskUpdate( + task.id, + await this.findById(task.id), + ); + + this.logger.log( + `[${timestamp}] [${podName}] Task ${task.id} marked as FAILED due to lease expiration`, + ); + } catch (error: any) { + this.logger.error( + `[${timestamp}] [${podName}] Failed to recover task ${task.id}: ${error.message}`, + ); + } + } + + return recoveredIds; + } catch (error: any) { + this.logger.error( + `[${timestamp}] [${podName}] Error recovering orphaned tasks: ${error.message}`, + ); + return []; + } + } + + /** + * v2.2.5: Clear lease information when a task completes or fails. + * Should be called when a task transitions to a terminal state. + * + * @param taskId - Task ID to clear lease for + */ + async clearLease(taskId: string): Promise { + const timestamp = new Date().toISOString(); + const podName = process.env.POD_NAME || 'unknown'; + + try { + await this.prisma.task.update({ + where: { id: taskId }, + data: { + claimedBy: null, + leaseExpiresAt: null, + }, + }); + this.logger.debug( + `[${timestamp}] [${podName}] Cleared lease for task ${taskId}`, + ); + } catch (error: any) { + // Non-critical - log and continue + this.logger.warn( + `[${timestamp}] [${podName}] Failed to clear lease for task ${taskId}: ${error.message}`, + ); + } + } + + /** + * v2.2.17: Backfill titles for existing tasks that don't have AI-generated titles. + * + * This method is idempotent - it only processes tasks where title IS NULL. + * Uses batching with delays to avoid overwhelming the LLM proxy. + * Individual task failures don't stop the entire backfill process. + * + * @param options - Optional configuration for the backfill + * @returns Summary of the backfill operation + */ + async backfillTitles( + options: { + batchSize?: number; + delayMs?: number; + limit?: number; + dryRun?: boolean; + } = {}, + ): Promise<{ + total: number; + successful: number; + failed: number; + skipped: number; + errors: Array<{ taskId: string; error: string }>; + durationMs: number; + }> { + const { + batchSize = 10, + delayMs = 2000, + limit = 0, + dryRun = false, + } = options; + + const startTime = Date.now(); + const timestamp = new Date().toISOString(); + + this.logger.log( + `[${timestamp}] Starting title backfill (batchSize=${batchSize}, delayMs=${delayMs}, limit=${limit}, dryRun=${dryRun})`, + ); + + // Find all tasks without titles (idempotent query) + let query = this.prisma.task.findMany({ + where: { + title: null, + }, + orderBy: { + createdAt: 'asc', + }, + select: { + id: true, + description: true, + }, + }); + + // Apply limit if specified + if (limit > 0) { + query = this.prisma.task.findMany({ + where: { + title: null, + }, + orderBy: { + createdAt: 'asc', + }, + select: { + id: true, + description: true, + }, + take: limit, + }); + } + + const tasksToProcess = await query; + + const result = { + total: tasksToProcess.length, + successful: 0, + failed: 0, + skipped: 0, + errors: [] as Array<{ taskId: string; error: string }>, + durationMs: 0, + }; + + if (tasksToProcess.length === 0) { + this.logger.log( + `[${timestamp}] No tasks without titles found. Backfill complete.`, + ); + result.durationMs = Date.now() - startTime; + return result; + } + + this.logger.log( + `[${timestamp}] Found ${tasksToProcess.length} tasks without titles`, + ); + + // Process in batches + for (let i = 0; i < tasksToProcess.length; i += batchSize) { + const batch = tasksToProcess.slice(i, i + batchSize); + const batchNum = Math.floor(i / batchSize) + 1; + const totalBatches = Math.ceil(tasksToProcess.length / batchSize); + + this.logger.log( + `[${new Date().toISOString()}] Processing batch ${batchNum}/${totalBatches} (${batch.length} tasks)`, + ); + + // Process batch concurrently + const batchResults = await Promise.allSettled( + batch.map(async (task) => { + if (dryRun) { + this.logger.debug( + `[DRY RUN] Would generate title for task ${task.id}`, + ); + return { taskId: task.id, title: '[DRY RUN - NOT GENERATED]' }; + } + + try { + const title = await this.titleGenerationService.generateTitle( + task.description, + ); + + // Update the task with the generated title + await this.prisma.task.update({ + where: { id: task.id }, + data: { title }, + }); + + this.logger.log(`Task ${task.id}: "${title}"`); + return { taskId: task.id, title }; + } catch (error: any) { + throw { taskId: task.id, error: error.message }; + } + }), + ); + + // Process results + for (const batchResult of batchResults) { + if (batchResult.status === 'fulfilled') { + result.successful++; + } else { + result.failed++; + const errorInfo = batchResult.reason as { + taskId: string; + error: string; + }; + result.errors.push(errorInfo); + this.logger.warn( + `Task ${errorInfo.taskId}: FAILED - ${errorInfo.error}`, + ); + } + } + + // Progress log + const progress = ( + ((i + batch.length) / tasksToProcess.length) * + 100 + ).toFixed(1); + this.logger.log( + `[${new Date().toISOString()}] Progress: ${i + batch.length}/${tasksToProcess.length} (${progress}%) - ` + + `Success: ${result.successful}, Failed: ${result.failed}`, + ); + + // Delay between batches (except for the last batch) + if (i + batchSize < tasksToProcess.length) { + this.logger.debug(`Waiting ${delayMs}ms before next batch...`); + await new Promise((resolve) => setTimeout(resolve, delayMs)); + } + } + + result.durationMs = Date.now() - startTime; + const successRate = ((result.successful / result.total) * 100).toFixed(2); + + this.logger.log(` +===== TITLE BACKFILL COMPLETE ===== +Duration: ${(result.durationMs / 1000).toFixed(2)}s +Total: ${result.total} +Successful: ${result.successful} +Failed: ${result.failed} +Success Rate: ${successRate}% +=================================== + `); + + return result; + } } diff --git a/packages/bytebot-agent/src/tasks/title-generation.service.ts b/packages/bytebot-agent/src/tasks/title-generation.service.ts new file mode 100644 index 000000000..0fd14e881 --- /dev/null +++ b/packages/bytebot-agent/src/tasks/title-generation.service.ts @@ -0,0 +1,200 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import OpenAI from 'openai'; + +/** + * v2.2.16: Title Generation Service + * v2.2.17: Fixed model compatibility - switched from gpt-oss-120b to gpt-4o-mini + * for better OpenAI SDK compatibility + * + * Generates concise, AI-generated titles for tasks using the gpt-4o-mini model. + * Titles are used in the task list UI to provide a scannable summary instead of + * displaying the full task description. + * + * Best practices followed: + * - Target length: 50 characters (optimal for scanning) + * - Maximum length: 72 characters (hard limit) + * - Style: Imperative mood ("Fix bug", "Add feature") + * - No period at end + * - Capitalize first word only + */ + +const TITLE_GENERATION_PROMPT = `Generate a concise task title based on the following task description. + +Requirements: +- Maximum 50 characters +- Use imperative mood (e.g., "Fix login bug", "Add user authentication") +- No period at the end +- Capitalize only the first word +- Focus on the main action/goal + +Respond with ONLY the title, nothing else. + +Task description:`; + +// v2.2.17: Changed from gpt-oss-120b to gpt-4o-mini for better SDK compatibility +const TITLE_MODEL = 'gpt-4o-mini'; +const MAX_TITLE_LENGTH = 72; +const TARGET_TITLE_LENGTH = 50; + +@Injectable() +export class TitleGenerationService { + private readonly openai: OpenAI | null; + private readonly logger = new Logger(TitleGenerationService.name); + private readonly enabled: boolean; + + constructor(private readonly configService: ConfigService) { + const proxyUrl = this.configService.get('BYTEBOT_LLM_PROXY_URL'); + + if (!proxyUrl) { + this.logger.warn( + 'BYTEBOT_LLM_PROXY_URL is not set. Title generation will use fallback.', + ); + this.enabled = false; + this.openai = null; + } else { + this.enabled = true; + // Initialize OpenAI client with LiteLLM proxy + this.openai = new OpenAI({ + apiKey: 'dummy-key-for-proxy', + baseURL: proxyUrl, + }); + this.logger.log(`TitleGenerationService initialized with model ${TITLE_MODEL}`); + } + } + + /** + * Generates a title for a task description. + * Falls back to truncated description if AI generation fails. + * + * @param description - The full task description + * @returns A concise title (max 72 characters) + */ + async generateTitle(description: string): Promise { + // If AI generation is disabled, use fallback + if (!this.enabled) { + return this.fallbackTitle(description); + } + + try { + const title = await this.generateTitleWithAI(description); + const sanitized = this.sanitizeTitle(title); + + // v2.2.17: If AI returns empty string, use fallback + if (!sanitized || sanitized.length === 0) { + this.logger.warn('AI returned empty title, using fallback'); + return this.fallbackTitle(description); + } + + return sanitized; + } catch (error: any) { + this.logger.warn( + `Failed to generate title with AI: ${error.message}. Using fallback.`, + ); + return this.fallbackTitle(description); + } + } + + /** + * Generates a title using the AI model + */ + private async generateTitleWithAI(description: string): Promise { + if (!this.openai) { + throw new Error('OpenAI client not initialized'); + } + + const startTime = Date.now(); + + // Truncate very long descriptions to avoid token limits + const truncatedDescription = + description.length > 500 ? description.substring(0, 500) + '...' : description; + + const completion = await this.openai.chat.completions.create({ + model: TITLE_MODEL, + messages: [ + { + role: 'user', + content: `${TITLE_GENERATION_PROMPT}\n\n${truncatedDescription}`, + }, + ], + max_tokens: 50, + temperature: 0.3, // Low temperature for consistent, predictable output + }); + + const title = completion.choices[0]?.message?.content?.trim() || ''; + const duration = Date.now() - startTime; + + this.logger.debug( + `Generated title in ${duration}ms: "${title}" (${title.length} chars)`, + ); + + return title; + } + + /** + * Sanitizes and validates the generated title + */ + private sanitizeTitle(title: string): string { + // Remove any leading/trailing whitespace + let sanitized = title.trim(); + + // Remove surrounding quotes if present + if ( + (sanitized.startsWith('"') && sanitized.endsWith('"')) || + (sanitized.startsWith("'") && sanitized.endsWith("'")) + ) { + sanitized = sanitized.slice(1, -1); + } + + // Remove trailing period + if (sanitized.endsWith('.')) { + sanitized = sanitized.slice(0, -1); + } + + // Ensure first letter is capitalized + if (sanitized.length > 0) { + sanitized = sanitized.charAt(0).toUpperCase() + sanitized.slice(1); + } + + // Truncate if too long + if (sanitized.length > MAX_TITLE_LENGTH) { + sanitized = sanitized.substring(0, MAX_TITLE_LENGTH - 3).trim() + '...'; + } + + return sanitized; + } + + /** + * Fallback title generation when AI is unavailable + * Extracts the first sentence or truncates intelligently + */ + private fallbackTitle(description: string): string { + // Try to get the first sentence + const firstSentenceMatch = description.match(/^[^.!?]+[.!?]?/); + let title = firstSentenceMatch + ? firstSentenceMatch[0].trim() + : description.trim(); + + // Remove trailing punctuation + title = title.replace(/[.!?]+$/, ''); + + // Truncate if still too long + if (title.length > TARGET_TITLE_LENGTH) { + // Try to truncate at a word boundary + const truncated = title.substring(0, TARGET_TITLE_LENGTH); + const lastSpace = truncated.lastIndexOf(' '); + if (lastSpace > TARGET_TITLE_LENGTH - 15) { + title = truncated.substring(0, lastSpace) + '...'; + } else { + title = truncated.trim() + '...'; + } + } + + // Ensure first letter is capitalized + if (title.length > 0) { + title = title.charAt(0).toUpperCase() + title.slice(1); + } + + return title; + } +} diff --git a/packages/bytebot-agent/src/tools/tool-executor.service.ts b/packages/bytebot-agent/src/tools/tool-executor.service.ts new file mode 100644 index 000000000..4b9f628b0 --- /dev/null +++ b/packages/bytebot-agent/src/tools/tool-executor.service.ts @@ -0,0 +1,387 @@ +/** + * Tool Executor Service + * v2.3.0 M4: Routes tool execution between desktop and gateway tools + * + * This service provides a unified interface for tool execution that: + * 1. Routes desktop tools to the workspace desktop (computer use) + * 2. Routes gateway tools to Butler Service Gateway (web APIs) + * 3. Enforces tool restrictions based on workflow configuration + * 4. Handles high-risk tool gating (approval required) + * 5. Supports gatewayToolsOnly mode for non-desktop workflows + * + * For Product 1 (Tasks): All desktop tools available, no gateway tools + * For Product 2 (Workflows): Both desktop and gateway tools based on config + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { GatewayService, GatewayToolResult, GATEWAY_TOOLS, GatewayToolName } from '../gateway/gateway.service'; +import { WorkspaceService } from '../workspace/workspace.service'; +import { + MessageContentType, + ToolResultContentBlock, +} from '@bytebot/shared'; + +/** + * Tool execution context from task/workflow + */ +export interface ToolExecutionContext { + taskId: string; + workspaceId?: string | null; + nodeRunId?: string | null; + allowedTools: string[]; + gatewayToolsOnly: boolean; + highRiskTools: string[]; + desktopUrl?: string; +} + +/** + * Tool execution request + */ +export interface ToolExecutionRequest { + toolId: string; + toolName: string; + toolInput: Record; +} + +/** + * Tool execution result + */ +export interface ToolExecutionResult { + toolResultBlock: ToolResultContentBlock; + requiresApproval: boolean; + approvalRequestId?: string; + isHighRisk: boolean; + executionTimeMs: number; +} + +/** + * Desktop tool names (computer use) + */ +const DESKTOP_TOOLS = [ + 'computer', + 'click', + 'type', + 'key', + 'scroll', + 'screenshot', + 'move', + 'drag', + 'cursor_position', +] as const; + +type DesktopToolName = (typeof DESKTOP_TOOLS)[number]; + +@Injectable() +export class ToolExecutorService { + private readonly logger = new Logger(ToolExecutorService.name); + + constructor( + private readonly gatewayService: GatewayService, + private readonly workspaceService: WorkspaceService, + ) { + this.logger.log('ToolExecutorService initialized'); + } + + /** + * Check if a tool is a desktop tool + */ + isDesktopTool(toolName: string): boolean { + return DESKTOP_TOOLS.includes(toolName as DesktopToolName); + } + + /** + * Check if a tool is a gateway tool + */ + isGatewayTool(toolName: string): boolean { + return this.gatewayService.isGatewayTool(toolName); + } + + /** + * Check if a tool is allowed in the given context + */ + isToolAllowed(toolName: string, context: ToolExecutionContext): boolean { + // If gatewayToolsOnly, only allow gateway tools + if (context.gatewayToolsOnly) { + return this.isGatewayTool(toolName); + } + + // If no allowed tools specified, allow all tools + if (!context.allowedTools || context.allowedTools.length === 0) { + return true; + } + + // Check if tool is in allowed list + // Also allow desktop tools if not explicitly restricted + if (this.isDesktopTool(toolName)) { + return context.allowedTools.includes('computer') || context.allowedTools.includes(toolName); + } + + return context.allowedTools.includes(toolName); + } + + /** + * Check if a tool requires approval + */ + isHighRiskTool(toolName: string, context: ToolExecutionContext): boolean { + // Check gateway service for default high-risk status + if (this.gatewayService.isHighRiskTool(toolName)) { + return true; + } + + // Check workflow-specific high-risk list + if (context.highRiskTools && context.highRiskTools.length > 0) { + return context.highRiskTools.includes(toolName); + } + + return false; + } + + /** + * Get available tools for a context + */ + getAvailableTools(context: ToolExecutionContext): { + desktop: string[]; + gateway: typeof GATEWAY_TOOLS[GatewayToolName][]; + } { + let desktopTools: string[] = []; + let gatewayTools = this.gatewayService.getAllTools(); + + // If not gatewayToolsOnly, include desktop tools + if (!context.gatewayToolsOnly) { + desktopTools = [...DESKTOP_TOOLS]; + } + + // Filter by allowed list if specified + if (context.allowedTools && context.allowedTools.length > 0) { + desktopTools = desktopTools.filter( + (tool) => context.allowedTools.includes(tool) || context.allowedTools.includes('computer'), + ); + gatewayTools = gatewayTools.filter((tool) => context.allowedTools.includes(tool.name)); + } + + return { desktop: desktopTools, gateway: gatewayTools }; + } + + /** + * Execute a gateway tool + */ + async executeGatewayTool( + request: ToolExecutionRequest, + context: ToolExecutionContext, + ): Promise { + const startTime = Date.now(); + const isHighRisk = this.isHighRiskTool(request.toolName, context); + + // Check if tool is allowed + if (!this.isToolAllowed(request.toolName, context)) { + return { + toolResultBlock: { + type: MessageContentType.ToolResult, + tool_use_id: request.toolId, + is_error: true, + content: [ + { + type: MessageContentType.Text, + text: `Tool "${request.toolName}" is not allowed in this workflow context`, + }, + ], + }, + requiresApproval: false, + isHighRisk, + executionTimeMs: Date.now() - startTime, + }; + } + + // Execute via gateway service + const result = await this.gatewayService.executeTool({ + toolName: request.toolName, + parameters: request.toolInput, + taskId: context.taskId, + nodeRunId: context.nodeRunId || undefined, + workspaceId: context.workspaceId || undefined, + }); + + // Handle approval-required response + if (result.requiresApproval) { + return { + toolResultBlock: { + type: MessageContentType.ToolResult, + tool_use_id: request.toolId, + is_error: false, // Not an error, just waiting for approval + content: [ + { + type: MessageContentType.Text, + text: `Action requires human approval. Approval request ID: ${result.approvalRequestId}. Please wait for approval before proceeding.`, + }, + ], + }, + requiresApproval: true, + approvalRequestId: result.approvalRequestId, + isHighRisk: true, + executionTimeMs: result.executionTimeMs, + }; + } + + // Return tool result + return { + toolResultBlock: { + type: MessageContentType.ToolResult, + tool_use_id: request.toolId, + is_error: !result.success, + content: [ + { + type: MessageContentType.Text, + text: result.success + ? JSON.stringify(result.result, null, 2) + : `Error: ${result.error}`, + }, + ], + }, + requiresApproval: false, + isHighRisk, + executionTimeMs: result.executionTimeMs, + }; + } + + /** + * Wait for approval and execute tool + */ + async waitForApprovalAndExecute( + approvalRequestId: string, + request: ToolExecutionRequest, + context: ToolExecutionContext, + options: { + pollIntervalMs?: number; + timeoutMs?: number; + } = {}, + ): Promise { + const { pollIntervalMs = 5000, timeoutMs = 300000 } = options; // 5 minute timeout by default + const startTime = Date.now(); + + this.logger.log(`Waiting for approval: ${approvalRequestId}`); + + while (Date.now() - startTime < timeoutMs) { + const status = await this.gatewayService.checkApprovalStatus(approvalRequestId); + + switch (status.status) { + case 'approved': + this.logger.log(`Approval granted for ${approvalRequestId}, executing tool`); + const result = await this.gatewayService.executeApprovedTool(approvalRequestId, { + toolName: request.toolName, + parameters: request.toolInput, + taskId: context.taskId, + nodeRunId: context.nodeRunId || undefined, + workspaceId: context.workspaceId || undefined, + }); + + return { + toolResultBlock: { + type: MessageContentType.ToolResult, + tool_use_id: request.toolId, + is_error: !result.success, + content: [ + { + type: MessageContentType.Text, + text: result.success + ? JSON.stringify(result.result, null, 2) + : `Error: ${result.error}`, + }, + ], + }, + requiresApproval: false, + isHighRisk: true, + executionTimeMs: Date.now() - startTime, + }; + + case 'rejected': + this.logger.log(`Approval rejected for ${approvalRequestId}: ${status.reason}`); + return { + toolResultBlock: { + type: MessageContentType.ToolResult, + tool_use_id: request.toolId, + is_error: true, + content: [ + { + type: MessageContentType.Text, + text: `Action was rejected by human reviewer${status.reason ? `: ${status.reason}` : ''}`, + }, + ], + }, + requiresApproval: false, + isHighRisk: true, + executionTimeMs: Date.now() - startTime, + }; + + case 'expired': + this.logger.log(`Approval expired for ${approvalRequestId}`); + return { + toolResultBlock: { + type: MessageContentType.ToolResult, + tool_use_id: request.toolId, + is_error: true, + content: [ + { + type: MessageContentType.Text, + text: 'Action approval request expired. Please try again if the action is still needed.', + }, + ], + }, + requiresApproval: false, + isHighRisk: true, + executionTimeMs: Date.now() - startTime, + }; + + case 'pending': + default: + // Continue waiting + await this.sleep(pollIntervalMs); + } + } + + // Timeout + return { + toolResultBlock: { + type: MessageContentType.ToolResult, + tool_use_id: request.toolId, + is_error: true, + content: [ + { + type: MessageContentType.Text, + text: `Timeout waiting for approval after ${timeoutMs / 1000} seconds`, + }, + ], + }, + requiresApproval: false, + isHighRisk: true, + executionTimeMs: Date.now() - startTime, + }; + } + + /** + * Build tool execution context from task + */ + buildContextFromTask(task: { + id: string; + workspaceId?: string | null; + nodeRunId?: string | null; + allowedTools?: string[]; + gatewayToolsOnly?: boolean; + highRiskTools?: string[]; + }): ToolExecutionContext { + return { + taskId: task.id, + workspaceId: task.workspaceId, + nodeRunId: task.nodeRunId, + allowedTools: task.allowedTools || [], + gatewayToolsOnly: task.gatewayToolsOnly || false, + highRiskTools: task.highRiskTools || [], + }; + } + + /** + * Helper to sleep for a given number of milliseconds + */ + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} diff --git a/packages/bytebot-agent/src/tools/tools.module.ts b/packages/bytebot-agent/src/tools/tools.module.ts new file mode 100644 index 000000000..b6394a3d1 --- /dev/null +++ b/packages/bytebot-agent/src/tools/tools.module.ts @@ -0,0 +1,17 @@ +/** + * Tools Module + * v2.3.0 M4: Provides unified tool execution with routing between desktop and gateway + */ + +import { Module, Global } from '@nestjs/common'; +import { ToolExecutorService } from './tool-executor.service'; +import { GatewayModule } from '../gateway/gateway.module'; +import { WorkspaceModule } from '../workspace/workspace.module'; + +@Global() +@Module({ + imports: [GatewayModule, WorkspaceModule], + providers: [ToolExecutorService], + exports: [ToolExecutorService], +}) +export class ToolsModule {} diff --git a/packages/bytebot-agent/src/workspace/workspace.module.ts b/packages/bytebot-agent/src/workspace/workspace.module.ts new file mode 100644 index 000000000..df18b2752 --- /dev/null +++ b/packages/bytebot-agent/src/workspace/workspace.module.ts @@ -0,0 +1,16 @@ +/** + * Workspace Module + * v2.3.0 M4: Provides workspace-aware desktop resolution and granular locking + */ + +import { Module, Global } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { WorkspaceService } from './workspace.service'; + +@Global() +@Module({ + imports: [ConfigModule], + providers: [WorkspaceService], + exports: [WorkspaceService], +}) +export class WorkspaceModule {} diff --git a/packages/bytebot-agent/src/workspace/workspace.service.ts b/packages/bytebot-agent/src/workspace/workspace.service.ts new file mode 100644 index 000000000..dd1be4727 --- /dev/null +++ b/packages/bytebot-agent/src/workspace/workspace.service.ts @@ -0,0 +1,533 @@ +/** + * Workspace Service + * v2.3.0 M4: Workspace-aware desktop resolution and granular locking + * + * This service handles: + * 1. Desktop endpoint resolution for persistent workspaces (Product 2: Workflows) + * 2. Granular lock acquisition/release during desktop tool execution + * 3. Integration with the workflow orchestrator for workspace status + * + * For Product 1 (Tasks), workspaceId is null and the existing TaskControllerService is used. + * For Product 2 (Workflows), this service manages workspace-specific endpoints and locking. + */ + +import { Injectable, Logger, OnModuleDestroy } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; + +/** + * Workspace status from orchestrator + */ +export interface WorkspaceInfo { + id: string; + tenantId: string; + status: 'CREATING' | 'READY' | 'HIBERNATED' | 'TERMINATED' | 'FAILED'; + desktopEndpoint: string | null; + vncEndpoint: string | null; + lockOwnerNodeRunId: string | null; + lockExpiresAt: string | null; + lastHeartbeatAt: string | null; +} + +/** + * Lock acquisition result + */ +export interface LockAcquisitionResult { + acquired: boolean; + message: string; + lockExpiresAt?: string; + retryAfterMs?: number; +} + +/** + * Lock renewal result + */ +export interface LockRenewalResult { + renewed: boolean; + message: string; + lockExpiresAt?: string; +} + +/** + * Default lock lease duration in seconds (30 seconds for desktop tool batches) + */ +const DEFAULT_LOCK_LEASE_SECONDS = 30; + +/** + * Lock renewal threshold - renew when less than this many seconds remaining + */ +const LOCK_RENEWAL_THRESHOLD_SECONDS = 10; + +@Injectable() +export class WorkspaceService implements OnModuleDestroy { + private readonly logger = new Logger(WorkspaceService.name); + private readonly orchestratorUrl: string; + private readonly internalToken: string; + private readonly lockLeaseSeconds: number; + + // Cache workspace info to avoid repeated API calls + private readonly workspaceCache: Map = new Map(); + private readonly cacheTtlMs: number = 10000; // 10 second cache + + // Track active locks for automatic renewal + private readonly activeLocks: Map = new Map(); + + constructor(private readonly configService: ConfigService) { + // Workflow orchestrator URL for workspace operations + this.orchestratorUrl = this.configService.get( + 'WORKFLOW_ORCHESTRATOR_URL', + '', + ); + + // Internal service token for authenticated requests + this.internalToken = this.configService.get( + 'INTERNAL_SERVICE_TOKEN', + '', + ); + + // Lock lease duration (configurable) + this.lockLeaseSeconds = parseInt( + this.configService.get('WORKSPACE_LOCK_LEASE_SECONDS', String(DEFAULT_LOCK_LEASE_SECONDS)), + 10, + ); + + if (!this.orchestratorUrl) { + this.logger.warn( + 'WORKFLOW_ORCHESTRATOR_URL not set - workspace features disabled (Product 1 only)', + ); + } else { + this.logger.log(`Workflow Orchestrator URL: ${this.orchestratorUrl}`); + this.logger.log(`Workspace lock lease: ${this.lockLeaseSeconds} seconds`); + } + } + + onModuleDestroy() { + // Clear all renewal timers and release locks + for (const [key, lock] of this.activeLocks) { + clearTimeout(lock.renewalTimer); + // Best-effort release on shutdown + this.releaseLock(lock.workspaceId, lock.nodeRunId).catch((err) => { + this.logger.warn(`Failed to release lock on shutdown: ${err.message}`); + }); + } + this.activeLocks.clear(); + this.workspaceCache.clear(); + } + + /** + * Check if workspace features are enabled (orchestrator configured) + */ + isWorkspaceEnabled(): boolean { + return !!this.orchestratorUrl; + } + + /** + * Get workspace info from orchestrator + */ + async getWorkspaceInfo(workspaceId: string): Promise { + if (!this.isWorkspaceEnabled()) { + return null; + } + + // Check cache first + const cached = this.workspaceCache.get(workspaceId); + if (cached && Date.now() < cached.expiresAt) { + return cached.info; + } + + try { + const url = `${this.orchestratorUrl}/api/v1/workspaces/${workspaceId}`; + this.logger.debug(`Fetching workspace info from ${url}`); + + const response = await fetch(url, { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + }, + }); + + if (!response.ok) { + if (response.status === 404) { + this.logger.warn(`Workspace ${workspaceId} not found`); + return null; + } + throw new Error(`Orchestrator returned ${response.status}`); + } + + const workspaceInfo: WorkspaceInfo = await response.json(); + + // Cache the result + this.workspaceCache.set(workspaceId, { + info: workspaceInfo, + expiresAt: Date.now() + this.cacheTtlMs, + }); + + this.logger.debug( + `Workspace ${workspaceId}: status=${workspaceInfo.status}, desktop=${workspaceInfo.desktopEndpoint}`, + ); + + return workspaceInfo; + } catch (error: any) { + this.logger.error(`Failed to fetch workspace ${workspaceId}: ${error.message}`); + return null; + } + } + + /** + * Get desktop URL for a workspace + * Returns null if workspace not ready or doesn't exist + */ + async getDesktopUrl(workspaceId: string): Promise { + const info = await this.getWorkspaceInfo(workspaceId); + + if (!info) { + return null; + } + + if (info.status !== 'READY') { + this.logger.warn(`Workspace ${workspaceId} not ready: status=${info.status}`); + return null; + } + + return info.desktopEndpoint; + } + + /** + * Wake a hibernated workspace + */ + async wakeWorkspace(workspaceId: string): Promise { + if (!this.isWorkspaceEnabled()) { + return false; + } + + try { + const url = `${this.orchestratorUrl}/api/v1/workspaces/${workspaceId}/wake`; + this.logger.log(`Waking workspace ${workspaceId}`); + + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + }, + }); + + if (!response.ok) { + throw new Error(`Wake request failed with status ${response.status}`); + } + + // Invalidate cache + this.workspaceCache.delete(workspaceId); + + this.logger.log(`Workspace ${workspaceId} wake initiated`); + return true; + } catch (error: any) { + this.logger.error(`Failed to wake workspace ${workspaceId}: ${error.message}`); + return false; + } + } + + /** + * Wait for workspace to be ready + * Handles HIBERNATED -> READY transition + */ + async waitForWorkspaceReady( + workspaceId: string, + options: { + timeoutMs?: number; + pollIntervalMs?: number; + } = {}, + ): Promise { + const { + timeoutMs = 120000, // 2 minute timeout (includes wake time) + pollIntervalMs = 2000, // Poll every 2 seconds + } = options; + + const startTime = Date.now(); + + while (Date.now() - startTime < timeoutMs) { + // Invalidate cache to get fresh status + this.workspaceCache.delete(workspaceId); + + const info = await this.getWorkspaceInfo(workspaceId); + + if (!info) { + throw new Error(`Workspace ${workspaceId} not found`); + } + + switch (info.status) { + case 'READY': + if (info.desktopEndpoint) { + this.logger.log(`Workspace ${workspaceId} ready: ${info.desktopEndpoint}`); + return info.desktopEndpoint; + } + throw new Error(`Workspace ${workspaceId} ready but no desktop endpoint`); + + case 'HIBERNATED': + this.logger.log(`Workspace ${workspaceId} hibernated, waking...`); + await this.wakeWorkspace(workspaceId); + break; + + case 'CREATING': + this.logger.debug(`Workspace ${workspaceId} still creating...`); + break; + + case 'TERMINATED': + case 'FAILED': + throw new Error(`Workspace ${workspaceId} in terminal state: ${info.status}`); + + default: + this.logger.warn(`Workspace ${workspaceId} in unknown state: ${info.status}`); + } + + // Wait before next poll + await this.sleep(pollIntervalMs); + } + + throw new Error(`Timeout waiting for workspace ${workspaceId} to be ready`); + } + + /** + * Acquire a granular lock on the workspace for desktop tool execution + * + * The lock is held only during active desktop tool execution (30-60 seconds), + * NOT for the entire node run. This allows concurrent non-desktop work. + */ + async acquireLock( + workspaceId: string, + nodeRunId: string, + leaseSeconds?: number, + ): Promise { + if (!this.isWorkspaceEnabled()) { + // In non-workspace mode, always succeed (no locking needed) + return { acquired: true, message: 'Workspace features disabled' }; + } + + const lease = leaseSeconds || this.lockLeaseSeconds; + + try { + const url = `${this.orchestratorUrl}/api/v1/workspaces/${workspaceId}/lock`; + this.logger.log(`Acquiring lock on workspace ${workspaceId} for nodeRun ${nodeRunId} (${lease}s lease)`); + + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + }, + body: JSON.stringify({ + nodeRunId, + leaseSeconds: lease, + }), + }); + + const result = await response.json(); + + if (!response.ok) { + // Lock contention - another node run holds the lock + if (response.status === 409) { + this.logger.warn(`Lock contention on workspace ${workspaceId}: ${result.message}`); + return { + acquired: false, + message: result.message || 'Lock held by another node run', + retryAfterMs: result.retryAfterMs || 5000, + }; + } + throw new Error(`Lock acquisition failed: ${response.status}`); + } + + this.logger.log(`Lock acquired on workspace ${workspaceId} for nodeRun ${nodeRunId}`); + + // Set up automatic renewal + const lockKey = `${workspaceId}:${nodeRunId}`; + const expiresAt = new Date(result.lockExpiresAt); + + // Schedule renewal before expiry + const renewalMs = Math.max((lease - LOCK_RENEWAL_THRESHOLD_SECONDS) * 1000, 5000); + const renewalTimer = setTimeout(() => { + this.renewLockInternal(workspaceId, nodeRunId).catch((err) => { + this.logger.error(`Lock renewal failed: ${err.message}`); + }); + }, renewalMs); + + this.activeLocks.set(lockKey, { + workspaceId, + nodeRunId, + expiresAt, + renewalTimer, + }); + + return { + acquired: true, + message: 'Lock acquired', + lockExpiresAt: result.lockExpiresAt, + }; + } catch (error: any) { + this.logger.error(`Failed to acquire lock on workspace ${workspaceId}: ${error.message}`); + return { + acquired: false, + message: `Lock acquisition error: ${error.message}`, + }; + } + } + + /** + * Internal lock renewal (called by timer) + */ + private async renewLockInternal(workspaceId: string, nodeRunId: string): Promise { + const lockKey = `${workspaceId}:${nodeRunId}`; + const activeLock = this.activeLocks.get(lockKey); + + if (!activeLock) { + this.logger.debug(`Lock ${lockKey} no longer active, skipping renewal`); + return; + } + + const result = await this.renewLock(workspaceId, nodeRunId); + + if (result.renewed) { + // Schedule next renewal + const renewalMs = Math.max((this.lockLeaseSeconds - LOCK_RENEWAL_THRESHOLD_SECONDS) * 1000, 5000); + activeLock.expiresAt = new Date(result.lockExpiresAt!); + activeLock.renewalTimer = setTimeout(() => { + this.renewLockInternal(workspaceId, nodeRunId).catch((err) => { + this.logger.error(`Lock renewal failed: ${err.message}`); + }); + }, renewalMs); + } else { + // Lock lost, clean up + this.activeLocks.delete(lockKey); + this.logger.warn(`Lock renewal failed for ${lockKey}, lock lost`); + } + } + + /** + * Renew an existing lock + */ + async renewLock(workspaceId: string, nodeRunId: string): Promise { + if (!this.isWorkspaceEnabled()) { + return { renewed: true, message: 'Workspace features disabled' }; + } + + try { + const url = `${this.orchestratorUrl}/api/v1/workspaces/${workspaceId}/lock/renew`; + this.logger.debug(`Renewing lock on workspace ${workspaceId} for nodeRun ${nodeRunId}`); + + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + }, + body: JSON.stringify({ + nodeRunId, + leaseSeconds: this.lockLeaseSeconds, + }), + }); + + if (!response.ok) { + const error = await response.json(); + return { + renewed: false, + message: error.message || `Renewal failed with status ${response.status}`, + }; + } + + const result = await response.json(); + this.logger.debug(`Lock renewed on workspace ${workspaceId}, expires ${result.lockExpiresAt}`); + + return { + renewed: true, + message: 'Lock renewed', + lockExpiresAt: result.lockExpiresAt, + }; + } catch (error: any) { + this.logger.error(`Failed to renew lock on workspace ${workspaceId}: ${error.message}`); + return { + renewed: false, + message: `Renewal error: ${error.message}`, + }; + } + } + + /** + * Release a lock on the workspace + */ + async releaseLock(workspaceId: string, nodeRunId: string): Promise { + const lockKey = `${workspaceId}:${nodeRunId}`; + + // Clear renewal timer + const activeLock = this.activeLocks.get(lockKey); + if (activeLock) { + clearTimeout(activeLock.renewalTimer); + this.activeLocks.delete(lockKey); + } + + if (!this.isWorkspaceEnabled()) { + return true; + } + + try { + const url = `${this.orchestratorUrl}/api/v1/workspaces/${workspaceId}/lock`; + this.logger.log(`Releasing lock on workspace ${workspaceId} for nodeRun ${nodeRunId}`); + + const response = await fetch(url, { + method: 'DELETE', + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + }, + body: JSON.stringify({ nodeRunId }), + }); + + if (!response.ok && response.status !== 404) { + throw new Error(`Lock release failed: ${response.status}`); + } + + this.logger.log(`Lock released on workspace ${workspaceId}`); + return true; + } catch (error: any) { + this.logger.error(`Failed to release lock on workspace ${workspaceId}: ${error.message}`); + return false; + } + } + + /** + * Check if this nodeRun currently holds the lock + */ + hasActiveLock(workspaceId: string, nodeRunId: string): boolean { + const lockKey = `${workspaceId}:${nodeRunId}`; + const lock = this.activeLocks.get(lockKey); + + if (!lock) { + return false; + } + + // Check if lock has expired + if (lock.expiresAt < new Date()) { + this.activeLocks.delete(lockKey); + return false; + } + + return true; + } + + /** + * Invalidate cached workspace info + */ + invalidateCache(workspaceId: string): void { + this.workspaceCache.delete(workspaceId); + } + + /** + * Helper to sleep for a given number of milliseconds + */ + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} diff --git a/packages/bytebot-temporal-worker/Dockerfile b/packages/bytebot-temporal-worker/Dockerfile new file mode 100644 index 000000000..c674e1314 --- /dev/null +++ b/packages/bytebot-temporal-worker/Dockerfile @@ -0,0 +1,93 @@ +# ByteBot Temporal Worker - Production Dockerfile +# +# Multi-stage build for minimal image size and security. +# Based on Node.js 20 LTS with glibc (required by Temporal SDK). +# +# NOTE: Temporal SDK requires glibc - cannot use Alpine (musl) base image. + +# ============================================================================ +# Stage 1: Builder +# ============================================================================ + +FROM node:20-slim AS builder + +# Install build dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3 make g++ git \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +# Copy package files +COPY package*.json ./ +COPY tsconfig.json ./ +COPY nest-cli.json ./ + +# Install dependencies +RUN npm ci --only=production=false + +# Copy source code +COPY src/ ./src/ + +# Build application +RUN npm run build + +# ============================================================================ +# Stage 2: Production Dependencies +# ============================================================================ + +FROM node:20-slim AS deps + +WORKDIR /app + +COPY package*.json ./ + +# Install production dependencies only +RUN npm ci --only=production && npm cache clean --force + +# ============================================================================ +# Stage 3: Production Runtime +# ============================================================================ + +FROM node:20-slim AS runtime + +# Install curl for health checks (must be done before USER switch) +RUN apt-get update && apt-get install -y --no-install-recommends curl \ + && rm -rf /var/lib/apt/lists/* + +# Security: Run as non-root user +RUN groupadd -g 1001 nodejs && \ + useradd -u 1001 -g nodejs -m bytebot + +WORKDIR /app + +# Copy production dependencies +COPY --from=deps /app/node_modules ./node_modules + +# Copy built application +COPY --from=builder /app/dist ./dist +COPY --from=builder /app/package.json ./ + +# Set ownership +RUN chown -R bytebot:nodejs /app + +# Switch to non-root user +USER bytebot + +# Environment defaults +ENV NODE_ENV=production +ENV HTTP_PORT=3000 +ENV TEMPORAL_ADDRESS=temporal-frontend.temporal.svc.cluster.local:7233 +ENV TEMPORAL_NAMESPACE=bytebot +ENV TEMPORAL_TASK_QUEUE=bytebot-goal-runs + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ + CMD curl -sf http://localhost:${HTTP_PORT}/health/live || exit 1 + +# Expose ports +EXPOSE 3000 +EXPOSE 9464 + +# Start command - runs both HTTP server and Temporal worker +CMD ["node", "dist/worker.js"] diff --git a/packages/bytebot-temporal-worker/k8s/deployment.yaml b/packages/bytebot-temporal-worker/k8s/deployment.yaml new file mode 100644 index 000000000..530431c66 --- /dev/null +++ b/packages/bytebot-temporal-worker/k8s/deployment.yaml @@ -0,0 +1,244 @@ +--- +# Butler Vantage Temporal Worker - Kubernetes Deployment +# +# Production-ready deployment with: +# - 3 replicas for high availability +# - Resource limits and requests +# - Health probes (liveness, readiness, startup) +# - Pod anti-affinity for distribution +# - Security context for non-root execution +# +# Version History: +# - 1.3.1: Phase 13.3 - Enhanced planning prompt with desktop agent capabilities +# - 1.3.0: Phase 13.0 - Resilience and recovery improvements +# - 1.2.0: Phase 12 - In-house model routing + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: bytebot-temporal-worker + namespace: bytebot + labels: + app: bytebot-temporal-worker + component: worker + tier: backend +spec: + replicas: 3 + selector: + matchLabels: + app: bytebot-temporal-worker + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 1 + template: + metadata: + labels: + app: bytebot-temporal-worker + component: worker + tier: backend + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9464" + prometheus.io/path: "/metrics" + spec: + serviceAccountName: bytebot-temporal-worker + securityContext: + runAsNonRoot: true + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + + # Spread across nodes for HA + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app: bytebot-temporal-worker + topologyKey: kubernetes.io/hostname + + containers: + - name: worker + image: jbutler1980/bytebot-temporal-worker:1.3.1 + imagePullPolicy: Always + + ports: + - name: http + containerPort: 3000 + protocol: TCP + - name: metrics + containerPort: 9464 + protocol: TCP + + env: + # Temporal connection + - name: TEMPORAL_ADDRESS + value: "temporal-frontend.temporal.svc.cluster.local:7233" + - name: TEMPORAL_NAMESPACE + value: "bytebot" + - name: TEMPORAL_TASK_QUEUE + value: "bytebot-goal-runs" + + # Worker concurrency (tune based on pod resources) + - name: TEMPORAL_MAX_CONCURRENT_ACTIVITIES + value: "50" + - name: TEMPORAL_MAX_CONCURRENT_WORKFLOWS + value: "100" + - name: TEMPORAL_MAX_CACHED_WORKFLOWS + value: "500" + + # Kafka connection (core cluster - existing deployment) + - name: KAFKA_BROKERS + value: "core-cluster-kafka-bootstrap.kafka.svc.cluster.local:9092" + - name: KAFKA_CLIENT_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + + # ByteBot services + - name: ORCHESTRATOR_URL + value: "http://bytebot-workflow-orchestrator.bytebot.svc.cluster.local:3000" + - name: TASK_CONTROLLER_URL + value: "http://bytebot-task-controller.bytebot.svc.cluster.local:3000" + - name: LLM_PROXY_URL + value: "http://bytebot-llm-proxy.bytebot.svc.cluster.local:3000" + + # Observability + - name: TEMPORAL_METRICS_ENABLED + value: "true" + - name: TEMPORAL_METRICS_PORT + value: "9464" + + # Node.js settings + - name: NODE_ENV + value: "production" + - name: NODE_OPTIONS + value: "--max-old-space-size=1024" + + resources: + requests: + cpu: "250m" + memory: "512Mi" + limits: + cpu: "1000m" + memory: "1536Mi" + + # Startup probe - allows 2 minutes for initialization + startupProbe: + httpGet: + path: /health/startup + port: http + failureThreshold: 24 + periodSeconds: 5 + timeoutSeconds: 3 + + # Liveness probe - is the process alive? + livenessProbe: + httpGet: + path: /health/live + port: http + initialDelaySeconds: 0 + periodSeconds: 15 + timeoutSeconds: 10 + failureThreshold: 3 + + # Readiness probe - is it ready to receive work? + readinessProbe: + httpGet: + path: /health/ready + port: http + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + volumeMounts: + - name: tmp + mountPath: /tmp + + volumes: + - name: tmp + emptyDir: {} + + terminationGracePeriodSeconds: 60 + +--- +# Service Account +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bytebot-temporal-worker + namespace: bytebot + +--- +# Service (for health checks and metrics) +apiVersion: v1 +kind: Service +metadata: + name: bytebot-temporal-worker + namespace: bytebot + labels: + app: bytebot-temporal-worker +spec: + type: ClusterIP + ports: + - name: http + port: 3000 + targetPort: http + - name: metrics + port: 9464 + targetPort: metrics + selector: + app: bytebot-temporal-worker + +--- +# PodDisruptionBudget for high availability +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: bytebot-temporal-worker + namespace: bytebot +spec: + minAvailable: 2 + selector: + matchLabels: + app: bytebot-temporal-worker + +--- +# HorizontalPodAutoscaler +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: bytebot-temporal-worker + namespace: bytebot +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: bytebot-temporal-worker + minReplicas: 3 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 70 + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 80 diff --git a/packages/bytebot-temporal-worker/k8s/kafka-topics.yaml b/packages/bytebot-temporal-worker/k8s/kafka-topics.yaml new file mode 100644 index 000000000..749dfaff5 --- /dev/null +++ b/packages/bytebot-temporal-worker/k8s/kafka-topics.yaml @@ -0,0 +1,162 @@ +--- +# ByteBot Kafka Topics - Event Sourcing for Temporal Workflows +# +# Topic naming convention: .. +# Follows enterprise Kafka best practices from Confluent and AWS MSK + +# ============================================================================ +# Goal Events Topic +# ============================================================================ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: bytebot.goal.events + namespace: kafka + labels: + strimzi.io/cluster: core-cluster + app: bytebot + component: temporal-worker +spec: + partitions: 12 + replicas: 3 + config: + # Retention: 30 days for event sourcing + retention.ms: "2592000000" + # Cleanup policy: delete old segments + cleanup.policy: delete + # Segment size: 500MB + segment.bytes: "524288000" + # Min in-sync replicas for durability + min.insync.replicas: "2" + # Compression: gzip for good compression ratio + compression.type: gzip + # Maximum message size: 10MB + max.message.bytes: "10485760" + +--- +# ============================================================================ +# Step Events Topic +# ============================================================================ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: bytebot.step.events + namespace: kafka + labels: + strimzi.io/cluster: core-cluster + app: bytebot + component: temporal-worker +spec: + partitions: 24 # Higher partition count for step-level granularity + replicas: 3 + config: + # Retention: 14 days for step events + retention.ms: "1209600000" + cleanup.policy: delete + segment.bytes: "268435456" # 256MB segments + min.insync.replicas: "2" + compression.type: gzip + max.message.bytes: "5242880" # 5MB max + +--- +# ============================================================================ +# Audit Log Topic +# ============================================================================ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: bytebot.audit.log + namespace: kafka + labels: + strimzi.io/cluster: core-cluster + app: bytebot + component: temporal-worker + compliance: audit +spec: + partitions: 6 + replicas: 3 + config: + # Retention: 1 year for compliance + retention.ms: "31536000000" + # Use compaction + deletion for audit logs + cleanup.policy: "compact,delete" + segment.bytes: "1073741824" # 1GB segments + min.insync.replicas: "2" + compression.type: gzip + # Keep at least 1 year of data before compaction + min.compaction.lag.ms: "86400000" + max.message.bytes: "5242880" + +--- +# ============================================================================ +# Dead Letter Queue Topic +# ============================================================================ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: bytebot.dlq + namespace: kafka + labels: + strimzi.io/cluster: core-cluster + app: bytebot + component: temporal-worker +spec: + partitions: 6 + replicas: 3 + config: + # Retention: 90 days for debugging failed messages + retention.ms: "7776000000" + cleanup.policy: delete + segment.bytes: "268435456" + min.insync.replicas: "2" + compression.type: gzip + +--- +# ============================================================================ +# Metrics Events Topic (for real-time dashboards) +# ============================================================================ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: bytebot.metrics.events + namespace: kafka + labels: + strimzi.io/cluster: core-cluster + app: bytebot + component: temporal-worker +spec: + partitions: 12 + replicas: 3 + config: + # Retention: 24 hours for real-time metrics + retention.ms: "86400000" + cleanup.policy: delete + segment.bytes: "134217728" # 128MB segments + min.insync.replicas: "2" + compression.type: lz4 # Fast compression for metrics + +--- +# ============================================================================ +# Workflow State Changes Topic (for CDC/replays) +# ============================================================================ +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: bytebot.workflow.state + namespace: kafka + labels: + strimzi.io/cluster: core-cluster + app: bytebot + component: temporal-worker +spec: + partitions: 12 + replicas: 3 + config: + # Compacted topic for latest state + cleanup.policy: compact + segment.bytes: "268435456" + min.insync.replicas: "2" + compression.type: gzip + # Aggressive compaction settings + min.cleanable.dirty.ratio: "0.1" + delete.retention.ms: "86400000" diff --git a/packages/bytebot-temporal-worker/k8s/temporal-worker-egress.yaml b/packages/bytebot-temporal-worker/k8s/temporal-worker-egress.yaml new file mode 100644 index 000000000..97af63b36 --- /dev/null +++ b/packages/bytebot-temporal-worker/k8s/temporal-worker-egress.yaml @@ -0,0 +1,75 @@ +# Phase 13.1: Temporal Worker Egress Policy with CIDR Fallback +# +# Research-based improvements (Phase 13): +# - toEntities: all requires identity resolution which can fail during sync +# - CIDR fallback rules catch traffic before identity resolution fails +# - Explicit DNS rules ensure service discovery works +# - This prevents transient EPERM errors during ClusterMesh identity sync +# +# Cluster CIDRs (Pod Networks): +# - aiml: 192.173.0.0/16 (LiteLLM, LLM models) +# - core: 192.169.0.0/16 (Temporal, Kafka) +# - agent: 192.171.0.0/16 (local services) +# - database: 192.170.0.0/16 (PostgreSQL) +# - edge: 192.172.0.0/16 (edge services) +# - store: 192.174.0.0/16 (storage services) +# +# Service CIDRs: +# - 10.144.0.0/12 (all cluster service IPs) +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: temporal-worker-egress + namespace: bytebot + labels: + app.kubernetes.io/component: temporal-worker + app.kubernetes.io/part-of: bytebot + phase: "13.1" +spec: + description: "Phase 13.1: Temporal worker egress with CIDR fallback for identity sync reliability" + endpointSelector: + matchLabels: + app: bytebot-temporal-worker + egress: + # Rule 1: Allow all entity-based traffic (primary) + # This works when identity is properly synchronized + - toEntities: + - all + + # Rule 2: CIDR fallback for cross-cluster pod networks + # Catches traffic during identity synchronization window (200-500ms, up to seconds under load) + # This prevents EPERM errors when identity hasn't propagated yet + - toCIDRSet: + # All cluster pod CIDRs + - cidr: 192.168.0.0/16 # Generic private range + - cidr: 192.169.0.0/16 # core cluster pods + - cidr: 192.170.0.0/16 # database cluster pods + - cidr: 192.171.0.0/16 # agent cluster pods (local) + - cidr: 192.172.0.0/16 # edge cluster pods + - cidr: 192.173.0.0/16 # aiml cluster pods (LiteLLM) + - cidr: 192.174.0.0/16 # store cluster pods + # Service CIDRs + - cidr: 10.144.0.0/12 # Kubernetes service IPs + - cidr: 10.96.0.0/12 # Default Kubernetes service CIDR + + # Rule 3: Explicit DNS egress (critical for service discovery) + - toEndpoints: + - matchLabels: + k8s:io.kubernetes.pod.namespace: kube-system + k8s-app: kube-dns + toPorts: + - ports: + - port: "53" + protocol: UDP + - port: "53" + protocol: TCP + + # Rule 4: External access (world entity for external APIs) + - toEntities: + - world + toPorts: + - ports: + - port: "443" + protocol: TCP + - port: "80" + protocol: TCP diff --git a/packages/bytebot-temporal-worker/nest-cli.json b/packages/bytebot-temporal-worker/nest-cli.json new file mode 100644 index 000000000..579ca3540 --- /dev/null +++ b/packages/bytebot-temporal-worker/nest-cli.json @@ -0,0 +1,10 @@ +{ + "$schema": "https://json.schemastore.org/nest-cli", + "collection": "@nestjs/schematics", + "sourceRoot": "src", + "compilerOptions": { + "deleteOutDir": true, + "builder": "swc", + "typeCheck": true + } +} diff --git a/packages/bytebot-temporal-worker/package-lock.json b/packages/bytebot-temporal-worker/package-lock.json new file mode 100644 index 000000000..5bd438599 --- /dev/null +++ b/packages/bytebot-temporal-worker/package-lock.json @@ -0,0 +1,11115 @@ +{ + "name": "bytebot-temporal-worker", + "version": "1.3.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "bytebot-temporal-worker", + "version": "1.3.0", + "license": "UNLICENSED", + "dependencies": { + "@nestjs/common": "^11.0.1", + "@nestjs/config": "^4.0.2", + "@nestjs/core": "^11.0.1", + "@nestjs/event-emitter": "^3.0.0", + "@nestjs/platform-express": "^11.1.5", + "@nestjs/terminus": "^11.0.0", + "@prisma/client": "^6.16.1", + "@temporalio/activity": "^1.11.0", + "@temporalio/client": "^1.11.0", + "@temporalio/common": "^1.11.0", + "@temporalio/worker": "^1.11.0", + "@temporalio/workflow": "^1.11.0", + "@willsoto/nestjs-prometheus": "^6.0.1", + "axios": "^1.7.9", + "class-transformer": "^0.5.1", + "class-validator": "^0.14.2", + "kafkajs": "^2.2.4", + "opossum": "^8.1.4", + "prom-client": "^15.1.3", + "reflect-metadata": "^0.2.2", + "rxjs": "^7.8.1", + "zod": "^3.24.1" + }, + "devDependencies": { + "@nestjs/cli": "^11.0.0", + "@nestjs/schematics": "^11.0.0", + "@nestjs/testing": "^11.0.1", + "@swc/cli": "^0.7.9", + "@swc/core": "^1.15.8", + "@temporalio/testing": "^1.11.0", + "@types/express": "^5.0.0", + "@types/jest": "^29.5.14", + "@types/node": "^22.10.7", + "@types/opossum": "^8.1.0", + "eslint": "^9.18.0", + "jest": "^29.7.0", + "prettier": "^3.4.2", + "prisma": "^6.16.1", + "rimraf": "^6.0.1", + "source-map-support": "^0.5.21", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.3", + "webpack": "^5.89.0", + "webpack-cli": "^5.1.4" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/@angular-devkit/core": { + "version": "19.2.19", + "resolved": "https://registry.npmjs.org/@angular-devkit/core/-/core-19.2.19.tgz", + "integrity": "sha512-JbLL+4IMLMBgjLZlnPG4lYDfz4zGrJ/s6Aoon321NJKuw1Kb1k5KpFu9dUY0BqLIe8xPQ2UJBpI+xXdK5MXMHQ==", + "dev": true, + "dependencies": { + "ajv": "8.17.1", + "ajv-formats": "3.0.1", + "jsonc-parser": "3.3.1", + "picomatch": "4.0.2", + "rxjs": "7.8.1", + "source-map": "0.7.4" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + }, + "peerDependencies": { + "chokidar": "^4.0.0" + }, + "peerDependenciesMeta": { + "chokidar": { + "optional": true + } + } + }, + "node_modules/@angular-devkit/core/node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/@angular-devkit/schematics": { + "version": "19.2.19", + "resolved": "https://registry.npmjs.org/@angular-devkit/schematics/-/schematics-19.2.19.tgz", + "integrity": "sha512-J4Jarr0SohdrHcb40gTL4wGPCQ952IMWF1G/MSAQfBAPvA9ZKApYhpxcY7PmehVePve+ujpus1dGsJ7dPxz8Kg==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.19", + "jsonc-parser": "3.3.1", + "magic-string": "0.30.17", + "ora": "5.4.1", + "rxjs": "7.8.1" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + } + }, + "node_modules/@angular-devkit/schematics-cli": { + "version": "19.2.19", + "resolved": "https://registry.npmjs.org/@angular-devkit/schematics-cli/-/schematics-cli-19.2.19.tgz", + "integrity": "sha512-7q9UY6HK6sccL9F3cqGRUwKhM7b/XfD2YcVaZ2WD7VMaRlRm85v6mRjSrfKIAwxcQU0UK27kMc79NIIqaHjzxA==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.19", + "@angular-devkit/schematics": "19.2.19", + "@inquirer/prompts": "7.3.2", + "ansi-colors": "4.1.3", + "symbol-observable": "4.0.0", + "yargs-parser": "21.1.1" + }, + "bin": { + "schematics": "bin/schematics.js" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + } + }, + "node_modules/@angular-devkit/schematics-cli/node_modules/@inquirer/prompts": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/@inquirer/prompts/-/prompts-7.3.2.tgz", + "integrity": "sha512-G1ytyOoHh5BphmEBxSwALin3n1KGNYB6yImbICcRQdzXfOGbuJ9Jske/Of5Sebk339NSGGNfUshnzK8YWkTPsQ==", + "dev": true, + "dependencies": { + "@inquirer/checkbox": "^4.1.2", + "@inquirer/confirm": "^5.1.6", + "@inquirer/editor": "^4.2.7", + "@inquirer/expand": "^4.0.9", + "@inquirer/input": "^4.1.6", + "@inquirer/number": "^3.0.9", + "@inquirer/password": "^4.0.9", + "@inquirer/rawlist": "^4.0.9", + "@inquirer/search": "^3.0.9", + "@inquirer/select": "^4.0.9" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@angular-devkit/schematics/node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz", + "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz", + "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@borewit/text-codec": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.2.1.tgz", + "integrity": "sha512-k7vvKPbf7J2fZ5klGRD9AeKfUvojuZIQ3BT5u7Jfv+puwXkUBUT5PVyMDfJZpy30CBDXGMgw7fguK/lpOMBvgw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "dev": true, + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "dev": true, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/@eslint/js": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@grpc/grpc-js": { + "version": "1.14.3", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.14.3.tgz", + "integrity": "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA==", + "dependencies": { + "@grpc/proto-loader": "^0.8.0", + "@js-sdsl/ordered-map": "^4.4.2" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "node_modules/@grpc/proto-loader": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.8.0.tgz", + "integrity": "sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==", + "dependencies": { + "lodash.camelcase": "^4.3.0", + "long": "^5.0.0", + "protobufjs": "^7.5.3", + "yargs": "^17.7.2" + }, + "bin": { + "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@inquirer/ansi": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@inquirer/ansi/-/ansi-1.0.2.tgz", + "integrity": "sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ==", + "dev": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/checkbox": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/@inquirer/checkbox/-/checkbox-4.3.2.tgz", + "integrity": "sha512-VXukHf0RR1doGe6Sm4F0Em7SWYLTHSsbGfJdS9Ja2bX5/D5uwVOEjr07cncLROdBvmnvCATYEWlHqYmXv2IlQA==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/core": "^10.3.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/confirm": { + "version": "5.1.21", + "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.21.tgz", + "integrity": "sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/core": { + "version": "10.3.2", + "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.3.2.tgz", + "integrity": "sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "cli-width": "^4.1.0", + "mute-stream": "^2.0.0", + "signal-exit": "^4.1.0", + "wrap-ansi": "^6.2.0", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/editor": { + "version": "4.2.23", + "resolved": "https://registry.npmjs.org/@inquirer/editor/-/editor-4.2.23.tgz", + "integrity": "sha512-aLSROkEwirotxZ1pBaP8tugXRFCxW94gwrQLxXfrZsKkfjOYC1aRvAZuhpJOb5cu4IBTJdsCigUlf2iCOu4ZDQ==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/external-editor": "^1.0.3", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/expand": { + "version": "4.0.23", + "resolved": "https://registry.npmjs.org/@inquirer/expand/-/expand-4.0.23.tgz", + "integrity": "sha512-nRzdOyFYnpeYTTR2qFwEVmIWypzdAx/sIkCMeTNTcflFOovfqUk+HcFhQQVBftAh9gmGrpFj6QcGEqrDMDOiew==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/external-editor": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@inquirer/external-editor/-/external-editor-1.0.3.tgz", + "integrity": "sha512-RWbSrDiYmO4LbejWY7ttpxczuwQyZLBUyygsA9Nsv95hpzUWwnNTVQmAq3xuh7vNwCp07UTmE5i11XAEExx4RA==", + "dev": true, + "dependencies": { + "chardet": "^2.1.1", + "iconv-lite": "^0.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/figures": { + "version": "1.0.15", + "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.15.tgz", + "integrity": "sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==", + "dev": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/input": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/@inquirer/input/-/input-4.3.1.tgz", + "integrity": "sha512-kN0pAM4yPrLjJ1XJBjDxyfDduXOuQHrBB8aLDMueuwUGn+vNpF7Gq7TvyVxx8u4SHlFFj4trmj+a2cbpG4Jn1g==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/number": { + "version": "3.0.23", + "resolved": "https://registry.npmjs.org/@inquirer/number/-/number-3.0.23.tgz", + "integrity": "sha512-5Smv0OK7K0KUzUfYUXDXQc9jrf8OHo4ktlEayFlelCjwMXz0299Y8OrI+lj7i4gCBY15UObk76q0QtxjzFcFcg==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/password": { + "version": "4.0.23", + "resolved": "https://registry.npmjs.org/@inquirer/password/-/password-4.0.23.tgz", + "integrity": "sha512-zREJHjhT5vJBMZX/IUbyI9zVtVfOLiTO66MrF/3GFZYZ7T4YILW5MSkEYHceSii/KtRk+4i3RE7E1CUXA2jHcA==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/prompts": { + "version": "7.10.1", + "resolved": "https://registry.npmjs.org/@inquirer/prompts/-/prompts-7.10.1.tgz", + "integrity": "sha512-Dx/y9bCQcXLI5ooQ5KyvA4FTgeo2jYj/7plWfV5Ak5wDPKQZgudKez2ixyfz7tKXzcJciTxqLeK7R9HItwiByg==", + "dev": true, + "dependencies": { + "@inquirer/checkbox": "^4.3.2", + "@inquirer/confirm": "^5.1.21", + "@inquirer/editor": "^4.2.23", + "@inquirer/expand": "^4.0.23", + "@inquirer/input": "^4.3.1", + "@inquirer/number": "^3.0.23", + "@inquirer/password": "^4.0.23", + "@inquirer/rawlist": "^4.1.11", + "@inquirer/search": "^3.2.2", + "@inquirer/select": "^4.4.2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/rawlist": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@inquirer/rawlist/-/rawlist-4.1.11.tgz", + "integrity": "sha512-+LLQB8XGr3I5LZN/GuAHo+GpDJegQwuPARLChlMICNdwW7OwV2izlCSCxN6cqpL0sMXmbKbFcItJgdQq5EBXTw==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/search": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@inquirer/search/-/search-3.2.2.tgz", + "integrity": "sha512-p2bvRfENXCZdWF/U2BXvnSI9h+tuA8iNqtUKb9UWbmLYCRQxd8WkvwWvYn+3NgYaNwdUkHytJMGG4MMLucI1kA==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/select": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@inquirer/select/-/select-4.4.2.tgz", + "integrity": "sha512-l4xMuJo55MAe+N7Qr4rX90vypFwCajSakx59qe/tMaC1aEHWLyw68wF4o0A4SLAY4E0nd+Vt+EyskeDIqu1M6w==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/core": "^10.3.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/type": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.10.tgz", + "integrity": "sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA==", + "dev": true, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", + "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + "dev": true, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", + "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + "dev": true, + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/reporters/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.11.tgz", + "integrity": "sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@js-sdsl/ordered-map": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", + "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "node_modules/@jsonjoy.com/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-q6XAnWQDIMA3+FTiOYajoYqySkO+JSat0ytXGSuRdq9uXE7o92gzuQwQM14xaCRlBLGq3v5miDGC4vkVTn54xA==", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/buffers": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/buffers/-/buffers-1.2.1.tgz", + "integrity": "sha512-12cdlDwX4RUM3QxmUbVJWqZ/mrK6dFQH4Zxq6+r1YXKXYBNgZXndx2qbCJwh3+WWkCSn67IjnlG3XYTvmvYtgA==", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/codegen": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/codegen/-/codegen-1.0.0.tgz", + "integrity": "sha512-E8Oy+08cmCf0EK/NMxpaJZmOxPqM+6iSe2S4nlSBrPZOORoDJILxtbSUEDKQyTamm/BVAhIGllOBNU79/dwf0g==", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/json-pack": { + "version": "1.21.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-1.21.0.tgz", + "integrity": "sha512-+AKG+R2cfZMShzrF2uQw34v3zbeDYUqnQ+jg7ORic3BGtfw9p/+N6RJbq/kkV8JmYZaINknaEQ2m0/f693ZPpg==", + "dependencies": { + "@jsonjoy.com/base64": "^1.1.2", + "@jsonjoy.com/buffers": "^1.2.0", + "@jsonjoy.com/codegen": "^1.0.0", + "@jsonjoy.com/json-pointer": "^1.0.2", + "@jsonjoy.com/util": "^1.9.0", + "hyperdyperid": "^1.2.0", + "thingies": "^2.5.0", + "tree-dump": "^1.1.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/json-pointer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pointer/-/json-pointer-1.0.2.tgz", + "integrity": "sha512-Fsn6wM2zlDzY1U+v4Nc8bo3bVqgfNTGcn6dMgs6FjrEnt4ZCe60o6ByKRjOGlI2gow0aE/Q41QOigdTqkyK5fg==", + "dependencies": { + "@jsonjoy.com/codegen": "^1.0.0", + "@jsonjoy.com/util": "^1.9.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@jsonjoy.com/util": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/util/-/util-1.9.0.tgz", + "integrity": "sha512-pLuQo+VPRnN8hfPqUTLTHk126wuYdXVxE6aDmjSeV4NCAgyxWbiOIeNJVtID3h1Vzpoi9m4jXezf73I6LgabgQ==", + "dependencies": { + "@jsonjoy.com/buffers": "^1.0.0", + "@jsonjoy.com/codegen": "^1.0.0" + }, + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/@lukeed/csprng": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lukeed/csprng/-/csprng-1.1.0.tgz", + "integrity": "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@napi-rs/nice": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice/-/nice-1.1.1.tgz", + "integrity": "sha512-xJIPs+bYuc9ASBl+cvGsKbGrJmS6fAKaSZCnT0lhahT5rhA2VVy9/EcIgd2JhtEuFOJNx7UHNn/qiTPTY4nrQw==", + "dev": true, + "optional": true, + "engines": { + "node": ">= 10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + }, + "optionalDependencies": { + "@napi-rs/nice-android-arm-eabi": "1.1.1", + "@napi-rs/nice-android-arm64": "1.1.1", + "@napi-rs/nice-darwin-arm64": "1.1.1", + "@napi-rs/nice-darwin-x64": "1.1.1", + "@napi-rs/nice-freebsd-x64": "1.1.1", + "@napi-rs/nice-linux-arm-gnueabihf": "1.1.1", + "@napi-rs/nice-linux-arm64-gnu": "1.1.1", + "@napi-rs/nice-linux-arm64-musl": "1.1.1", + "@napi-rs/nice-linux-ppc64-gnu": "1.1.1", + "@napi-rs/nice-linux-riscv64-gnu": "1.1.1", + "@napi-rs/nice-linux-s390x-gnu": "1.1.1", + "@napi-rs/nice-linux-x64-gnu": "1.1.1", + "@napi-rs/nice-linux-x64-musl": "1.1.1", + "@napi-rs/nice-openharmony-arm64": "1.1.1", + "@napi-rs/nice-win32-arm64-msvc": "1.1.1", + "@napi-rs/nice-win32-ia32-msvc": "1.1.1", + "@napi-rs/nice-win32-x64-msvc": "1.1.1" + } + }, + "node_modules/@napi-rs/nice-android-arm-eabi": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-android-arm-eabi/-/nice-android-arm-eabi-1.1.1.tgz", + "integrity": "sha512-kjirL3N6TnRPv5iuHw36wnucNqXAO46dzK9oPb0wj076R5Xm8PfUVA9nAFB5ZNMmfJQJVKACAPd/Z2KYMppthw==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-android-arm64": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-android-arm64/-/nice-android-arm64-1.1.1.tgz", + "integrity": "sha512-blG0i7dXgbInN5urONoUCNf+DUEAavRffrO7fZSeoRMJc5qD+BJeNcpr54msPF6qfDD6kzs9AQJogZvT2KD5nw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-darwin-arm64": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-darwin-arm64/-/nice-darwin-arm64-1.1.1.tgz", + "integrity": "sha512-s/E7w45NaLqTGuOjC2p96pct4jRfo61xb9bU1unM/MJ/RFkKlJyJDx7OJI/O0ll/hrfpqKopuAFDV8yo0hfT7A==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-darwin-x64": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-darwin-x64/-/nice-darwin-x64-1.1.1.tgz", + "integrity": "sha512-dGoEBnVpsdcC+oHHmW1LRK5eiyzLwdgNQq3BmZIav+9/5WTZwBYX7r5ZkQC07Nxd3KHOCkgbHSh4wPkH1N1LiQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-freebsd-x64": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-freebsd-x64/-/nice-freebsd-x64-1.1.1.tgz", + "integrity": "sha512-kHv4kEHAylMYmlNwcQcDtXjklYp4FCf0b05E+0h6nDHsZ+F0bDe04U/tXNOqrx5CmIAth4vwfkjjUmp4c4JktQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-arm-gnueabihf": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-arm-gnueabihf/-/nice-linux-arm-gnueabihf-1.1.1.tgz", + "integrity": "sha512-E1t7K0efyKXZDoZg1LzCOLxgolxV58HCkaEkEvIYQx12ht2pa8hoBo+4OB3qh7e+QiBlp1SRf+voWUZFxyhyqg==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-arm64-gnu": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-arm64-gnu/-/nice-linux-arm64-gnu-1.1.1.tgz", + "integrity": "sha512-CIKLA12DTIZlmTaaKhQP88R3Xao+gyJxNWEn04wZwC2wmRapNnxCUZkVwggInMJvtVElA+D4ZzOU5sX4jV+SmQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-arm64-musl": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-arm64-musl/-/nice-linux-arm64-musl-1.1.1.tgz", + "integrity": "sha512-+2Rzdb3nTIYZ0YJF43qf2twhqOCkiSrHx2Pg6DJaCPYhhaxbLcdlV8hCRMHghQ+EtZQWGNcS2xF4KxBhSGeutg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-ppc64-gnu": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-ppc64-gnu/-/nice-linux-ppc64-gnu-1.1.1.tgz", + "integrity": "sha512-4FS8oc0GeHpwvv4tKciKkw3Y4jKsL7FRhaOeiPei0X9T4Jd619wHNe4xCLmN2EMgZoeGg+Q7GY7BsvwKpL22Tg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-riscv64-gnu": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-riscv64-gnu/-/nice-linux-riscv64-gnu-1.1.1.tgz", + "integrity": "sha512-HU0nw9uD4FO/oGCCk409tCi5IzIZpH2agE6nN4fqpwVlCn5BOq0MS1dXGjXaG17JaAvrlpV5ZeyZwSon10XOXw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-s390x-gnu": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-s390x-gnu/-/nice-linux-s390x-gnu-1.1.1.tgz", + "integrity": "sha512-2YqKJWWl24EwrX0DzCQgPLKQBxYDdBxOHot1KWEq7aY2uYeX+Uvtv4I8xFVVygJDgf6/92h9N3Y43WPx8+PAgQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-x64-gnu": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-x64-gnu/-/nice-linux-x64-gnu-1.1.1.tgz", + "integrity": "sha512-/gaNz3R92t+dcrfCw/96pDopcmec7oCcAQ3l/M+Zxr82KT4DljD37CpgrnXV+pJC263JkW572pdbP3hP+KjcIg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-x64-musl": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-x64-musl/-/nice-linux-x64-musl-1.1.1.tgz", + "integrity": "sha512-xScCGnyj/oppsNPMnevsBe3pvNaoK7FGvMjT35riz9YdhB2WtTG47ZlbxtOLpjeO9SqqQ2J2igCmz6IJOD5JYw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-openharmony-arm64": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-openharmony-arm64/-/nice-openharmony-arm64-1.1.1.tgz", + "integrity": "sha512-6uJPRVwVCLDeoOaNyeiW0gp2kFIM4r7PL2MczdZQHkFi9gVlgm+Vn+V6nTWRcu856mJ2WjYJiumEajfSm7arPQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-win32-arm64-msvc": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-win32-arm64-msvc/-/nice-win32-arm64-msvc-1.1.1.tgz", + "integrity": "sha512-uoTb4eAvM5B2aj/z8j+Nv8OttPf2m+HVx3UjA5jcFxASvNhQriyCQF1OB1lHL43ZhW+VwZlgvjmP5qF3+59atA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-win32-ia32-msvc": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-win32-ia32-msvc/-/nice-win32-ia32-msvc-1.1.1.tgz", + "integrity": "sha512-CNQqlQT9MwuCsg1Vd/oKXiuH+TcsSPJmlAFc5frFyX/KkOh0UpBLEj7aoY656d5UKZQMQFP7vJNa1DNUNORvug==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-win32-x64-msvc": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-win32-x64-msvc/-/nice-win32-x64-msvc-1.1.1.tgz", + "integrity": "sha512-vB+4G/jBQCAh0jelMTY3+kgFy00Hlx2f2/1zjMoH821IbplbWZOkLiTYXQkygNTzQJTq5cvwBDgn2ppHD+bglQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@nestjs/cli": { + "version": "11.0.14", + "resolved": "https://registry.npmjs.org/@nestjs/cli/-/cli-11.0.14.tgz", + "integrity": "sha512-YwP03zb5VETTwelXU+AIzMVbEZKk/uxJL+z9pw0mdG9ogAtqZ6/mpmIM4nEq/NU8D0a7CBRLcMYUmWW/55pfqw==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.19", + "@angular-devkit/schematics": "19.2.19", + "@angular-devkit/schematics-cli": "19.2.19", + "@inquirer/prompts": "7.10.1", + "@nestjs/schematics": "^11.0.1", + "ansis": "4.2.0", + "chokidar": "4.0.3", + "cli-table3": "0.6.5", + "commander": "4.1.1", + "fork-ts-checker-webpack-plugin": "9.1.0", + "glob": "13.0.0", + "node-emoji": "1.11.0", + "ora": "5.4.1", + "tsconfig-paths": "4.2.0", + "tsconfig-paths-webpack-plugin": "4.2.0", + "typescript": "5.9.3", + "webpack": "5.103.0", + "webpack-node-externals": "3.0.0" + }, + "bin": { + "nest": "bin/nest.js" + }, + "engines": { + "node": ">= 20.11" + }, + "peerDependencies": { + "@swc/cli": "^0.1.62 || ^0.3.0 || ^0.4.0 || ^0.5.0 || ^0.6.0 || ^0.7.0", + "@swc/core": "^1.3.62" + }, + "peerDependenciesMeta": { + "@swc/cli": { + "optional": true + }, + "@swc/core": { + "optional": true + } + } + }, + "node_modules/@nestjs/cli/node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dev": true, + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/@nestjs/cli/node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true + }, + "node_modules/@nestjs/cli/node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@nestjs/cli/node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/@nestjs/cli/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/@nestjs/cli/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/@nestjs/cli/node_modules/schema-utils": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.3.tgz", + "integrity": "sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/@nestjs/cli/node_modules/webpack": { + "version": "5.103.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.103.0.tgz", + "integrity": "sha512-HU1JOuV1OavsZ+mfigY0j8d1TgQgbZ6M+J75zDkpEAwYeXjWSqrGJtgnPblJjd/mAyTNQ7ygw0MiKOn6etz8yw==", + "dev": true, + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.8", + "@types/json-schema": "^7.0.15", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.15.0", + "acorn-import-phases": "^1.0.3", + "browserslist": "^4.26.3", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.3", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.3.1", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^4.3.3", + "tapable": "^2.3.0", + "terser-webpack-plugin": "^5.3.11", + "watchpack": "^2.4.4", + "webpack-sources": "^3.3.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/@nestjs/common": { + "version": "11.1.11", + "resolved": "https://registry.npmjs.org/@nestjs/common/-/common-11.1.11.tgz", + "integrity": "sha512-R/+A8XFqLgN8zNs2twhrOaE7dJbRQhdPX3g46am4RT/x8xGLqDphrXkUIno4cGUZHxbczChBAaAPTdPv73wDZA==", + "dependencies": { + "file-type": "21.2.0", + "iterare": "1.2.1", + "load-esm": "1.0.3", + "tslib": "2.8.1", + "uid": "2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "class-transformer": ">=0.4.1", + "class-validator": ">=0.13.2", + "reflect-metadata": "^0.1.12 || ^0.2.0", + "rxjs": "^7.1.0" + }, + "peerDependenciesMeta": { + "class-transformer": { + "optional": true + }, + "class-validator": { + "optional": true + } + } + }, + "node_modules/@nestjs/config": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@nestjs/config/-/config-4.0.2.tgz", + "integrity": "sha512-McMW6EXtpc8+CwTUwFdg6h7dYcBUpH5iUILCclAsa+MbCEvC9ZKu4dCHRlJqALuhjLw97pbQu62l4+wRwGeZqA==", + "dependencies": { + "dotenv": "16.4.7", + "dotenv-expand": "12.0.1", + "lodash": "4.17.21" + }, + "peerDependencies": { + "@nestjs/common": "^10.0.0 || ^11.0.0", + "rxjs": "^7.1.0" + } + }, + "node_modules/@nestjs/core": { + "version": "11.1.11", + "resolved": "https://registry.npmjs.org/@nestjs/core/-/core-11.1.11.tgz", + "integrity": "sha512-H9i+zT3RvHi7tDc+lCmWHJ3ustXveABCr+Vcpl96dNOxgmrx4elQSTC4W93Mlav2opfLV+p0UTHY6L+bpUA4zA==", + "hasInstallScript": true, + "dependencies": { + "@nuxt/opencollective": "0.4.1", + "fast-safe-stringify": "2.1.1", + "iterare": "1.2.1", + "path-to-regexp": "8.3.0", + "tslib": "2.8.1", + "uid": "2.0.2" + }, + "engines": { + "node": ">= 20" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/microservices": "^11.0.0", + "@nestjs/platform-express": "^11.0.0", + "@nestjs/websockets": "^11.0.0", + "reflect-metadata": "^0.1.12 || ^0.2.0", + "rxjs": "^7.1.0" + }, + "peerDependenciesMeta": { + "@nestjs/microservices": { + "optional": true + }, + "@nestjs/platform-express": { + "optional": true + }, + "@nestjs/websockets": { + "optional": true + } + } + }, + "node_modules/@nestjs/event-emitter": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@nestjs/event-emitter/-/event-emitter-3.0.1.tgz", + "integrity": "sha512-0Ln/x+7xkU6AJFOcQI9tIhUMXVF7D5itiaQGOyJbXtlAfAIt8gzDdJm+Im7cFzKoWkiW5nCXCPh6GSvdQd/3Dw==", + "dependencies": { + "eventemitter2": "6.4.9" + }, + "peerDependencies": { + "@nestjs/common": "^10.0.0 || ^11.0.0", + "@nestjs/core": "^10.0.0 || ^11.0.0" + } + }, + "node_modules/@nestjs/platform-express": { + "version": "11.1.11", + "resolved": "https://registry.npmjs.org/@nestjs/platform-express/-/platform-express-11.1.11.tgz", + "integrity": "sha512-kyABSskdMRIAMWL0SlbwtDy4yn59RL4HDdwHDz/fxWuv7/53YP8Y2DtV3/sHqY5Er0msMVTZrM38MjqXhYL7gw==", + "dependencies": { + "cors": "2.8.5", + "express": "5.2.1", + "multer": "2.0.2", + "path-to-regexp": "8.3.0", + "tslib": "2.8.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/core": "^11.0.0" + } + }, + "node_modules/@nestjs/schematics": { + "version": "11.0.9", + "resolved": "https://registry.npmjs.org/@nestjs/schematics/-/schematics-11.0.9.tgz", + "integrity": "sha512-0NfPbPlEaGwIT8/TCThxLzrlz3yzDNkfRNpbL7FiplKq3w4qXpJg0JYwqgMEJnLQZm3L/L/5XjoyfJHUO3qX9g==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.17", + "@angular-devkit/schematics": "19.2.17", + "comment-json": "4.4.1", + "jsonc-parser": "3.3.1", + "pluralize": "8.0.0" + }, + "peerDependencies": { + "typescript": ">=4.8.2" + } + }, + "node_modules/@nestjs/schematics/node_modules/@angular-devkit/core": { + "version": "19.2.17", + "resolved": "https://registry.npmjs.org/@angular-devkit/core/-/core-19.2.17.tgz", + "integrity": "sha512-Ah008x2RJkd0F+NLKqIpA34/vUGwjlprRCkvddjDopAWRzYn6xCkz1Tqwuhn0nR1Dy47wTLKYD999TYl5ONOAQ==", + "dev": true, + "dependencies": { + "ajv": "8.17.1", + "ajv-formats": "3.0.1", + "jsonc-parser": "3.3.1", + "picomatch": "4.0.2", + "rxjs": "7.8.1", + "source-map": "0.7.4" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + }, + "peerDependencies": { + "chokidar": "^4.0.0" + }, + "peerDependenciesMeta": { + "chokidar": { + "optional": true + } + } + }, + "node_modules/@nestjs/schematics/node_modules/@angular-devkit/schematics": { + "version": "19.2.17", + "resolved": "https://registry.npmjs.org/@angular-devkit/schematics/-/schematics-19.2.17.tgz", + "integrity": "sha512-ADfbaBsrG8mBF6Mfs+crKA/2ykB8AJI50Cv9tKmZfwcUcyAdmTr+vVvhsBCfvUAEokigSsgqgpYxfkJVxhJYeg==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.17", + "jsonc-parser": "3.3.1", + "magic-string": "0.30.17", + "ora": "5.4.1", + "rxjs": "7.8.1" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + } + }, + "node_modules/@nestjs/schematics/node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/@nestjs/terminus": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/@nestjs/terminus/-/terminus-11.0.0.tgz", + "integrity": "sha512-c55LOo9YGovmQHtFUMa/vDaxGZ2cglMTZejqgHREaApt/GArTfgYYGwhRXPLq8ZwiQQlLuYB+79e9iA8mlDSLA==", + "dependencies": { + "boxen": "5.1.2", + "check-disk-space": "3.4.0" + }, + "peerDependencies": { + "@grpc/grpc-js": "*", + "@grpc/proto-loader": "*", + "@mikro-orm/core": "*", + "@mikro-orm/nestjs": "*", + "@nestjs/axios": "^2.0.0 || ^3.0.0 || ^4.0.0", + "@nestjs/common": "^10.0.0 || ^11.0.0", + "@nestjs/core": "^10.0.0 || ^11.0.0", + "@nestjs/microservices": "^10.0.0 || ^11.0.0", + "@nestjs/mongoose": "^11.0.0", + "@nestjs/sequelize": "^10.0.0 || ^11.0.0", + "@nestjs/typeorm": "^10.0.0 || ^11.0.0", + "@prisma/client": "*", + "mongoose": "*", + "reflect-metadata": "0.1.x || 0.2.x", + "rxjs": "7.x", + "sequelize": "*", + "typeorm": "*" + }, + "peerDependenciesMeta": { + "@grpc/grpc-js": { + "optional": true + }, + "@grpc/proto-loader": { + "optional": true + }, + "@mikro-orm/core": { + "optional": true + }, + "@mikro-orm/nestjs": { + "optional": true + }, + "@nestjs/axios": { + "optional": true + }, + "@nestjs/microservices": { + "optional": true + }, + "@nestjs/mongoose": { + "optional": true + }, + "@nestjs/sequelize": { + "optional": true + }, + "@nestjs/typeorm": { + "optional": true + }, + "@prisma/client": { + "optional": true + }, + "mongoose": { + "optional": true + }, + "sequelize": { + "optional": true + }, + "typeorm": { + "optional": true + } + } + }, + "node_modules/@nestjs/testing": { + "version": "11.1.11", + "resolved": "https://registry.npmjs.org/@nestjs/testing/-/testing-11.1.11.tgz", + "integrity": "sha512-Po2aZKXlxuySDEh3Gi05LJ7/BtfTAPRZ3KPTrbpNrTmgGr3rFgEGYpQwN50wXYw0pywoICiFLZSZ/qXsplf6NA==", + "dev": true, + "dependencies": { + "tslib": "2.8.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/core": "^11.0.0", + "@nestjs/microservices": "^11.0.0", + "@nestjs/platform-express": "^11.0.0" + }, + "peerDependenciesMeta": { + "@nestjs/microservices": { + "optional": true + }, + "@nestjs/platform-express": { + "optional": true + } + } + }, + "node_modules/@nuxt/opencollective": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@nuxt/opencollective/-/opencollective-0.4.1.tgz", + "integrity": "sha512-GXD3wy50qYbxCJ652bDrDzgMr3NFEkIS374+IgFQKkCvk9yiYcLvX2XDYr7UyQxf4wK0e+yqDYRubZ0DtOxnmQ==", + "dependencies": { + "consola": "^3.2.3" + }, + "bin": { + "opencollective": "bin/opencollective.js" + }, + "engines": { + "node": "^14.18.0 || >=16.10.0", + "npm": ">=5.10.0" + } + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@prisma/client": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/client/-/client-6.19.1.tgz", + "integrity": "sha512-4SXj4Oo6HyQkLUWT8Ke5R0PTAfVOKip5Roo+6+b2EDTkFg5be0FnBWiuRJc0BC0sRQIWGMLKW1XguhVfW/z3/A==", + "hasInstallScript": true, + "engines": { + "node": ">=18.18" + }, + "peerDependencies": { + "prisma": "*", + "typescript": ">=5.1.0" + }, + "peerDependenciesMeta": { + "prisma": { + "optional": true + }, + "typescript": { + "optional": true + } + } + }, + "node_modules/@prisma/config": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/config/-/config-6.19.1.tgz", + "integrity": "sha512-bUL/aYkGXLwxVGhJmQMtslLT7KPEfUqmRa919fKI4wQFX4bIFUKiY8Jmio/2waAjjPYrtuDHa7EsNCnJTXxiOw==", + "devOptional": true, + "dependencies": { + "c12": "3.1.0", + "deepmerge-ts": "7.1.5", + "effect": "3.18.4", + "empathic": "2.0.0" + } + }, + "node_modules/@prisma/debug": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/debug/-/debug-6.19.1.tgz", + "integrity": "sha512-h1JImhlAd/s5nhY/e9qkAzausWldbeT+e4nZF7A4zjDYBF4BZmKDt4y0jK7EZapqOm1kW7V0e9agV/iFDy3fWw==", + "devOptional": true + }, + "node_modules/@prisma/engines": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/engines/-/engines-6.19.1.tgz", + "integrity": "sha512-xy95dNJ7DiPf9IJ3oaVfX785nbFl7oNDzclUF+DIiJw6WdWCvPl0LPU0YqQLsrwv8N64uOQkH391ujo3wSo+Nw==", + "devOptional": true, + "hasInstallScript": true, + "dependencies": { + "@prisma/debug": "6.19.1", + "@prisma/engines-version": "7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7", + "@prisma/fetch-engine": "6.19.1", + "@prisma/get-platform": "6.19.1" + } + }, + "node_modules/@prisma/engines-version": { + "version": "7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7", + "resolved": "https://registry.npmjs.org/@prisma/engines-version/-/engines-version-7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7.tgz", + "integrity": "sha512-03bgb1VD5gvuumNf+7fVGBzfpJPjmqV423l/WxsWk2cNQ42JD0/SsFBPhN6z8iAvdHs07/7ei77SKu7aZfq8bA==", + "devOptional": true + }, + "node_modules/@prisma/fetch-engine": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/fetch-engine/-/fetch-engine-6.19.1.tgz", + "integrity": "sha512-mmgcotdaq4VtAHO6keov3db+hqlBzQS6X7tR7dFCbvXjLVTxBYdSJFRWz+dq7F9p6dvWyy1X0v8BlfRixyQK6g==", + "devOptional": true, + "dependencies": { + "@prisma/debug": "6.19.1", + "@prisma/engines-version": "7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7", + "@prisma/get-platform": "6.19.1" + } + }, + "node_modules/@prisma/get-platform": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/get-platform/-/get-platform-6.19.1.tgz", + "integrity": "sha512-zsg44QUiQAnFUyh6Fbt7c9HjMXHwFTqtrgcX7DAZmRgnkPyYT7Sh8Mn8D5PuuDYNtMOYcpLGg576MLfIORsBYw==", + "devOptional": true, + "dependencies": { + "@prisma/debug": "6.19.1" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true + }, + "node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "dev": true, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "devOptional": true + }, + "node_modules/@swc/cli": { + "version": "0.7.9", + "resolved": "https://registry.npmjs.org/@swc/cli/-/cli-0.7.9.tgz", + "integrity": "sha512-AFQu3ZZ9IcdClTknxbug08S9ed/q8F3aYkO5NoZ+6IjQ5UEo1s2HN1GRKNvUslYx2EoVYxd+6xGcp6C7wwtxyQ==", + "dev": true, + "dependencies": { + "@swc/counter": "^0.1.3", + "@xhmikosr/bin-wrapper": "^13.0.5", + "commander": "^8.3.0", + "minimatch": "^9.0.3", + "piscina": "^4.3.1", + "semver": "^7.3.8", + "slash": "3.0.0", + "source-map": "^0.7.3", + "tinyglobby": "^0.2.13" + }, + "bin": { + "spack": "bin/spack.js", + "swc": "bin/swc.js", + "swcx": "bin/swcx.js" + }, + "engines": { + "node": ">= 16.14.0" + }, + "peerDependencies": { + "@swc/core": "^1.2.66", + "chokidar": "^4.0.1" + }, + "peerDependenciesMeta": { + "chokidar": { + "optional": true + } + } + }, + "node_modules/@swc/cli/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@swc/cli/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "dev": true, + "engines": { + "node": ">= 12" + } + }, + "node_modules/@swc/cli/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@swc/core": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.15.8.tgz", + "integrity": "sha512-T8keoJjXaSUoVBCIjgL6wAnhADIb09GOELzKg10CjNg+vLX48P93SME6jTfte9MZIm5m+Il57H3rTSk/0kzDUw==", + "hasInstallScript": true, + "dependencies": { + "@swc/counter": "^0.1.3", + "@swc/types": "^0.1.25" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.15.8", + "@swc/core-darwin-x64": "1.15.8", + "@swc/core-linux-arm-gnueabihf": "1.15.8", + "@swc/core-linux-arm64-gnu": "1.15.8", + "@swc/core-linux-arm64-musl": "1.15.8", + "@swc/core-linux-x64-gnu": "1.15.8", + "@swc/core-linux-x64-musl": "1.15.8", + "@swc/core-win32-arm64-msvc": "1.15.8", + "@swc/core-win32-ia32-msvc": "1.15.8", + "@swc/core-win32-x64-msvc": "1.15.8" + }, + "peerDependencies": { + "@swc/helpers": ">=0.5.17" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } + } + }, + "node_modules/@swc/core-darwin-arm64": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.15.8.tgz", + "integrity": "sha512-M9cK5GwyWWRkRGwwCbREuj6r8jKdES/haCZ3Xckgkl8MUQJZA3XB7IXXK1IXRNeLjg6m7cnoMICpXv1v1hlJOg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-darwin-x64": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.15.8.tgz", + "integrity": "sha512-j47DasuOvXl80sKJHSi2X25l44CMc3VDhlJwA7oewC1nV1VsSzwX+KOwE5tLnfORvVJJyeiXgJORNYg4jeIjYQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.15.8.tgz", + "integrity": "sha512-siAzDENu2rUbwr9+fayWa26r5A9fol1iORG53HWxQL1J8ym4k7xt9eME0dMPXlYZDytK5r9sW8zEA10F2U3Xwg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.15.8.tgz", + "integrity": "sha512-o+1y5u6k2FfPYbTRUPvurwzNt5qd0NTumCTFscCNuBksycloXY16J8L+SMW5QRX59n4Hp9EmFa3vpvNHRVv1+Q==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.15.8.tgz", + "integrity": "sha512-koiCqL09EwOP1S2RShCI7NbsQuG6r2brTqUYE7pV7kZm9O17wZ0LSz22m6gVibpwEnw8jI3IE1yYsQTVpluALw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.15.8.tgz", + "integrity": "sha512-4p6lOMU3bC+Vd5ARtKJ/FxpIC5G8v3XLoPEZ5s7mLR8h7411HWC/LmTXDHcrSXRC55zvAVia1eldy6zDLz8iFQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-musl": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.15.8.tgz", + "integrity": "sha512-z3XBnbrZAL+6xDGAhJoN4lOueIxC/8rGrJ9tg+fEaeqLEuAtHSW2QHDHxDwkxZMjuF/pZ6MUTjHjbp8wLbuRLA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.15.8.tgz", + "integrity": "sha512-djQPJ9Rh9vP8GTS/Df3hcc6XP6xnG5c8qsngWId/BLA9oX6C7UzCPAn74BG/wGb9a6j4w3RINuoaieJB3t+7iQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.15.8.tgz", + "integrity": "sha512-/wfAgxORg2VBaUoFdytcVBVCgf1isWZIEXB9MZEUty4wwK93M/PxAkjifOho9RN3WrM3inPLabICRCEgdHpKKQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.15.8.tgz", + "integrity": "sha512-GpMePrh9Sl4d61o4KAHOOv5is5+zt6BEXCOCgs/H0FLGeii7j9bWDE8ExvKFy2GRRZVNR1ugsnzaGWHKM6kuzA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" + }, + "node_modules/@swc/types": { + "version": "0.1.25", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.25.tgz", + "integrity": "sha512-iAoY/qRhNH8a/hBvm3zKj9qQ4oc2+3w1unPJa2XvTK3XjeLXtzcCingVPw/9e5mn1+0yPqxcBGp9Jf0pkfMb1g==", + "dependencies": { + "@swc/counter": "^0.1.3" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "dev": true, + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@temporalio/activity": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/activity/-/activity-1.14.0.tgz", + "integrity": "sha512-ayGqfjqW8R1nhow54Y3A5ezoVwFr4SbB8VHaQA3seDFOB+6TyOVSlulYqGgFMxl/FXBkRa/VEswEDqS/xQq7aQ==", + "dependencies": { + "@temporalio/client": "1.14.0", + "@temporalio/common": "1.14.0", + "abort-controller": "^3.0.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/client": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/client/-/client-1.14.0.tgz", + "integrity": "sha512-kjzJ+7M2kHj32cTTSQT5WOjEIOxY0TNV5g6Sw9PzWmKWdtIZig+d7qUIA3VjDe/TieNozxjR2wNAX5sKzYFANA==", + "dependencies": { + "@grpc/grpc-js": "^1.12.4", + "@temporalio/common": "1.14.0", + "@temporalio/proto": "1.14.0", + "abort-controller": "^3.0.0", + "long": "^5.2.3", + "uuid": "^11.1.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/common": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/common/-/common-1.14.0.tgz", + "integrity": "sha512-jVmurBdFHdqw/wIehzVJikS8MhavL630p88TJ64P5PH0nP8S5V8R5vhkmHZ7n0sMRO+A0QFyWYyvnccu6MQZvw==", + "dependencies": { + "@temporalio/proto": "1.14.0", + "long": "^5.2.3", + "ms": "3.0.0-canary.1", + "nexus-rpc": "^0.0.1", + "proto3-json-serializer": "^2.0.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/core-bridge": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/core-bridge/-/core-bridge-1.14.0.tgz", + "integrity": "sha512-62WRbESKVtCx1FafbikQB90EwKNF+mEAaOJKifUIU4lQnk9wlZPRfrf6pwyqr+Uqi7uZhD2YqHXWUNVYbmQU7w==", + "hasInstallScript": true, + "dependencies": { + "@grpc/grpc-js": "^1.12.4", + "@temporalio/common": "1.14.0", + "arg": "^5.0.2", + "cargo-cp-artifact": "^0.1.8", + "which": "^4.0.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/nexus": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/nexus/-/nexus-1.14.0.tgz", + "integrity": "sha512-0tgf+EBuz5vgYUukaYUzVHKr27XNQejXXO1i0x8+4sjR5zN6euNKraHfRzrDWRSm3nTZ6199rCTbR+CPrqaC/g==", + "dependencies": { + "@temporalio/client": "1.14.0", + "@temporalio/common": "1.14.0", + "@temporalio/proto": "1.14.0", + "long": "^5.2.3", + "nexus-rpc": "^0.0.1" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/proto": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/proto/-/proto-1.14.0.tgz", + "integrity": "sha512-duYVjt3x6SkuFzJr+5NlklEgookPqW065qdcvogmdfVjrgiwz4W/07AN3+fL4ufmqt1//0SyF6nyqv9RNADYNA==", + "dependencies": { + "long": "^5.2.3", + "protobufjs": "^7.2.5" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/testing": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/testing/-/testing-1.14.0.tgz", + "integrity": "sha512-b1i31O4PL1YhxKxWb8LtRwRbqaUiZ+BxhOIDq5g94M0SayvVyOw/EtFVOX6XWi+trDwGPVytpv748qtcA+nUlA==", + "dev": true, + "dependencies": { + "@temporalio/activity": "1.14.0", + "@temporalio/client": "1.14.0", + "@temporalio/common": "1.14.0", + "@temporalio/core-bridge": "1.14.0", + "@temporalio/proto": "1.14.0", + "@temporalio/worker": "1.14.0", + "@temporalio/workflow": "1.14.0", + "abort-controller": "^3.0.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/worker": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/worker/-/worker-1.14.0.tgz", + "integrity": "sha512-wo5rgPSt83aT1hLYmh/0X4yOx/6uRbIvBa9LXqGo7s9s1GJkUyJpAahRt8aMoLm4qPsiZtu1gtU5KcASOmgqtg==", + "dependencies": { + "@grpc/grpc-js": "^1.12.4", + "@swc/core": "^1.3.102", + "@temporalio/activity": "1.14.0", + "@temporalio/client": "1.14.0", + "@temporalio/common": "1.14.0", + "@temporalio/core-bridge": "1.14.0", + "@temporalio/nexus": "1.14.0", + "@temporalio/proto": "1.14.0", + "@temporalio/workflow": "1.14.0", + "abort-controller": "^3.0.0", + "heap-js": "^2.6.0", + "memfs": "^4.6.0", + "nexus-rpc": "^0.0.1", + "proto3-json-serializer": "^2.0.0", + "protobufjs": "^7.2.5", + "rxjs": "^7.8.1", + "source-map": "^0.7.4", + "source-map-loader": "^4.0.2", + "supports-color": "^8.1.1", + "swc-loader": "^0.2.3", + "unionfs": "^4.5.1", + "webpack": "^5.94.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/workflow": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/workflow/-/workflow-1.14.0.tgz", + "integrity": "sha512-hxUqCZTkdSwgy5nc/O1DIpYH0Z77cM57RfJvhK4ELmkkb1jh/Q4dshDannH1qQ1zYT0IKRBHSW7m1aMy1+dgDA==", + "dependencies": { + "@temporalio/common": "1.14.0", + "@temporalio/proto": "1.14.0", + "nexus-rpc": "^0.0.1" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@tokenizer/inflate": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.4.1.tgz", + "integrity": "sha512-2mAv+8pkG6GIZiF1kNg1jAjh27IDxEPKwdGul3snfztFerfPGI1LjDezZp3i7BElXompqEtPmoPx6c2wgtWsOA==", + "dependencies": { + "debug": "^4.4.3", + "token-types": "^6.1.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@tokenizer/token": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz", + "integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==" + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.12.tgz", + "integrity": "sha512-UCYBaeFvM11aU2y3YPZ//O5Rhj+xKyzy7mvcIoAjASbigy8mHMryP5cK7dgjlz2hWxh1g5pLw084E0a/wlUSFQ==", + "dev": true + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.6", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz", + "integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==", + "dev": true, + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/eslint": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==" + }, + "node_modules/@types/express": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.6.tgz", + "integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==", + "dev": true, + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^5.0.0", + "@types/serve-static": "^2" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.1.0.tgz", + "integrity": "sha512-jnHMsrd0Mwa9Cf4IdOzbz543y4XJepXrbia2T4b6+spXC2We3t1y6K44D3mR8XMFSXMCf3/l7rCgddfx7UNVBA==", + "dev": true, + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==", + "dev": true + }, + "node_modules/@types/http-errors": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz", + "integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==", + "dev": true + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", + "dev": true, + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==" + }, + "node_modules/@types/node": { + "version": "22.19.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz", + "integrity": "sha512-1N9SBnWYOJTrNZCdh/yJE+t910Y128BoyY+zBLWhL3r0TYzlTmFdXrPwHL9DyFZmlEXNQQolTZh3KHV31QDhyA==", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/opossum": { + "version": "8.1.9", + "resolved": "https://registry.npmjs.org/@types/opossum/-/opossum-8.1.9.tgz", + "integrity": "sha512-Jm/tYxuJFefiwRYs+/EOsUP3ktk0c8siMgAHPLnA4PXF4wKghzcjqf88dY+Xii5jId5Txw4JV0FMKTpjbd7KJA==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/qs": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.14.0.tgz", + "integrity": "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==", + "dev": true + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", + "dev": true + }, + "node_modules/@types/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz", + "integrity": "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-2.2.0.tgz", + "integrity": "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ==", + "dev": true, + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*" + } + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true + }, + "node_modules/@types/validator": { + "version": "13.15.10", + "resolved": "https://registry.npmjs.org/@types/validator/-/validator-13.15.10.tgz", + "integrity": "sha512-T8L6i7wCuyoK8A/ZeLYt1+q0ty3Zb9+qbSSvrIVitzT3YjZqkTZ40IbRsPanlB4h1QB3JVL1SYCdR6ngtFYcuA==" + }, + "node_modules/@types/yargs": { + "version": "17.0.35", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.35.tgz", + "integrity": "sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webpack-cli/configtest": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-2.1.1.tgz", + "integrity": "sha512-wy0mglZpDSiSS0XHrVR+BAdId2+yxPSoJW8fsna3ZpYSlufjvxnP4YbKTCBZnNIcGN4r6ZPXV55X4mYExOfLmw==", + "dev": true, + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + } + }, + "node_modules/@webpack-cli/info": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-2.0.2.tgz", + "integrity": "sha512-zLHQdI/Qs1UyT5UBdWNqsARasIA+AaF8t+4u2aS2nEpBQh2mWIVb8qAklq0eUENnC5mOItrIB4LiS9xMtph18A==", + "dev": true, + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + } + }, + "node_modules/@webpack-cli/serve": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-2.0.5.tgz", + "integrity": "sha512-lqaoKnRYBdo1UgDX8uF24AfGMifWK19TxPmM5FHc2vAGxrJ/qtyUyFBWoY1tISZdelsQ5fBcOusifo5o5wSJxQ==", + "dev": true, + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + }, + "peerDependenciesMeta": { + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/@willsoto/nestjs-prometheus": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@willsoto/nestjs-prometheus/-/nestjs-prometheus-6.0.2.tgz", + "integrity": "sha512-ePyLZYdIrOOdlOWovzzMisIgviXqhPVzFpSMKNNhn6xajhRHeBsjAzSdpxZTc6pnjR9hw1lNAHyKnKl7lAPaVg==", + "peerDependencies": { + "@nestjs/common": "^7.0.0 || ^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0", + "prom-client": "^15.0.0" + } + }, + "node_modules/@xhmikosr/archive-type": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/archive-type/-/archive-type-7.1.0.tgz", + "integrity": "sha512-xZEpnGplg1sNPyEgFh0zbHxqlw5dtYg6viplmWSxUj12+QjU9SKu3U/2G73a15pEjLaOqTefNSZ1fOPUOT4Xgg==", + "dev": true, + "dependencies": { + "file-type": "^20.5.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/archive-type/node_modules/@tokenizer/inflate": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz", + "integrity": "sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg==", + "dev": true, + "dependencies": { + "debug": "^4.4.0", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@xhmikosr/archive-type/node_modules/file-type": { + "version": "20.5.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-20.5.0.tgz", + "integrity": "sha512-BfHZtG/l9iMm4Ecianu7P8HRD2tBHLtjXinm4X62XBOYzi7CYA7jyqfJzOvXHqzVrVPYqBo2/GvbARMaaJkKVg==", + "dev": true, + "dependencies": { + "@tokenizer/inflate": "^0.2.6", + "strtok3": "^10.2.0", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/@xhmikosr/bin-check": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/bin-check/-/bin-check-7.1.0.tgz", + "integrity": "sha512-y1O95J4mnl+6MpVmKfMYXec17hMEwE/yeCglFNdx+QvLLtP0yN4rSYcbkXnth+lElBuKKek2NbvOfOGPpUXCvw==", + "dev": true, + "dependencies": { + "execa": "^5.1.1", + "isexe": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/bin-check/node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/@xhmikosr/bin-wrapper": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/bin-wrapper/-/bin-wrapper-13.2.0.tgz", + "integrity": "sha512-t9U9X0sDPRGDk5TGx4dv5xiOvniVJpXnfTuynVKwHgtib95NYEw4MkZdJqhoSiz820D9m0o6PCqOPMXz0N9fIw==", + "dev": true, + "dependencies": { + "@xhmikosr/bin-check": "^7.1.0", + "@xhmikosr/downloader": "^15.2.0", + "@xhmikosr/os-filter-obj": "^3.0.0", + "bin-version-check": "^5.1.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/decompress": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/decompress/-/decompress-10.2.0.tgz", + "integrity": "sha512-MmDBvu0+GmADyQWHolcZuIWffgfnuTo4xpr2I/Qw5Ox0gt+e1Be7oYqJM4te5ylL6mzlcoicnHVDvP27zft8tg==", + "dev": true, + "dependencies": { + "@xhmikosr/decompress-tar": "^8.1.0", + "@xhmikosr/decompress-tarbz2": "^8.1.0", + "@xhmikosr/decompress-targz": "^8.1.0", + "@xhmikosr/decompress-unzip": "^7.1.0", + "graceful-fs": "^4.2.11", + "strip-dirs": "^3.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/decompress-tar": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/decompress-tar/-/decompress-tar-8.1.0.tgz", + "integrity": "sha512-m0q8x6lwxenh1CrsTby0Jrjq4vzW/QU1OLhTHMQLEdHpmjR1lgahGz++seZI0bXF3XcZw3U3xHfqZSz+JPP2Gg==", + "dev": true, + "dependencies": { + "file-type": "^20.5.0", + "is-stream": "^2.0.1", + "tar-stream": "^3.1.7" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/decompress-tar/node_modules/@tokenizer/inflate": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz", + "integrity": "sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg==", + "dev": true, + "dependencies": { + "debug": "^4.4.0", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@xhmikosr/decompress-tar/node_modules/file-type": { + "version": "20.5.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-20.5.0.tgz", + "integrity": "sha512-BfHZtG/l9iMm4Ecianu7P8HRD2tBHLtjXinm4X62XBOYzi7CYA7jyqfJzOvXHqzVrVPYqBo2/GvbARMaaJkKVg==", + "dev": true, + "dependencies": { + "@tokenizer/inflate": "^0.2.6", + "strtok3": "^10.2.0", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/@xhmikosr/decompress-tarbz2": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/decompress-tarbz2/-/decompress-tarbz2-8.1.0.tgz", + "integrity": "sha512-aCLfr3A/FWZnOu5eqnJfme1Z1aumai/WRw55pCvBP+hCGnTFrcpsuiaVN5zmWTR53a8umxncY2JuYsD42QQEbw==", + "dev": true, + "dependencies": { + "@xhmikosr/decompress-tar": "^8.0.1", + "file-type": "^20.5.0", + "is-stream": "^2.0.1", + "seek-bzip": "^2.0.0", + "unbzip2-stream": "^1.4.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/decompress-tarbz2/node_modules/@tokenizer/inflate": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz", + "integrity": "sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg==", + "dev": true, + "dependencies": { + "debug": "^4.4.0", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@xhmikosr/decompress-tarbz2/node_modules/file-type": { + "version": "20.5.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-20.5.0.tgz", + "integrity": "sha512-BfHZtG/l9iMm4Ecianu7P8HRD2tBHLtjXinm4X62XBOYzi7CYA7jyqfJzOvXHqzVrVPYqBo2/GvbARMaaJkKVg==", + "dev": true, + "dependencies": { + "@tokenizer/inflate": "^0.2.6", + "strtok3": "^10.2.0", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/@xhmikosr/decompress-targz": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/decompress-targz/-/decompress-targz-8.1.0.tgz", + "integrity": "sha512-fhClQ2wTmzxzdz2OhSQNo9ExefrAagw93qaG1YggoIz/QpI7atSRa7eOHv4JZkpHWs91XNn8Hry3CwUlBQhfPA==", + "dev": true, + "dependencies": { + "@xhmikosr/decompress-tar": "^8.0.1", + "file-type": "^20.5.0", + "is-stream": "^2.0.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/decompress-targz/node_modules/@tokenizer/inflate": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz", + "integrity": "sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg==", + "dev": true, + "dependencies": { + "debug": "^4.4.0", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@xhmikosr/decompress-targz/node_modules/file-type": { + "version": "20.5.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-20.5.0.tgz", + "integrity": "sha512-BfHZtG/l9iMm4Ecianu7P8HRD2tBHLtjXinm4X62XBOYzi7CYA7jyqfJzOvXHqzVrVPYqBo2/GvbARMaaJkKVg==", + "dev": true, + "dependencies": { + "@tokenizer/inflate": "^0.2.6", + "strtok3": "^10.2.0", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/@xhmikosr/decompress-unzip": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/decompress-unzip/-/decompress-unzip-7.1.0.tgz", + "integrity": "sha512-oqTYAcObqTlg8owulxFTqiaJkfv2SHsxxxz9Wg4krJAHVzGWlZsU8tAB30R6ow+aHrfv4Kub6WQ8u04NWVPUpA==", + "dev": true, + "dependencies": { + "file-type": "^20.5.0", + "get-stream": "^6.0.1", + "yauzl": "^3.1.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/decompress-unzip/node_modules/@tokenizer/inflate": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz", + "integrity": "sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg==", + "dev": true, + "dependencies": { + "debug": "^4.4.0", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@xhmikosr/decompress-unzip/node_modules/file-type": { + "version": "20.5.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-20.5.0.tgz", + "integrity": "sha512-BfHZtG/l9iMm4Ecianu7P8HRD2tBHLtjXinm4X62XBOYzi7CYA7jyqfJzOvXHqzVrVPYqBo2/GvbARMaaJkKVg==", + "dev": true, + "dependencies": { + "@tokenizer/inflate": "^0.2.6", + "strtok3": "^10.2.0", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/@xhmikosr/downloader": { + "version": "15.2.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/downloader/-/downloader-15.2.0.tgz", + "integrity": "sha512-lAqbig3uRGTt0sHNIM4vUG9HoM+mRl8K28WuYxyXLCUT6pyzl4Y4i0LZ3jMEsCYZ6zjPZbO9XkG91OSTd4si7g==", + "dev": true, + "dependencies": { + "@xhmikosr/archive-type": "^7.1.0", + "@xhmikosr/decompress": "^10.2.0", + "content-disposition": "^0.5.4", + "defaults": "^2.0.2", + "ext-name": "^5.0.0", + "file-type": "^20.5.0", + "filenamify": "^6.0.0", + "get-stream": "^6.0.1", + "got": "^13.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@xhmikosr/downloader/node_modules/@tokenizer/inflate": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.2.7.tgz", + "integrity": "sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg==", + "dev": true, + "dependencies": { + "debug": "^4.4.0", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@xhmikosr/downloader/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dev": true, + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/@xhmikosr/downloader/node_modules/defaults": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-2.0.2.tgz", + "integrity": "sha512-cuIw0PImdp76AOfgkjbW4VhQODRmNNcKR73vdCH5cLd/ifj7aamfoXvYgfGkEAjNJZ3ozMIy9Gu2LutUkGEPbA==", + "dev": true, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@xhmikosr/downloader/node_modules/file-type": { + "version": "20.5.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-20.5.0.tgz", + "integrity": "sha512-BfHZtG/l9iMm4Ecianu7P8HRD2tBHLtjXinm4X62XBOYzi7CYA7jyqfJzOvXHqzVrVPYqBo2/GvbARMaaJkKVg==", + "dev": true, + "dependencies": { + "@tokenizer/inflate": "^0.2.6", + "strtok3": "^10.2.0", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/@xhmikosr/os-filter-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@xhmikosr/os-filter-obj/-/os-filter-obj-3.0.0.tgz", + "integrity": "sha512-siPY6BD5dQ2SZPl3I0OZBHL27ZqZvLEosObsZRQ1NUB8qcxegwt0T9eKtV96JMFQpIz1elhkzqOg4c/Ri6Dp9A==", + "dev": true, + "dependencies": { + "arch": "^3.0.0" + }, + "engines": { + "node": "^14.14.0 || >=16.0.0" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/accepts": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz", + "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==", + "dependencies": { + "mime-types": "^3.0.0", + "negotiator": "^1.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-phases": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/acorn-import-phases/-/acorn-import-phases-1.0.4.tgz", + "integrity": "sha512-wKmbr/DDiIXzEOiWrTTUcDm24kQ2vGfZQvM2fwg2vXqR5uW6aapr7ObPtj1th32b9u90/Pf4AItvdTh42fBmVQ==", + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "acorn": "^8.14.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz", + "integrity": "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", + "dev": true, + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/ansis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/ansis/-/ansis-4.2.0.tgz", + "integrity": "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==", + "dev": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/anymatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/append-field": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/append-field/-/append-field-1.0.0.tgz", + "integrity": "sha512-klpgFSWLW1ZEs8svjfb7g4qWY0YS5imI82dTg+QahUvJ8YqAY0P10Uk8tTyh9ZGuYEZEMaeJYCF5BFuX552hsw==" + }, + "node_modules/arch": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/arch/-/arch-3.0.0.tgz", + "integrity": "sha512-AmIAC+Wtm2AU8lGfTtHsw0Y9Qtftx2YXEEtiBP10xFUtMOA+sHHx6OAddyL52mUKh1vsXQ6/w1mVDptZCyUt4Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/array-timsort": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/array-timsort/-/array-timsort-1.0.3.tgz", + "integrity": "sha512-/+3GRL7dDAGEfM6TseQk/U+mi18TU2Ms9I3UlLdUMhz2hbvGNTKdj9xniwXfUqgYhHxRx0+8UnKkvlNwVU+cWQ==", + "dev": true + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/axios": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", + "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/b4a": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.7.3.tgz", + "integrity": "sha512-5Q2mfq2WfGuFp3uS//0s6baOJLMoVduPYVeNmDYxu5OUA1/cBfvr2RIS7vi62LdNj/urk1hfmj867I3qt6uZ7Q==", + "dev": true, + "peerDependencies": { + "react-native-b4a": "*" + }, + "peerDependenciesMeta": { + "react-native-b4a": { + "optional": true + } + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/bare-events": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.8.2.tgz", + "integrity": "sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ==", + "dev": true, + "peerDependencies": { + "bare-abort-controller": "*" + }, + "peerDependenciesMeta": { + "bare-abort-controller": { + "optional": true + } + } + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.11", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz", + "integrity": "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/bin-version": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/bin-version/-/bin-version-6.0.0.tgz", + "integrity": "sha512-nk5wEsP4RiKjG+vF+uG8lFsEn4d7Y6FVDamzzftSunXOoOcOOkzcWdKVlGgFFwlUQCj63SgnUkLLGF8v7lufhw==", + "dev": true, + "dependencies": { + "execa": "^5.0.0", + "find-versions": "^5.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bin-version-check": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/bin-version-check/-/bin-version-check-5.1.0.tgz", + "integrity": "sha512-bYsvMqJ8yNGILLz1KP9zKLzQ6YpljV3ln1gqhuLkUtyfGi3qXKGuK2p+U4NAvjVFzDFiBBtOpCOSFNuYYEGZ5g==", + "dev": true, + "dependencies": { + "bin-version": "^6.0.0", + "semver": "^7.5.3", + "semver-truncate": "^3.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bintrees": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bintrees/-/bintrees-1.0.2.tgz", + "integrity": "sha512-VOMgTMwjAaUG580SXn3LacVgjurrbMme7ZZNYGSSV7mmtY6QQRh0Eg3pwIcntQ77DErK1L0NxkbetjcoXzVwKw==" + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/body-parser": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.1.tgz", + "integrity": "sha512-nfDwkulwiZYQIGwxdy0RUmowMhKcFVcYXUU7m4QlKYim1rUtg83xm2yjZ40QjDuc291AJjjeSc9b++AWHSgSHw==", + "dependencies": { + "bytes": "^3.1.2", + "content-type": "^1.0.5", + "debug": "^4.4.3", + "http-errors": "^2.0.0", + "iconv-lite": "^0.7.0", + "on-finished": "^2.4.1", + "qs": "^6.14.0", + "raw-body": "^3.0.1", + "type-is": "^2.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/boxen": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", + "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", + "dependencies": { + "ansi-align": "^3.0.0", + "camelcase": "^6.2.0", + "chalk": "^4.1.0", + "cli-boxes": "^2.2.1", + "string-width": "^4.2.2", + "type-fest": "^0.20.2", + "widest-line": "^3.1.0", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "dev": true, + "engines": { + "node": "*" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/c12": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/c12/-/c12-3.1.0.tgz", + "integrity": "sha512-uWoS8OU1MEIsOv8p/5a82c3H31LsWVR5qiyXVfBNOzfffjUWtPnhAb4BYI2uG2HfGmZmFjCtui5XNWaps+iFuw==", + "devOptional": true, + "dependencies": { + "chokidar": "^4.0.3", + "confbox": "^0.2.2", + "defu": "^6.1.4", + "dotenv": "^16.6.1", + "exsolve": "^1.0.7", + "giget": "^2.0.0", + "jiti": "^2.4.2", + "ohash": "^2.0.11", + "pathe": "^2.0.3", + "perfect-debounce": "^1.0.0", + "pkg-types": "^2.2.0", + "rc9": "^2.1.2" + }, + "peerDependencies": { + "magicast": "^0.3.5" + }, + "peerDependenciesMeta": { + "magicast": { + "optional": true + } + } + }, + "node_modules/c12/node_modules/dotenv": { + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "devOptional": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "dev": true, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "dev": true, + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001762", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001762.tgz", + "integrity": "sha512-PxZwGNvH7Ak8WX5iXzoK1KPZttBXNPuaOvI2ZYU7NrlM+d9Ov+TUvlLOBNGzVXAntMSMMlJPd+jY6ovrVjSmUw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/cargo-cp-artifact": { + "version": "0.1.9", + "resolved": "https://registry.npmjs.org/cargo-cp-artifact/-/cargo-cp-artifact-0.1.9.tgz", + "integrity": "sha512-6F+UYzTaGB+awsTXg0uSJA1/b/B3DDJzpKVRu0UmyI7DmNeaAl2RFHuTGIN6fEgpadRxoXGb7gbC1xo4C3IdyA==", + "bin": { + "cargo-cp-artifact": "bin/cargo-cp-artifact.js" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/chardet": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-2.1.1.tgz", + "integrity": "sha512-PsezH1rqdV9VvyNhxxOW32/d75r01NY7TQCmOqomRo15ZSOKbpTFVsfjghxo6JloQUCGnH4k1LGu0R4yCLlWQQ==", + "dev": true + }, + "node_modules/check-disk-space": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/check-disk-space/-/check-disk-space-3.4.0.tgz", + "integrity": "sha512-drVkSqfwA+TvuEhFipiR1OC9boEGZL5RrWvVsOthdcvQNXyCCuKkEiTOTXZ7qxSf/GLwq4GvzfrQD/Wz325hgw==", + "engines": { + "node": ">=16" + } + }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "devOptional": true, + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/citty": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/citty/-/citty-0.1.6.tgz", + "integrity": "sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==", + "devOptional": true, + "dependencies": { + "consola": "^3.2.3" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true + }, + "node_modules/class-transformer": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/class-transformer/-/class-transformer-0.5.1.tgz", + "integrity": "sha512-SQa1Ws6hUbfC98vKGxZH3KFY0Y1lm5Zm0SY8XX9zbK7FJCyVEac3ATW0RIpwzW+oOfmHE5PMPufDG9hCfoEOMw==" + }, + "node_modules/class-validator": { + "version": "0.14.3", + "resolved": "https://registry.npmjs.org/class-validator/-/class-validator-0.14.3.tgz", + "integrity": "sha512-rXXekcjofVN1LTOSw+u4u9WXVEUvNBVjORW154q/IdmYWy1nMbOU9aNtZB0t8m+FJQ9q91jlr2f9CwwUFdFMRA==", + "dependencies": { + "@types/validator": "^13.15.3", + "libphonenumber-js": "^1.11.1", + "validator": "^13.15.20" + } + }, + "node_modules/cli-boxes": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", + "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-width": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", + "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + "dev": true, + "engines": { + "node": ">= 12" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dev": true, + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.3.tgz", + "integrity": "sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw==", + "dev": true + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "dev": true + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/comment-json": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/comment-json/-/comment-json-4.4.1.tgz", + "integrity": "sha512-r1To31BQD5060QdkC+Iheai7gHwoSZobzunqkf2/kQ6xIAfJyrKNAFUwdKvkK7Qgu7pVTKQEa7ok7Ed3ycAJgg==", + "dev": true, + "dependencies": { + "array-timsort": "^1.0.3", + "core-util-is": "^1.0.3", + "esprima": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/concat-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz", + "integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==", + "engines": [ + "node >= 6.0" + ], + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.0.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/confbox": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.2.2.tgz", + "integrity": "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==", + "devOptional": true + }, + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/content-disposition": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz", + "integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz", + "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==", + "engines": { + "node": ">=6.6.0" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "dev": true + }, + "node_modules/cors": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", + "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dev": true, + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cross-spawn/node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/cross-spawn/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/debug/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dev": true, + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dedent": { + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.1.tgz", + "integrity": "sha512-9JmrhGZpOlEgOLdQgSm0zxFaYoQon408V1v49aqTWuXENVlnCuY9JBZcXZiCsZQWDjTm5Qf/nIvAy77mXDAjEg==", + "dev": true, + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deepmerge-ts": { + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/deepmerge-ts/-/deepmerge-ts-7.1.5.tgz", + "integrity": "sha512-HOJkrhaYsweh+W+e74Yn7YStZOilkoPb6fycpwNLKzSPtruFs48nYis0zy5yJz1+ktUhHxoRDJ27RQAWLIJVJw==", + "devOptional": true, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/defu": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.4.tgz", + "integrity": "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==", + "devOptional": true + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destr": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/destr/-/destr-2.0.5.tgz", + "integrity": "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==", + "devOptional": true + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dotenv-expand": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-12.0.1.tgz", + "integrity": "sha512-LaKRbou8gt0RNID/9RoI+J2rvXsBRPMV7p+ElHlPhcSARbCPDYcYG2s1TIzAfWv4YSgyY5taidWzzs31lNV3yQ==", + "dependencies": { + "dotenv": "^16.4.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/effect": { + "version": "3.18.4", + "resolved": "https://registry.npmjs.org/effect/-/effect-3.18.4.tgz", + "integrity": "sha512-b1LXQJLe9D11wfnOKAk3PKxuqYshQ0Heez+y5pnkd3jLj1yx9QhM72zZ9uUrOQyNvrs2GZZd/3maL0ZV18YuDA==", + "devOptional": true, + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "fast-check": "^3.23.1" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.267", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", + "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==" + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/empathic": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/empathic/-/empathic-2.0.0.tgz", + "integrity": "sha512-i6UzDscO/XfAcNYD75CfICkmfLedpyPDdozrLMmQc5ORaQcdMoc21OnlEylMIqI7U8eniKrPMxxtj8k0vhmJhA==", + "devOptional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.4", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.4.tgz", + "integrity": "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/envinfo": { + "version": "7.21.0", + "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.21.0.tgz", + "integrity": "sha512-Lw7I8Zp5YKHFCXL7+Dz95g4CcbMEpgvqZNNq3AmlT5XAV6CgAAk6gyAMqn2zjw08K9BHfcNuKrMiCPLByGafow==", + "dev": true, + "bin": { + "envinfo": "dist/cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.0.0.tgz", + "integrity": "sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/eventemitter2": { + "version": "6.4.9", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.9.tgz", + "integrity": "sha512-JEPTiaOt9f04oa6NOkc4aH+nVp5I3wEjpHbIPqfgCdD5v5bUzy7xQqwcVO2aDQgOWhI28da57HksMrzK9HlRxg==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/events-universal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/events-universal/-/events-universal-1.0.1.tgz", + "integrity": "sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==", + "dev": true, + "dependencies": { + "bare-events": "^2.7.0" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/express": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", + "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", + "dependencies": { + "accepts": "^2.0.0", + "body-parser": "^2.2.1", + "content-disposition": "^1.0.0", + "content-type": "^1.0.5", + "cookie": "^0.7.1", + "cookie-signature": "^1.2.1", + "debug": "^4.4.0", + "depd": "^2.0.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "finalhandler": "^2.1.0", + "fresh": "^2.0.0", + "http-errors": "^2.0.0", + "merge-descriptors": "^2.0.0", + "mime-types": "^3.0.0", + "on-finished": "^2.4.1", + "once": "^1.4.0", + "parseurl": "^1.3.3", + "proxy-addr": "^2.0.7", + "qs": "^6.14.0", + "range-parser": "^1.2.1", + "router": "^2.2.0", + "send": "^1.1.0", + "serve-static": "^2.2.0", + "statuses": "^2.0.1", + "type-is": "^2.0.1", + "vary": "^1.1.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/exsolve": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/exsolve/-/exsolve-1.0.8.tgz", + "integrity": "sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==", + "devOptional": true + }, + "node_modules/ext-list": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/ext-list/-/ext-list-2.2.2.tgz", + "integrity": "sha512-u+SQgsubraE6zItfVA0tBuCBhfU9ogSRnsvygI7wht9TS510oLkBRXBsqopeUG/GBOIQyKZO9wjTqIu/sf5zFA==", + "dev": true, + "dependencies": { + "mime-db": "^1.28.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ext-name": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ext-name/-/ext-name-5.0.0.tgz", + "integrity": "sha512-yblEwXAbGv1VQDmow7s38W77hzAgJAO50ztBLMcUyUBfxv1HC+LGwtiEN+Co6LtlqT/5uwVOxsD4TNIilWhwdQ==", + "dev": true, + "dependencies": { + "ext-list": "^2.0.0", + "sort-keys-length": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/fast-check": { + "version": "3.23.2", + "resolved": "https://registry.npmjs.org/fast-check/-/fast-check-3.23.2.tgz", + "integrity": "sha512-h5+1OzzfCC3Ef7VbtKdcv7zsstUQwUDlYpUTvjeUsJAssPgLn7QzbboPtL5ro04Mq0rPOsMzl7q5hIbRs2wD1A==", + "devOptional": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "dependencies": { + "pure-rand": "^6.1.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-fifo": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", + "integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==", + "dev": true + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fast-safe-stringify": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", + "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==" + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ] + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", + "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "dev": true, + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fflate": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz", + "integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==", + "dev": true + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/file-type": { + "version": "21.2.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-21.2.0.tgz", + "integrity": "sha512-vCYBgFOrJQLoTzDyAXAL/RFfKnXXpUYt4+tipVy26nJJhT7ftgGETf2tAQF59EEL61i3MrorV/PG6tf7LJK7eg==", + "dependencies": { + "@tokenizer/inflate": "^0.4.1", + "strtok3": "^10.3.4", + "token-types": "^6.1.1", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/filename-reserved-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/filename-reserved-regex/-/filename-reserved-regex-3.0.0.tgz", + "integrity": "sha512-hn4cQfU6GOT/7cFHXBqeBg2TbrMBgdD0kcjLhvSQYYwm3s4B6cjvBfb7nBALJLAXqmU5xajSa7X2NnUud/VCdw==", + "dev": true, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/filenamify": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/filenamify/-/filenamify-6.0.0.tgz", + "integrity": "sha512-vqIlNogKeyD3yzrm0yhRMQg8hOVwYcYRfjEoODd49iCprMn4HL85gK3HcykQE53EPIpX3HcAbGA5ELQv216dAQ==", + "dev": true, + "dependencies": { + "filename-reserved-regex": "^3.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz", + "integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==", + "dependencies": { + "debug": "^4.4.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "on-finished": "^2.4.1", + "parseurl": "^1.3.3", + "statuses": "^2.0.1" + }, + "engines": { + "node": ">= 18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-versions": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/find-versions/-/find-versions-5.1.0.tgz", + "integrity": "sha512-+iwzCJ7C5v5KgcBuueqVoNiHVoQpwiUK5XFLjf0affFTep+Wcw93tPvmb8tqujDNmzhBDPddnWV/qgWSXgq+Hg==", + "dev": true, + "dependencies": { + "semver-regex": "^4.0.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "bin": { + "flat": "cli.js" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-9.1.0.tgz", + "integrity": "sha512-mpafl89VFPJmhnJ1ssH+8wmM2b50n+Rew5x42NeI2U78aRWgtkEtGmctp7iT16UjquJTjorEmIfESj3DxdW84Q==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.16.7", + "chalk": "^4.1.2", + "chokidar": "^4.0.1", + "cosmiconfig": "^8.2.0", + "deepmerge": "^4.2.2", + "fs-extra": "^10.0.0", + "memfs": "^3.4.1", + "minimatch": "^3.0.4", + "node-abort-controller": "^3.0.1", + "schema-utils": "^3.1.1", + "semver": "^7.3.5", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">=14.21.3" + }, + "peerDependencies": { + "typescript": ">3.6.0", + "webpack": "^5.11.0" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dev": true, + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "dev": true, + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/form-data/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/form-data/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz", + "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/fs-monkey": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.1.0.tgz", + "integrity": "sha512-QMUezzXWII9EV5aTFXW1UBVUO77wYPpjqIF8/AviUCThNeSYZykpoTixUeaNNBwmCev0AMDWMAni+f8Hxb1IFw==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/giget": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/giget/-/giget-2.0.0.tgz", + "integrity": "sha512-L5bGsVkxJbJgdnwyuheIunkGatUF/zssUoxxjACCseZYAVbaqdh9Tsmmlkl8vYan09H7sbvKt4pS8GqKLBrEzA==", + "devOptional": true, + "dependencies": { + "citty": "^0.1.6", + "consola": "^3.4.0", + "defu": "^6.1.4", + "node-fetch-native": "^1.6.6", + "nypm": "^0.6.0", + "pathe": "^2.0.3" + }, + "bin": { + "giget": "dist/cli.mjs" + } + }, + "node_modules/glob": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-13.0.0.tgz", + "integrity": "sha512-tvZgpqk6fz4BaNZ66ZsRaZnbHvP/jG3uKJvAZOwEVUL4RTA5nJeeLYfyN9/VA8NX/V3IBG+hkeuGpKjvELkVhA==", + "dev": true, + "dependencies": { + "minimatch": "^10.1.1", + "minipass": "^7.1.2", + "path-scurry": "^2.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob-to-regex.js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/glob-to-regex.js/-/glob-to-regex.js-1.2.0.tgz", + "integrity": "sha512-QMwlOQKU/IzqMUOAZWubUOT8Qft+Y0KQWnX9nK3ch0CJg0tTp4TvGZsTfudYKv2NzoQSyPcnA6TYeIQ3jGichQ==", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" + }, + "node_modules/glob/node_modules/minimatch": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz", + "integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==", + "dev": true, + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/got/-/got-13.0.0.tgz", + "integrity": "sha512-XfBk1CxOOScDcMr9O1yKkNaQyy865NbYs+F7dr4H0LZMVgCj2Le59k6PqbNHoL5ToeaEQUYh6c6yMfVcc6SJxA==", + "dev": true, + "dependencies": { + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/handlebars/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/heap-js": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/heap-js/-/heap-js-2.7.1.tgz", + "integrity": "sha512-EQfezRg0NCZGNlhlDR3Evrw1FVL2G3LhU7EgPoxufQKruNBSYA8MiRPHeWbU+36o+Fhel0wMwM+sLEiBAlNLJA==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/http-cache-semantics": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", + "dev": true + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "dev": true, + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/hyperdyperid": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/hyperdyperid/-/hyperdyperid-1.2.0.tgz", + "integrity": "sha512-Y93lCzHYgGWdrJ66yIktxiaGULYc6oGiABxhcO5AufBeOyoIdZF7bIfLaOrbM0iGIOXQQgxxRrFEnb+Y6w1n4A==", + "engines": { + "node": ">=10.18" + } + }, + "node_modules/iconv-lite": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz", + "integrity": "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/inspect-with-kind": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/inspect-with-kind/-/inspect-with-kind-1.0.5.tgz", + "integrity": "sha512-MAQUJuIo7Xqk8EVNP+6d3CKq9c80hi4tjIbIAT6lmGW9W6WzlHiu9PS8uSuUYU+Do+j1baiFp3H25XEVxDIG2g==", + "dev": true, + "dependencies": { + "kind-of": "^6.0.2" + } + }, + "node_modules/interpret": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-3.1.1.tgz", + "integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==", + "dev": true, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-promise": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", + "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==" + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-3.1.1.tgz", + "integrity": "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", + "engines": { + "node": ">=16" + } + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/iterare": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/iterare/-/iterare-1.2.1.tgz", + "integrity": "sha512-RKYVTCjAnRthyJes037NX/IiqeidgN1xc3j1RjFfECFp28A1GVwK9nA+i0rJPaHqSZwygLzRnFlzUuHFoWWy+Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-config/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-runner/node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jiti": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + "devOptional": true, + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonc-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.3.1.tgz", + "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==", + "dev": true + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/kafkajs": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/kafkajs/-/kafkajs-2.2.4.tgz", + "integrity": "sha512-j/YeapB1vfPT2iOIUn/vxdyKEuhuY2PxMBvf5JWux6iSaukAccrMtXEY/Lb7OvavDhOWME589bpLrEdnVHjfjA==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/libphonenumber-js": { + "version": "1.12.33", + "resolved": "https://registry.npmjs.org/libphonenumber-js/-/libphonenumber-js-1.12.33.tgz", + "integrity": "sha512-r9kw4OA6oDO4dPXkOrXTkArQAafIKAU71hChInV4FxZ69dxCfbwQGDPzqR5/vea94wU705/3AZroEbSoeVWrQw==" + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/load-esm": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/load-esm/-/load-esm-1.0.3.tgz", + "integrity": "sha512-v5xlu8eHD1+6r8EHTg6hfmO97LN8ugKtiXcy5e6oN72iD2r6u0RPfLl6fxM+7Wnh2ZRq15o0russMst44WauPA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + }, + { + "type": "buymeacoffee", + "url": "https://buymeacoffee.com/borewit" + } + ], + "engines": { + "node": ">=13.2.0" + } + }, + "node_modules/loader-runner": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.1.tgz", + "integrity": "sha512-IWqP2SCPhyVFTBtRcgMHdzlf9ul25NwaFx4wCEH/KjAXuuHY4yNjvPXsBokp8jCB936PyWRaPKUNh8NvylLp2Q==", + "engines": { + "node": ">=6.11.5" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/long": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==" + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "dev": true, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/magic-string": { + "version": "0.30.17", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", + "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "dev": true, + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz", + "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/memfs": { + "version": "4.51.1", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-4.51.1.tgz", + "integrity": "sha512-Eyt3XrufitN2ZL9c/uIRMyDwXanLI88h/L3MoWqNY747ha3dMR9dWqp8cRT5ntjZ0U1TNuq4U91ZXK0sMBjYOQ==", + "dependencies": { + "@jsonjoy.com/json-pack": "^1.11.0", + "@jsonjoy.com/util": "^1.9.0", + "glob-to-regex.js": "^1.0.1", + "thingies": "^2.5.0", + "tree-dump": "^1.0.3", + "tslib": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + } + }, + "node_modules/merge-descriptors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz", + "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "dev": true, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/ms": { + "version": "3.0.0-canary.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-3.0.0-canary.1.tgz", + "integrity": "sha512-kh8ARjh8rMN7Du2igDRO9QJnqCb2xYTJxyQYK7vJJS4TvLLmsbyhiKpSW+t+y26gyOyMd0riphX0GeWKU3ky5g==", + "engines": { + "node": ">=12.13" + } + }, + "node_modules/multer": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/multer/-/multer-2.0.2.tgz", + "integrity": "sha512-u7f2xaZ/UG8oLXHvtF/oWTRvT44p9ecwBBqTwgJVq0+4BW1g8OW01TyMEGWBHbyMOYVHXslaut7qEQ1meATXgw==", + "dependencies": { + "append-field": "^1.0.0", + "busboy": "^1.6.0", + "concat-stream": "^2.0.0", + "mkdirp": "^0.5.6", + "object-assign": "^4.1.1", + "type-is": "^1.6.18", + "xtend": "^4.0.2" + }, + "engines": { + "node": ">= 10.16.0" + } + }, + "node_modules/multer/node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/multer/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/multer/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/multer/node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mute-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz", + "integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==", + "dev": true, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" + }, + "node_modules/nexus-rpc": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/nexus-rpc/-/nexus-rpc-0.0.1.tgz", + "integrity": "sha512-hAWn8Hh2eewpB5McXR5EW81R3pR/ziuGhKCF3wFyUVCklanPqrIgMNr7jKCbzXeNVad0nUDfWpFRqh2u+zxQtw==", + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/node-abort-controller": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz", + "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==", + "dev": true + }, + "node_modules/node-emoji": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", + "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==", + "dev": true, + "dependencies": { + "lodash": "^4.17.21" + } + }, + "node_modules/node-fetch-native": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/node-fetch-native/-/node-fetch-native-1.6.7.tgz", + "integrity": "sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==", + "devOptional": true + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.1.1.tgz", + "integrity": "sha512-JYc0DPlpGWB40kH5g07gGTrYuMqV653k3uBKY6uITPWds3M0ov3GaWGp9lbE3Bzngx8+XkfzgvASb9vk9JDFXQ==", + "dev": true, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nypm": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/nypm/-/nypm-0.6.2.tgz", + "integrity": "sha512-7eM+hpOtrKrBDCh7Ypu2lJ9Z7PNZBdi/8AT3AX8xoCj43BBVHD0hPSTEvMtkMpfs8FCqBGhxB+uToIQimA111g==", + "devOptional": true, + "dependencies": { + "citty": "^0.1.6", + "consola": "^3.4.2", + "pathe": "^2.0.3", + "pkg-types": "^2.3.0", + "tinyexec": "^1.0.1" + }, + "bin": { + "nypm": "dist/cli.mjs" + }, + "engines": { + "node": "^14.16.0 || >=16.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ohash": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-2.0.11.tgz", + "integrity": "sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ==", + "devOptional": true + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opossum": { + "version": "8.5.0", + "resolved": "https://registry.npmjs.org/opossum/-/opossum-8.5.0.tgz", + "integrity": "sha512-LZNvs+p9/ZbG4oN6unnjh4hTxkB0dyHKI2p7azVt8w+//GKDpfHss6WR7KebbpzGEssYwtSd8Mvwxqcmxg10NA==", + "engines": { + "node": "^24 || ^22 || ^21 || ^20 || ^18 || ^16" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-cancelable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", + "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", + "dev": true, + "engines": { + "node": ">=12.20" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "node_modules/path-scurry": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.1.tgz", + "integrity": "sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==", + "dev": true, + "dependencies": { + "lru-cache": "^11.0.0", + "minipass": "^7.1.2" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "dev": true, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/path-to-regexp": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "devOptional": true + }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", + "dev": true + }, + "node_modules/perfect-debounce": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/perfect-debounce/-/perfect-debounce-1.0.0.tgz", + "integrity": "sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==", + "devOptional": true + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==" + }, + "node_modules/picomatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/piscina": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/piscina/-/piscina-4.9.2.tgz", + "integrity": "sha512-Fq0FERJWFEUpB4eSY59wSNwXD4RYqR+nR/WiEVcZW8IWfVBxJJafcgTEZDQo8k3w0sUarJ8RyVbbUF4GQ2LGbQ==", + "dev": true, + "optionalDependencies": { + "@napi-rs/nice": "^1.0.1" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-types": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-2.3.0.tgz", + "integrity": "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==", + "devOptional": true, + "dependencies": { + "confbox": "^0.2.2", + "exsolve": "^1.0.7", + "pathe": "^2.0.3" + } + }, + "node_modules/pluralize": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", + "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz", + "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==", + "dev": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prisma": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/prisma/-/prisma-6.19.1.tgz", + "integrity": "sha512-XRfmGzh6gtkc/Vq3LqZJcS2884dQQW3UhPo6jNRoiTW95FFQkXFg8vkYEy6og+Pyv0aY7zRQ7Wn1Cvr56XjhQQ==", + "devOptional": true, + "hasInstallScript": true, + "dependencies": { + "@prisma/config": "6.19.1", + "@prisma/engines": "6.19.1" + }, + "bin": { + "prisma": "build/index.js" + }, + "engines": { + "node": ">=18.18" + }, + "peerDependencies": { + "typescript": ">=5.1.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/prom-client": { + "version": "15.1.3", + "resolved": "https://registry.npmjs.org/prom-client/-/prom-client-15.1.3.tgz", + "integrity": "sha512-6ZiOBfCywsD4k1BN9IX0uZhF+tJkV8q8llP64G5Hajs4JOeVLPCwpPVcpXy3BwYiUGgyJzsJJQeOIv7+hDSq8g==", + "dependencies": { + "@opentelemetry/api": "^1.4.0", + "tdigest": "^0.1.1" + }, + "engines": { + "node": "^16 || ^18 || >=20" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/proto3-json-serializer": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/proto3-json-serializer/-/proto3-json-serializer-2.0.2.tgz", + "integrity": "sha512-SAzp/O4Yh02jGdRc+uIrGoe87dkN/XtwxfZ4ZyafJHymd79ozp5VG5nyZ7ygqPM5+cpLDjjGnYFUkngonyDPOQ==", + "dependencies": { + "protobufjs": "^7.2.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/protobufjs": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz", + "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "devOptional": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/qs": { + "version": "6.14.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz", + "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz", + "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==", + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.7.0", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/rc9": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/rc9/-/rc9-2.1.2.tgz", + "integrity": "sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==", + "devOptional": true, + "dependencies": { + "defu": "^6.1.4", + "destr": "^2.0.3" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "devOptional": true, + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/rechoir": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.8.0.tgz", + "integrity": "sha512-/vxpCXddiX8NGfGO/mTafwjq4aFa/71pvamip0++IQk3zG8cbCj0fifNPrjjF1XMXUne91jL9OoxmdykoEtifQ==", + "dev": true, + "dependencies": { + "resolve": "^1.20.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/reflect-metadata": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", + "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dev": true, + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", + "dev": true + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-cwd/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "dev": true, + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/restore-cursor/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/rimraf": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-6.1.2.tgz", + "integrity": "sha512-cFCkPslJv7BAXJsYlK1dZsbP8/ZNLkCAQ0bi1hf5EKX2QHegmDFEFA6QhuYJlk7UDdc+02JjO80YSOrWPpw06g==", + "dev": true, + "dependencies": { + "glob": "^13.0.0", + "package-json-from-dist": "^1.0.1" + }, + "bin": { + "rimraf": "dist/esm/bin.mjs" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/router": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz", + "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==", + "dependencies": { + "debug": "^4.4.0", + "depd": "^2.0.0", + "is-promise": "^4.0.0", + "parseurl": "^1.3.3", + "path-to-regexp": "^8.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/schema-utils/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/schema-utils/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "dev": true, + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/schema-utils/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/seek-bzip": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/seek-bzip/-/seek-bzip-2.0.0.tgz", + "integrity": "sha512-SMguiTnYrhpLdk3PwfzHeotrcwi8bNV4iemL9tx9poR/yeaMYwB9VzR1w7b57DuWpuqR8n6oZboi0hj3AxZxQg==", + "dev": true, + "dependencies": { + "commander": "^6.0.0" + }, + "bin": { + "seek-bunzip": "bin/seek-bunzip", + "seek-table": "bin/seek-bzip-table" + } + }, + "node_modules/seek-bzip/node_modules/commander": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz", + "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-regex": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/semver-regex/-/semver-regex-4.0.5.tgz", + "integrity": "sha512-hunMQrEy1T6Jr2uEVjrAIqjwWcQTgOAcIM52C8MY1EZSD3DDNft04XzvYKPqjED65bNVVko0YI38nYeEHCX3yw==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/semver-truncate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/semver-truncate/-/semver-truncate-3.0.0.tgz", + "integrity": "sha512-LJWA9kSvMolR51oDE6PN3kALBNaUdkxzAGcexw8gjMA8xr5zUqK0JiR3CgARSqanYF3Z1YHvsErb1KDgh+v7Rg==", + "dev": true, + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", + "dependencies": { + "debug": "^4.4.3", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "fresh": "^2.0.0", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", + "ms": "^2.1.3", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "statuses": "^2.0.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-static": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz", + "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==", + "dependencies": { + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "parseurl": "^1.3.3", + "send": "^1.2.0" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dev": true, + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/sort-keys": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-1.1.2.tgz", + "integrity": "sha512-vzn8aSqKgytVik0iwdBEi+zevbTYZogewTUM6dtpmGwEcdzbub/TX4bCzRhebDCRC3QzXgJsLRKB2V/Oof7HXg==", + "dev": true, + "dependencies": { + "is-plain-obj": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sort-keys-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sort-keys-length/-/sort-keys-length-1.0.1.tgz", + "integrity": "sha512-GRbEOUqCxemTAk/b32F2xa8wDTs+Z1QHOkbhJDQTvv/6G3ZkbJ+frYWsTcc7cBB3Fu4wy4XlLCuNtJuMn7Gsvw==", + "dev": true, + "dependencies": { + "sort-keys": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-loader": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/source-map-loader/-/source-map-loader-4.0.2.tgz", + "integrity": "sha512-oYwAqCuL0OZhBoSgmdrLa7mv9MjommVMiQIWgcztf+eS4+8BfcUee6nenFnDhKOhzAVnk5gpZdfnz1iiBv+5sg==", + "dependencies": { + "iconv-lite": "^0.6.3", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.72.1" + } + }, + "node_modules/source-map-loader/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/streamx": { + "version": "2.23.0", + "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.23.0.tgz", + "integrity": "sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg==", + "dev": true, + "dependencies": { + "events-universal": "^1.0.0", + "fast-fifo": "^1.3.2", + "text-decoder": "^1.1.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-dirs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-dirs/-/strip-dirs-3.0.0.tgz", + "integrity": "sha512-I0sdgcFTfKQlUPZyAqPJmSG3HLO9rWDFnxonnIbskYNM3DwFOeTNB5KzVq3dA1GdRAc/25b5Y7UO2TQfKWw4aQ==", + "dev": true, + "dependencies": { + "inspect-with-kind": "^1.0.5", + "is-plain-obj": "^1.1.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strtok3": { + "version": "10.3.4", + "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-10.3.4.tgz", + "integrity": "sha512-KIy5nylvC5le1OdaaoCJ07L+8iQzJHGH6pWDuzS+d07Cu7n1MZ2x26P8ZKIWfbK02+XIL8Mp4RkWeqdUCrDMfg==", + "dependencies": { + "@tokenizer/token": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/swc-loader": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/swc-loader/-/swc-loader-0.2.6.tgz", + "integrity": "sha512-9Zi9UP2YmDpgmQVbyOPJClY0dwf58JDyDMQ7uRc4krmc72twNI2fvlBWHLqVekBpPc7h5NJkGVT1zNDxFrqhvg==", + "dependencies": { + "@swc/counter": "^0.1.3" + }, + "peerDependencies": { + "@swc/core": "^1.2.147", + "webpack": ">=2" + } + }, + "node_modules/symbol-observable": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-4.0.0.tgz", + "integrity": "sha512-b19dMThMV4HVFynSAM1++gBHAbk2Tc/osgLIBZMKsyqh34jb2e8Os7T6ZW/Bt3pJFdBTd2JwAnAAEQV7rSNvcQ==", + "dev": true, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/tapable": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tar-stream": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz", + "integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==", + "dev": true, + "dependencies": { + "b4a": "^1.6.4", + "fast-fifo": "^1.2.0", + "streamx": "^2.15.0" + } + }, + "node_modules/tdigest": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/tdigest/-/tdigest-0.1.2.tgz", + "integrity": "sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==", + "dependencies": { + "bintrees": "1.0.2" + } + }, + "node_modules/terser": { + "version": "5.44.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.44.1.tgz", + "integrity": "sha512-t/R3R/n0MSwnnazuPpPNVO60LX0SKL45pyl9YlvxIdkH0Of7D5qM2EVe+yASRIlY5pZ73nclYJfNANGWPwFDZw==", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.15.0", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.16", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.16.tgz", + "integrity": "sha512-h9oBFCWrq78NyWWVcSwZarJkZ01c2AyGrzs1crmHZO3QUg9D61Wu4NPjBy69n7JqylFF5y+CsUZYmYEIZ3mR+Q==", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "jest-worker": "^27.4.5", + "schema-utils": "^4.3.0", + "serialize-javascript": "^6.0.2", + "terser": "^5.31.1" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.3.tgz", + "integrity": "sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/text-decoder": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/text-decoder/-/text-decoder-1.2.3.tgz", + "integrity": "sha512-3/o9z3X0X0fTupwsYvR03pJ/DjWuqqrfwBgTQzdWDiQSm9KitAyz/9WqsT2JQW7KV2m+bC2ol/zqpW37NHxLaA==", + "dev": true, + "dependencies": { + "b4a": "^1.6.4" + } + }, + "node_modules/thingies": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/thingies/-/thingies-2.5.0.tgz", + "integrity": "sha512-s+2Bwztg6PhWUD7XMfeYm5qliDdSiZm7M7n8KjTkIsm3l/2lgVRc2/Gx/v+ZX8lT4FMA+i8aQvhcWylldc+ZNw==", + "engines": { + "node": ">=10.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "^2" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", + "dev": true + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "devOptional": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/token-types": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/token-types/-/token-types-6.1.2.tgz", + "integrity": "sha512-dRXchy+C0IgK8WPC6xvCHFRIWYUbqqdEIKPaKo/AcTUNzwLTK6AH7RjdLWsEZcAN/TBdtfUw3PYEgPr5VPr6ww==", + "dependencies": { + "@borewit/text-codec": "^0.2.1", + "@tokenizer/token": "^0.3.0", + "ieee754": "^1.2.1" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/tree-dump": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/tree-dump/-/tree-dump-1.1.0.tgz", + "integrity": "sha512-rMuvhU4MCDbcbnleZTFezWsaZXRFemSqAM+7jPnzUl1fo9w3YEKOxAeui0fz3OI4EU4hf23iyA7uQRVko+UaBA==", + "engines": { + "node": ">=10.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/streamich" + }, + "peerDependencies": { + "tslib": "2" + } + }, + "node_modules/ts-jest": { + "version": "29.4.6", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.6.tgz", + "integrity": "sha512-fSpWtOO/1AjSNQguk43hb/JCo16oJDnMJf3CdEGNkqsEX3t0KX96xvyX1D7PfLCpVoKu4MfVrqUkFyblYoY4lA==", + "dev": true, + "dependencies": { + "bs-logger": "^0.2.6", + "fast-json-stable-stringify": "^2.1.0", + "handlebars": "^4.7.8", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.3", + "type-fest": "^4.41.0", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jest-util": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/ts-node/node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true + }, + "node_modules/tsconfig-paths": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-4.2.0.tgz", + "integrity": "sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==", + "dev": true, + "dependencies": { + "json5": "^2.2.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tsconfig-paths-webpack-plugin": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths-webpack-plugin/-/tsconfig-paths-webpack-plugin-4.2.0.tgz", + "integrity": "sha512-zbem3rfRS8BgeNK50Zz5SIQgXzLafiHjOwUAvk/38/o1jHn/V5QAgVUcz884or7WYcPaH3N2CIfUc2u0ul7UcA==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "enhanced-resolve": "^5.7.0", + "tapable": "^2.2.1", + "tsconfig-paths": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/tsconfig-paths/node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", + "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==", + "dependencies": { + "content-type": "^1.0.5", + "media-typer": "^1.1.0", + "mime-types": "^3.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==" + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "devOptional": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/uid": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/uid/-/uid-2.0.2.tgz", + "integrity": "sha512-u3xV3X7uzvi5b1MncmZo3i2Aw222Zk1keqLA1YkHldREkAhAqi65wuPfe7lHx8H/Wzy+8CE7S7uS3jekIM5s8g==", + "dependencies": { + "@lukeed/csprng": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/uint8array-extras": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/uint8array-extras/-/uint8array-extras-1.5.0.tgz", + "integrity": "sha512-rvKSBiC5zqCCiDZ9kAOszZcDvdAHwwIKJG33Ykj43OKcWsnmcBRL09YTU4nOeHZ8Y2a7l1MgTd08SBe9A8Qj6A==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unbzip2-stream": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz", + "integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==", + "dev": true, + "dependencies": { + "buffer": "^5.2.1", + "through": "^2.3.8" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==" + }, + "node_modules/unionfs": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/unionfs/-/unionfs-4.6.0.tgz", + "integrity": "sha512-fJAy3gTHjFi5S3TP5EGdjs/OUMFFvI/ady3T8qVuZfkv8Qi8prV/Q8BuFEgODJslhZTT2z2qdD2lGdee9qjEnA==", + "dependencies": { + "fs-monkey": "^1.0.0" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/validator": { + "version": "13.15.26", + "resolved": "https://registry.npmjs.org/validator/-/validator-13.15.26.tgz", + "integrity": "sha512-spH26xU080ydGggxRyR1Yhcbgx+j3y5jbNXk/8L+iRvdIEQ4uTRH2Sgf2dokud6Q4oAtsbNvJ1Ft+9xmm6IZcA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/watchpack": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.5.0.tgz", + "integrity": "sha512-e6vZvY6xboSwLz2GD36c16+O/2Z6fKvIf4pOXptw2rY9MVwE/TXc6RGqxD3I3x0a28lwBY7DE+76uTPSsBrrCA==", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/webpack": { + "version": "5.104.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.104.1.tgz", + "integrity": "sha512-Qphch25abbMNtekmEGJmeRUhLDbe+QfiWTiqpKYkpCOWY64v9eyl+KRRLmqOFA2AvKPpc9DC6+u2n76tQLBoaA==", + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.8", + "@types/json-schema": "^7.0.15", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.15.0", + "acorn-import-phases": "^1.0.3", + "browserslist": "^4.28.1", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.4", + "es-module-lexer": "^2.0.0", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.3.1", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^4.3.3", + "tapable": "^2.3.0", + "terser-webpack-plugin": "^5.3.16", + "watchpack": "^2.4.4", + "webpack-sources": "^3.3.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-cli": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/webpack-cli/-/webpack-cli-5.1.4.tgz", + "integrity": "sha512-pIDJHIEI9LR0yxHXQ+Qh95k2EvXpWzZ5l+d+jIo+RdSm9MiHfzazIxwwni/p7+x4eJZuvG1AJwgC4TNQ7NRgsg==", + "dev": true, + "dependencies": { + "@discoveryjs/json-ext": "^0.5.0", + "@webpack-cli/configtest": "^2.1.1", + "@webpack-cli/info": "^2.0.2", + "@webpack-cli/serve": "^2.0.5", + "colorette": "^2.0.14", + "commander": "^10.0.1", + "cross-spawn": "^7.0.3", + "envinfo": "^7.7.3", + "fastest-levenshtein": "^1.0.12", + "import-local": "^3.0.2", + "interpret": "^3.1.1", + "rechoir": "^0.8.0", + "webpack-merge": "^5.7.3" + }, + "bin": { + "webpack-cli": "bin/cli.js" + }, + "engines": { + "node": ">=14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "5.x.x" + }, + "peerDependenciesMeta": { + "@webpack-cli/generators": { + "optional": true + }, + "webpack-bundle-analyzer": { + "optional": true + }, + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/webpack-cli/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "dev": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "dev": true, + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-node-externals": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/webpack-node-externals/-/webpack-node-externals-3.0.0.tgz", + "integrity": "sha512-LnL6Z3GGDPht/AigwRh2dvL9PQPFQ8skEpVrWZXLWBYmqcaojHNN0onvHzie6rq7EWKrrBfPYqNEzTJgiwEQDQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/webpack-sources": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.3.3.tgz", + "integrity": "sha512-yd1RBzSGanHkitROoPFd6qsrxt+oFhg/129YzheDGqeustzX0vTZJZsSsQjVQC4yzBQ56K55XU8gaNCtIzOnTg==", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/webpack/node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/webpack/node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/schema-utils": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.3.tgz", + "integrity": "sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/which": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/which/-/which-4.0.0.tgz", + "integrity": "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==", + "dependencies": { + "isexe": "^3.1.1" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^16.13.0 || >=18.0.0" + } + }, + "node_modules/widest-line": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", + "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", + "dependencies": { + "string-width": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==", + "dev": true + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/write-file-atomic/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yauzl": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-3.2.0.tgz", + "integrity": "sha512-Ow9nuGZE+qp1u4JIPvg+uCiUr7xGQWdff7JQSk5VGYTAZMDe2q8lxJ10ygv10qmSj031Ty/6FNJpLO4o1Sgc+w==", + "dev": true, + "dependencies": { + "buffer-crc32": "~0.2.3", + "pend": "~1.2.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors-cjs": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.3.tgz", + "integrity": "sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/packages/bytebot-temporal-worker/package.json b/packages/bytebot-temporal-worker/package.json new file mode 100644 index 000000000..8caaa2e5d --- /dev/null +++ b/packages/bytebot-temporal-worker/package.json @@ -0,0 +1,95 @@ +{ + "name": "bytebot-temporal-worker", + "version": "1.4.0", + "description": "ByteBot Temporal Worker - Durable workflow execution for goal runs", + "author": "ByteBot Team", + "private": true, + "license": "UNLICENSED", + "main": "dist/main.js", + "scripts": { + "prebuild": "rimraf dist", + "build": "nest build", + "build:worker": "npm run build && webpack --config webpack.config.js", + "format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"", + "start": "nest start", + "start:dev": "nest start --watch", + "start:debug": "nest start --debug --watch", + "start:prod": "node dist/main", + "start:worker": "node dist/worker.js", + "lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix", + "test": "jest", + "test:watch": "jest --watch", + "test:cov": "jest --coverage", + "test:e2e": "jest --config ./test/jest-e2e.json", + "test:workflow": "jest --testPathPattern=workflow", + "prisma:generate": "prisma generate", + "prisma:migrate": "prisma migrate deploy" + }, + "dependencies": { + "@nestjs/common": "^11.0.1", + "@nestjs/config": "^4.0.2", + "@nestjs/core": "^11.0.1", + "@nestjs/event-emitter": "^3.0.0", + "@nestjs/platform-express": "^11.1.5", + "@nestjs/terminus": "^11.0.0", + "@prisma/client": "^6.16.1", + "@temporalio/activity": "^1.11.0", + "@temporalio/client": "^1.11.0", + "@temporalio/common": "^1.11.0", + "@temporalio/worker": "^1.11.0", + "@temporalio/workflow": "^1.11.0", + "@willsoto/nestjs-prometheus": "^6.0.1", + "axios": "^1.7.9", + "class-transformer": "^0.5.1", + "class-validator": "^0.14.2", + "kafkajs": "^2.2.4", + "opossum": "^8.1.4", + "prom-client": "^15.1.3", + "reflect-metadata": "^0.2.2", + "rxjs": "^7.8.1", + "zod": "^3.24.1" + }, + "devDependencies": { + "@nestjs/cli": "^11.0.0", + "@nestjs/schematics": "^11.0.0", + "@nestjs/testing": "^11.0.1", + "@swc/cli": "^0.7.9", + "@swc/core": "^1.15.8", + "@temporalio/testing": "^1.11.0", + "@types/express": "^5.0.0", + "@types/jest": "^29.5.14", + "@types/node": "^22.10.7", + "@types/opossum": "^8.1.0", + "eslint": "^9.18.0", + "jest": "^29.7.0", + "prettier": "^3.4.2", + "prisma": "^6.16.1", + "rimraf": "^6.0.1", + "source-map-support": "^0.5.21", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.3", + "webpack": "^5.89.0", + "webpack-cli": "^5.1.4" + }, + "jest": { + "moduleFileExtensions": [ + "js", + "json", + "ts" + ], + "rootDir": "src", + "testRegex": ".*\\.spec\\.ts$", + "transform": { + "^.+\\.(t|j)s$": "ts-jest" + }, + "collectCoverageFrom": [ + "**/*.(t|j)s" + ], + "coverageDirectory": "../coverage", + "testEnvironment": "node" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/bytebot-temporal-worker/src/activities/execution.activities.spec.ts b/packages/bytebot-temporal-worker/src/activities/execution.activities.spec.ts new file mode 100644 index 000000000..f2c79202f --- /dev/null +++ b/packages/bytebot-temporal-worker/src/activities/execution.activities.spec.ts @@ -0,0 +1,389 @@ +/** + * Execution Activities Unit Tests + * + * Tests step execution, verification, and failure classification. + */ + +import axios, { AxiosError } from 'axios'; +import { MockActivityEnvironment } from '@temporalio/testing'; +import { executeStep, verifyStep, classifyFailure } from './execution.activities'; +import type { ExecuteStepInput, VerifyStepInput, ExecuteStepOutput, VerifyStepOutput } from '../types/goal-run.types'; + +jest.mock('axios'); +const mockedAxios = axios as jest.Mocked; + +// Type for classifyFailure result +type FailureClassResult = { + category: 'TRANSIENT' | 'SEMANTIC' | 'PERMANENT'; + retryable: boolean; + suggestedAction: 'RETRY' | 'REPLAN' | 'FAIL'; +}; + +describe('Execution Activities', () => { + let mockActivityEnv: MockActivityEnvironment; + + beforeEach(() => { + mockActivityEnv = new MockActivityEnvironment(); + jest.clearAllMocks(); + jest.useFakeTimers(); + }); + + afterEach(() => { + jest.useRealTimers(); + }); + + describe('executeStep', () => { + const defaultInput: ExecuteStepInput = { + goalRunId: 'test-goal-123', + tenantId: 'test-tenant', + step: { + stepNumber: 1, + description: 'Create user API endpoint', + expectedOutcome: 'API endpoint responds with 200', + isHighRisk: false, + dependencies: [], + }, + workspaceId: 'test-workspace', + context: { + previousStepOutcome: undefined, + accumulatedKnowledge: [], + }, + }; + + it('should successfully execute a step', async () => { + jest.useRealTimers(); + + // Mock task dispatch + mockedAxios.post.mockResolvedValueOnce({ + data: { success: true, taskId: 'task-123', status: 'DISPATCHED' }, + }); + + // Mock task completion polling + mockedAxios.get.mockResolvedValueOnce({ + data: { + status: 'COMPLETED', + output: { + summary: 'API endpoint created successfully', + result: 'Endpoint /api/users responds with 200', + artifacts: ['/src/routes/users.ts'], + }, + }, + }); + + const result = await mockActivityEnv.run(executeStep, defaultInput) as ExecuteStepOutput; + + expect(result.success).toBe(true); + expect(result.outcome).toContain('API endpoint created'); + expect(result.artifacts).toContain('/src/routes/users.ts'); + }); + + it('should extract knowledge from execution output', async () => { + jest.useRealTimers(); + + mockedAxios.post.mockResolvedValueOnce({ + data: { success: true, taskId: 'task-123', status: 'DISPATCHED' }, + }); + + mockedAxios.get.mockResolvedValueOnce({ + data: { + status: 'COMPLETED', + output: { + summary: 'Found that the server runs on port 3000. Discovered database uses PostgreSQL.', + facts: ['Server port is 3000', 'Database is PostgreSQL'], + discoveries: ['API uses REST pattern'], + }, + }, + }); + + const result = await mockActivityEnv.run(executeStep, defaultInput) as ExecuteStepOutput; + + expect(result.knowledgeGained).toContain('Server port is 3000'); + expect(result.knowledgeGained).toContain('Database is PostgreSQL'); + expect(result.knowledgeGained).toContain('API uses REST pattern'); + }); + + it('should handle WAITING_USER_INPUT status', async () => { + jest.useRealTimers(); + + mockedAxios.post.mockResolvedValueOnce({ + data: { success: true, taskId: 'task-123', status: 'DISPATCHED' }, + }); + + mockedAxios.get.mockResolvedValueOnce({ + data: { + status: 'WAITING_USER_INPUT', + output: { summary: 'Requires clarification to proceed' }, + }, + }); + + const result = await mockActivityEnv.run(executeStep, defaultInput) as ExecuteStepOutput; + + expect(result.success).toBe(false); + expect(result.waitingForUserInput).toBe(true); + }); + + it('should handle WAITING_PROVIDER status', async () => { + jest.useRealTimers(); + + mockedAxios.post.mockResolvedValueOnce({ + data: { success: true, taskId: 'task-123', status: 'DISPATCHED' }, + }); + + mockedAxios.get.mockResolvedValueOnce({ + data: { + status: 'WAITING_PROVIDER', + output: { summary: 'Waiting for provider/model capacity' }, + }, + }); + + const result = await mockActivityEnv.run(executeStep, defaultInput) as ExecuteStepOutput; + + expect(result.success).toBe(false); + expect(result.waitingForProvider).toBe(true); + expect(result.waitingForUserInput).toBe(false); + }); + + it('should handle failed task execution', async () => { + jest.useRealTimers(); + + mockedAxios.post.mockResolvedValueOnce({ + data: { success: true, taskId: 'task-123', status: 'DISPATCHED' }, + }); + + mockedAxios.get.mockResolvedValueOnce({ + data: { + status: 'FAILED', + error: 'Permission denied: Cannot write to /etc/config', + }, + }); + + const result = await mockActivityEnv.run(executeStep, defaultInput) as ExecuteStepOutput; + + expect(result.success).toBe(false); + expect(result.error).toContain('Permission denied'); + }); + + it('should handle connection refused errors', async () => { + jest.useRealTimers(); + + const error = new Error('ECONNREFUSED'); + (error as any).code = 'ECONNREFUSED'; + (mockedAxios.isAxiosError as unknown) = jest.fn().mockReturnValue(true); + mockedAxios.post.mockRejectedValueOnce(error); + + const result = await mockActivityEnv.run(executeStep, defaultInput) as ExecuteStepOutput; + + expect(result.success).toBe(false); + expect(result.error).toContain('service unavailable'); + }); + + it('should handle rate limiting (429)', async () => { + jest.useRealTimers(); + + const error = { + response: { status: 429 }, + message: 'Too Many Requests', + }; + (mockedAxios.isAxiosError as unknown) = jest.fn().mockReturnValue(true); + mockedAxios.post.mockRejectedValueOnce(error); + + const result = await mockActivityEnv.run(executeStep, defaultInput) as ExecuteStepOutput; + + expect(result.success).toBe(false); + expect(result.error).toContain('Rate limited'); + }); + + it('should include idempotency key in task dispatch', async () => { + jest.useRealTimers(); + + mockedAxios.post.mockResolvedValueOnce({ + data: { success: true, taskId: 'task-123', status: 'DISPATCHED' }, + }); + + mockedAxios.get.mockResolvedValueOnce({ + data: { status: 'COMPLETED', output: { summary: 'Done' } }, + }); + + await mockActivityEnv.run(executeStep, defaultInput); + + expect(mockedAxios.post).toHaveBeenCalledWith( + expect.any(String), + expect.objectContaining({ + idempotencyKey: `${defaultInput.goalRunId}-step-${defaultInput.step.stepNumber}`, + }), + expect.any(Object) + ); + }); + }); + + describe('verifyStep', () => { + const defaultInput: VerifyStepInput = { + goalRunId: 'test-goal-123', + tenantId: 'test-tenant', + step: { + stepNumber: 1, + description: 'Create user API', + expectedOutcome: 'API responds with 200', + isHighRisk: false, + dependencies: [], + }, + executionResult: { + success: true, + outcome: 'API endpoint created', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + }, + }; + + it('should verify successful step with expected outcome', async () => { + mockedAxios.post.mockResolvedValueOnce({ + data: { + verified: true, + verificationDetails: 'API endpoint matches expected outcome', + suggestReplan: false, + }, + }); + + const result = await mockActivityEnv.run(verifyStep, defaultInput) as VerifyStepOutput; + + expect(result.verified).toBe(true); + expect(result.suggestReplan).toBe(false); + }); + + it('should suggest replan when verification fails', async () => { + mockedAxios.post.mockResolvedValueOnce({ + data: { + verified: false, + verificationDetails: 'API returns 404 instead of 200', + suggestReplan: true, + replanReason: 'Route configuration incorrect', + }, + }); + + const result = await mockActivityEnv.run(verifyStep, defaultInput) as VerifyStepOutput; + + expect(result.verified).toBe(false); + expect(result.suggestReplan).toBe(true); + expect(result.replanReason).toBe('Route configuration incorrect'); + }); + + it('should auto-verify when no expected outcome specified', async () => { + const inputWithoutExpected: VerifyStepInput = { + ...defaultInput, + step: { ...defaultInput.step, expectedOutcome: undefined }, + }; + + const result = await mockActivityEnv.run(verifyStep, inputWithoutExpected) as VerifyStepOutput; + + expect(result.verified).toBe(true); + expect(result.verificationDetails).toContain('no expected outcome specified'); + expect(mockedAxios.post).not.toHaveBeenCalled(); + }); + + it('should fallback to simple verification when orchestrator fails', async () => { + mockedAxios.post.mockRejectedValueOnce(new Error('Service unavailable')); + + const result = await mockActivityEnv.run(verifyStep, defaultInput) as VerifyStepOutput; + + expect(result.verified).toBe(true); // Fallback based on success flag + expect(result.verificationDetails).toContain('Fallback verification'); + }); + + it('should fail verification for failed execution result', async () => { + const failedInput: VerifyStepInput = { + ...defaultInput, + executionResult: { + ...defaultInput.executionResult, + success: false, + error: 'Execution failed', + }, + }; + + mockedAxios.post.mockRejectedValueOnce(new Error('Service unavailable')); + + const result = await mockActivityEnv.run(verifyStep, failedInput) as VerifyStepOutput; + + expect(result.verified).toBe(false); + expect(result.suggestReplan).toBe(true); + }); + }); + + describe('classifyFailure', () => { + it('should classify timeout errors as TRANSIENT', async () => { + const error = new Error('Request timeout after 30000ms'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('TRANSIENT'); + expect(result.retryable).toBe(true); + expect(result.suggestedAction).toBe('RETRY'); + }); + + it('should classify connection errors as TRANSIENT', async () => { + const error = new Error('ECONNREFUSED: Connection refused to localhost:3000'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('TRANSIENT'); + expect(result.retryable).toBe(true); + }); + + it('should classify rate limit errors as TRANSIENT', async () => { + const error = new Error('Rate limit exceeded, retry after 60 seconds'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('TRANSIENT'); + }); + + it('should classify validation errors as SEMANTIC', async () => { + const error = new Error('Validation error: Email format invalid'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('SEMANTIC'); + expect(result.retryable).toBe(false); + expect(result.suggestedAction).toBe('REPLAN'); + }); + + it('should classify step failures as SEMANTIC', async () => { + const error = new Error('Step failed: Could not find expected file'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('SEMANTIC'); + expect(result.suggestedAction).toBe('REPLAN'); + }); + + it('should classify permission errors as PERMANENT', async () => { + const error = new Error('Permission denied: Cannot access resource'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('PERMANENT'); + expect(result.retryable).toBe(false); + expect(result.suggestedAction).toBe('FAIL'); + }); + + it('should classify resource deleted errors as PERMANENT', async () => { + const error = new Error('Resource not found: Workspace deleted'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('PERMANENT'); + expect(result.suggestedAction).toBe('FAIL'); + }); + + it('should default to SEMANTIC for unknown errors', async () => { + const error = new Error('Some unexpected error occurred'); + + const result = await mockActivityEnv.run(classifyFailure, error) as FailureClassResult; + + expect(result.category).toBe('SEMANTIC'); + expect(result.suggestedAction).toBe('REPLAN'); + }); + }); +}); diff --git a/packages/bytebot-temporal-worker/src/activities/execution.activities.ts b/packages/bytebot-temporal-worker/src/activities/execution.activities.ts new file mode 100644 index 000000000..333d2c78e --- /dev/null +++ b/packages/bytebot-temporal-worker/src/activities/execution.activities.ts @@ -0,0 +1,541 @@ +/** + * Execution Activities - Step execution and verification + * + * These activities handle the execution of individual steps by dispatching + * tasks to ByteBot agents and verifying the results. + * + * Phase 11.3: Updated to route step dispatch through orchestrator's internal API. + * The orchestrator uses its TaskDispatchService to dispatch tasks to bytebot-agent:9991. + * + * Architecture: + * Temporal Worker → Orchestrator (/api/v1/internal/dispatch-step) + * → TaskDispatchService → bytebot-agent:9991/tasks + * → Agent polls and claims tasks + * → Orchestrator polls for completion + * → Results returned to Temporal Worker + */ + +import { Context } from '@temporalio/activity'; +import axios from 'axios'; + +import type { + ExecuteStepInput, + ExecuteStepOutput, + VerifyStepInput, + VerifyStepOutput, +} from '../types/goal-run.types'; + +// ============================================================================ +// Configuration +// ============================================================================ + +// Phase 11.3: Orchestrator is the primary endpoint for step dispatch +// The orchestrator's internal API routes requests through TaskDispatchService +const ORCHESTRATOR_URL = process.env.ORCHESTRATOR_URL ?? 'http://bytebot-workflow-orchestrator:8080'; +const EXECUTION_TIMEOUT_MS = parseInt(process.env.EXECUTION_TIMEOUT_MS ?? '300000', 10); // 5 min +const HEARTBEAT_INTERVAL_MS = parseInt(process.env.HEARTBEAT_INTERVAL_MS ?? '10000', 10); // 10s +const DISPATCH_TIMEOUT_MS = parseInt(process.env.DISPATCH_TIMEOUT_MS ?? '30000', 10); // 30s for dispatch +const STATUS_POLL_INTERVAL_MS = parseInt(process.env.STATUS_POLL_INTERVAL_MS ?? '5000', 10); // 5s poll interval + +// ============================================================================ +// Activity Interface +// ============================================================================ + +export interface ExecutionActivities { + executeStep(input: ExecuteStepInput): Promise; + verifyStep(input: VerifyStepInput): Promise; + classifyFailure(error: Error): Promise<{ + category: 'TRANSIENT' | 'SEMANTIC' | 'PERMANENT'; + retryable: boolean; + suggestedAction: 'RETRY' | 'REPLAN' | 'FAIL'; + }>; +} + +// ============================================================================ +// Activity Implementations +// ============================================================================ + +/** + * Executes a single step by dispatching a task to a ByteBot agent. + * + * Key features: + * - Periodic heartbeats to prevent timeout + * - Captures actual outcome for context (Manus pattern) + * - Extracts knowledge gained during execution + * + * Phase 11.3: Routes dispatch through orchestrator's internal API. + * The orchestrator uses TaskDispatchService to: + * 1. Create task in bytebot-agent system + * 2. Poll for task completion + * 3. Update checklist items and emit events + */ +export async function executeStep(input: ExecuteStepInput): Promise { + const context = Context.current(); + const abortController = new AbortController(); + + // Set up periodic heartbeat + const heartbeatInterval = setInterval(() => { + context.heartbeat({ + step: input.step.stepNumber, + status: 'executing', + timestamp: new Date().toISOString(), + }); + }, HEARTBEAT_INTERVAL_MS); + + try { + context.heartbeat({ step: input.step.stepNumber, status: 'starting' }); + + // Phase 11.3: Dispatch step via orchestrator's internal API + // This routes through TaskDispatchService which handles: + // - Idempotent task creation in bytebot-agent + // - Task routing to bytebot-agent:9991 + // - Activity event emission for UI updates + const dispatchResponse = await axios.post<{ + success: boolean; + taskId?: string; + status?: 'PENDING' | 'DISPATCHED'; + error?: string; + }>( + `${ORCHESTRATOR_URL}/api/v1/internal/dispatch-step`, + { + goalRunId: input.goalRunId, + tenantId: input.tenantId, + step: { + stepNumber: input.step.stepNumber, + description: input.step.description, + expectedOutcome: input.step.expectedOutcome, + isHighRisk: input.step.isHighRisk, + dependencies: input.step.dependencies, + // suggestedTools and requiresDesktop are determined by the orchestrator + // based on step analysis during dispatch + }, + workspaceId: input.workspaceId, + context: { + previousStepOutcome: input.context?.previousStepOutcome, + accumulatedKnowledge: input.context?.accumulatedKnowledge, + }, + idempotencyKey: `${input.goalRunId}-step-${input.step.stepNumber}`, + }, + { + timeout: DISPATCH_TIMEOUT_MS, + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Request': 'true', + }, + signal: abortController.signal, + } + ); + + if (!dispatchResponse.data.success || !dispatchResponse.data.taskId) { + clearInterval(heartbeatInterval); + return { + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + error: dispatchResponse.data.error || 'Failed to dispatch step', + }; + } + + const taskId = dispatchResponse.data.taskId; + context.heartbeat({ step: input.step.stepNumber, status: 'dispatched', taskId }); + + // Poll orchestrator's internal API for task completion + // The orchestrator's TaskDispatchService polls the agent and updates status + const result = await pollForTaskCompletionViaOrchestrator( + taskId, + input.tenantId, + EXECUTION_TIMEOUT_MS, + (status) => { + context.heartbeat({ + step: input.step.stepNumber, + status: 'polling', + taskStatus: status, + }); + } + ); + + clearInterval(heartbeatInterval); + + if (result.status === 'COMPLETED') { + // Extract knowledge from the execution result + const knowledgeGained = extractKnowledge(result.output); + + return { + success: true, + outcome: result.output?.summary ?? result.output?.result ?? 'Step completed successfully', + artifacts: result.output?.artifacts ?? [], + knowledgeGained, + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + }; + } else if (result.status === 'WAITING_USER_INPUT') { + return { + success: false, + outcome: result.output?.summary ?? 'Waiting for user input', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: true, + waitingForProvider: false, + }; + } else if (result.status === 'WAITING_PROVIDER') { + return { + success: false, + outcome: result.output?.summary ?? 'Waiting for provider/model capacity', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: true, + }; + } else { + return { + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + error: result.error ?? 'Step execution failed', + }; + } + } catch (error) { + clearInterval(heartbeatInterval); + + const errorMessage = error instanceof Error ? error.message : String(error); + + // Check for specific error types + if (axios.isAxiosError(error)) { + if (error.code === 'ECONNREFUSED') { + return { + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + error: `Orchestrator service unavailable: ${errorMessage}`, + }; + } + if (error.response?.status === 429) { + return { + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + error: 'Rate limited, please retry', + }; + } + if (error.response?.status === 403) { + return { + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + error: 'Internal authentication failed', + }; + } + } + + return { + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + error: errorMessage, + }; + } +} + +/** + * Verifies that a step was executed correctly. + * Uses LLM to compare expected vs actual outcome. + */ +export async function verifyStep(input: VerifyStepInput): Promise { + const context = Context.current(); + context.heartbeat('Starting step verification'); + + // If no expected outcome, auto-verify based on success + if (!input.step.expectedOutcome) { + return { + verified: input.executionResult.success, + verificationDetails: input.executionResult.success + ? 'Step completed successfully (no expected outcome specified)' + : 'Step failed', + suggestReplan: !input.executionResult.success, + replanReason: input.executionResult.error, + }; + } + + try { + // Call orchestrator's verification endpoint + const response = await axios.post<{ + verified: boolean; + verificationDetails: string; + suggestReplan: boolean; + replanReason?: string; + }>( + `${ORCHESTRATOR_URL}/api/v1/internal/verify`, + { + goalRunId: input.goalRunId, + tenantId: input.tenantId, + step: input.step, + expectedOutcome: input.step.expectedOutcome, + actualOutcome: input.executionResult.outcome, + success: input.executionResult.success, + error: input.executionResult.error, + }, + { + timeout: 60000, + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Request': 'true', + }, + } + ); + + context.heartbeat('Verification complete'); + + return { + verified: response.data.verified, + verificationDetails: response.data.verificationDetails, + suggestReplan: response.data.suggestReplan, + replanReason: response.data.replanReason, + }; + } catch (error) { + // Fallback: simple verification based on success flag + return { + verified: input.executionResult.success, + verificationDetails: `Fallback verification: ${input.executionResult.success ? 'success' : 'failed'}`, + suggestReplan: !input.executionResult.success, + replanReason: input.executionResult.error, + }; + } +} + +/** + * Classifies a failure according to Google SRE patterns. + * Used to determine retry strategy. + */ +export async function classifyFailure(error: Error): Promise<{ + category: 'TRANSIENT' | 'SEMANTIC' | 'PERMANENT'; + retryable: boolean; + suggestedAction: 'RETRY' | 'REPLAN' | 'FAIL'; +}> { + const message = error.message.toLowerCase(); + + // Transient failures (retry with backoff) + if ( + message.includes('timeout') || + message.includes('econnrefused') || + message.includes('econnreset') || + message.includes('rate limit') || + message.includes('503') || + message.includes('502') || + message.includes('unavailable') + ) { + return { + category: 'TRANSIENT', + retryable: true, + suggestedAction: 'RETRY', + }; + } + + // Semantic failures (need replanning) + if ( + message.includes('step failed') || + message.includes('validation') || + message.includes('assertion') || + message.includes('unexpected') || + message.includes('wrong approach') + ) { + return { + category: 'SEMANTIC', + retryable: false, + suggestedAction: 'REPLAN', + }; + } + + // Permanent failures (cannot recover) + if ( + message.includes('permission denied') || + message.includes('not found') || + message.includes('deleted') || + message.includes('cancelled') || + message.includes('budget exhausted') + ) { + return { + category: 'PERMANENT', + retryable: false, + suggestedAction: 'FAIL', + }; + } + + // Default to semantic (trigger replan) + return { + category: 'SEMANTIC', + retryable: false, + suggestedAction: 'REPLAN', + }; +} + +// ============================================================================ +// Helper Functions +// ============================================================================ + +interface TaskResult { + status: 'COMPLETED' | 'FAILED' | 'TIMEOUT' | 'WAITING_USER_INPUT' | 'WAITING_PROVIDER'; + output?: { + summary?: string; + result?: string; + artifacts?: string[]; + }; + error?: string; +} + +/** + * Phase 11.3: Polls for task completion via orchestrator's internal API. + * + * The orchestrator's TaskDispatchService maintains task status and polls + * the bytebot-agent for updates. We query the orchestrator's internal API + * which provides a unified view of task status. + */ +async function pollForTaskCompletionViaOrchestrator( + taskId: string, + tenantId: string, + timeoutMs: number, + onStatusChange: (status: string) => void +): Promise { + const startTime = Date.now(); + let pollInterval = STATUS_POLL_INTERVAL_MS; // Start at configured interval + const maxPollInterval = 10000; // Max 10s + let consecutiveErrors = 0; + const maxConsecutiveErrors = 10; + + while (Date.now() - startTime < timeoutMs) { + try { + const response = await axios.get<{ + status: 'PENDING' | 'RUNNING' | 'COMPLETED' | 'FAILED' | 'WAITING_USER_INPUT' | 'WAITING_PROVIDER'; + output?: { + summary?: string; + result?: string; + artifacts?: string[]; + }; + error?: string; + }>(`${ORCHESTRATOR_URL}/api/v1/internal/task-status/${taskId}`, { + headers: { + 'X-Tenant-Id': tenantId, + 'X-Internal-Request': 'true', + }, + timeout: 10000, + }); + + // Reset error counter on success + consecutiveErrors = 0; + onStatusChange(response.data.status); + + if (response.data.status === 'COMPLETED') { + return { + status: 'COMPLETED', + output: response.data.output, + }; + } + + if (response.data.status === 'FAILED') { + return { + status: 'FAILED', + error: response.data.error ?? 'Task failed', + }; + } + + if (response.data.status === 'WAITING_USER_INPUT') { + return { + status: 'WAITING_USER_INPUT', + output: response.data.output, + }; + } + + if (response.data.status === 'WAITING_PROVIDER') { + return { + status: 'WAITING_PROVIDER', + output: response.data.output, + }; + } + + // Still running, wait and poll again + await new Promise((resolve) => setTimeout(resolve, pollInterval)); + // Gradually increase poll interval for long-running tasks + pollInterval = Math.min(pollInterval * 1.2, maxPollInterval); + } catch (error) { + consecutiveErrors++; + + // If too many consecutive errors, return failure + if (consecutiveErrors >= maxConsecutiveErrors) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { + status: 'FAILED', + error: `Polling failed after ${maxConsecutiveErrors} consecutive errors: ${errorMessage}`, + }; + } + + // Network error during polling, wait with exponential backoff and retry + await new Promise((resolve) => setTimeout(resolve, pollInterval)); + pollInterval = Math.min(pollInterval * 2, maxPollInterval); + } + } + + return { + status: 'TIMEOUT', + error: `Task did not complete within ${timeoutMs}ms`, + }; +} + +/** + * Extracts knowledge/facts from execution output. + * Used to build context for subsequent steps (Anthropic pattern). + */ +function extractKnowledge(output?: { + summary?: string; + result?: string; + facts?: string[]; + discoveries?: string[]; +}): string[] { + const knowledge: string[] = []; + + if (output?.facts) { + knowledge.push(...output.facts); + } + + if (output?.discoveries) { + knowledge.push(...output.discoveries); + } + + // Extract key information from summary + if (output?.summary) { + // Simple heuristic: look for "found", "discovered", "confirmed", "learned" + const keyPhrases = output.summary.match( + /(found|discovered|confirmed|learned|identified|determined)[^.]+\./gi + ); + if (keyPhrases) { + knowledge.push(...keyPhrases.map((p) => p.trim())); + } + } + + return knowledge; +} diff --git a/packages/bytebot-temporal-worker/src/activities/index.ts b/packages/bytebot-temporal-worker/src/activities/index.ts new file mode 100644 index 000000000..63371547b --- /dev/null +++ b/packages/bytebot-temporal-worker/src/activities/index.ts @@ -0,0 +1,9 @@ +/** + * Activity Exports + * + * Export all activities for the Temporal worker. + */ + +export * from './planning.activities'; +export * from './execution.activities'; +export * from './kafka.activities'; diff --git a/packages/bytebot-temporal-worker/src/activities/kafka.activities.spec.ts b/packages/bytebot-temporal-worker/src/activities/kafka.activities.spec.ts new file mode 100644 index 000000000..759f96a49 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/activities/kafka.activities.spec.ts @@ -0,0 +1,438 @@ +/** + * Kafka Activities Unit Tests + * + * Tests event emission to Kafka topics with mocked KafkaJS client. + */ + +import { MockActivityEnvironment } from '@temporalio/testing'; +import { emitGoalEvent, emitStepEvent, emitAuditEvent, emitEventBatch } from './kafka.activities'; + +// Mock KafkaJS +const mockSend = jest.fn().mockResolvedValue(undefined); +const mockConnect = jest.fn().mockResolvedValue(undefined); +const mockDisconnect = jest.fn().mockResolvedValue(undefined); + +jest.mock('kafkajs', () => ({ + Kafka: jest.fn().mockImplementation(() => ({ + producer: jest.fn().mockReturnValue({ + connect: mockConnect, + disconnect: mockDisconnect, + send: mockSend, + }), + })), + CompressionTypes: { GZIP: 1 }, + logLevel: { WARN: 4 }, +})); + +describe('Kafka Activities', () => { + let mockActivityEnv: MockActivityEnvironment; + + beforeEach(() => { + mockActivityEnv = new MockActivityEnvironment(); + jest.clearAllMocks(); + }); + + describe('emitGoalEvent', () => { + it('should emit GOAL_STARTED event to correct topic', async () => { + const input = { + eventType: 'GOAL_STARTED' as const, + goalRunId: 'goal-123', + tenantId: 'tenant-abc', + payload: { goalDescription: 'Test goal' }, + }; + + await mockActivityEnv.run(emitGoalEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.goal.events', + messages: expect.arrayContaining([ + expect.objectContaining({ + key: 'goal-123', + value: expect.stringContaining('GOAL_STARTED'), + headers: expect.objectContaining({ + 'event-type': 'GOAL_STARTED', + 'tenant-id': 'tenant-abc', + 'correlation-id': 'goal-123', + }), + }), + ]), + }) + ); + }); + + it('should emit GOAL_COMPLETED event with result payload', async () => { + const input = { + eventType: 'GOAL_COMPLETED' as const, + goalRunId: 'goal-456', + tenantId: 'tenant-xyz', + payload: { + stepsCompleted: 5, + duration: 120000, + summary: 'Goal completed successfully', + }, + }; + + await mockActivityEnv.run(emitGoalEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.goal.events', + messages: expect.arrayContaining([ + expect.objectContaining({ + value: expect.stringContaining('stepsCompleted'), + }), + ]), + }) + ); + }); + + it('should emit GOAL_FAILED event with error details', async () => { + const input = { + eventType: 'GOAL_FAILED' as const, + goalRunId: 'goal-789', + tenantId: 'tenant-123', + payload: { + errorType: 'MAX_REPLANS_EXCEEDED', + errorMessage: 'Failed after 3 replan attempts', + }, + }; + + await mockActivityEnv.run(emitGoalEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.goal.events', + messages: expect.arrayContaining([ + expect.objectContaining({ + value: expect.stringContaining('MAX_REPLANS_EXCEEDED'), + }), + ]), + }) + ); + }); + + it('should also emit audit event for goal events', async () => { + const input = { + eventType: 'GOAL_CANCELLED' as const, + goalRunId: 'goal-cancel-123', + tenantId: 'tenant-audit', + payload: { reason: 'User requested cancellation' }, + }; + + await mockActivityEnv.run(emitGoalEvent, input); + + // Should emit to both goal events and audit log + expect(mockSend).toHaveBeenCalledTimes(2); + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ topic: 'bytebot.goal.events' }) + ); + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ topic: 'bytebot.audit.log' }) + ); + }); + }); + + describe('emitStepEvent', () => { + it('should emit STEP_STARTED event with step number', async () => { + const input = { + eventType: 'STEP_STARTED' as const, + goalRunId: 'goal-step-123', + tenantId: 'tenant-step', + stepNumber: 3, + payload: { description: 'Execute API test' }, + }; + + await mockActivityEnv.run(emitStepEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.step.events', + messages: expect.arrayContaining([ + expect.objectContaining({ + key: 'goal-step-123-3', // Composite key + value: expect.stringContaining('STEP_STARTED'), + }), + ]), + }) + ); + }); + + it('should emit STEP_COMPLETED event with outcome', async () => { + const input = { + eventType: 'STEP_COMPLETED' as const, + goalRunId: 'goal-step-456', + tenantId: 'tenant-step', + stepNumber: 2, + payload: { + outcome: 'API endpoint created successfully', + artifacts: ['/src/api/users.ts'], + knowledgeGained: ['API uses REST pattern'], + }, + }; + + await mockActivityEnv.run(emitStepEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.step.events', + messages: expect.arrayContaining([ + expect.objectContaining({ + value: expect.stringContaining('STEP_COMPLETED'), + }), + ]), + }) + ); + }); + + it('should emit STEP_APPROVAL_REQUESTED for high-risk steps', async () => { + const input = { + eventType: 'STEP_APPROVAL_REQUESTED' as const, + goalRunId: 'goal-highrisk', + tenantId: 'tenant-approval', + stepNumber: 1, + payload: { + description: 'Delete production database tables', + riskLevel: 'HIGH', + approvalRequired: true, + }, + }; + + await mockActivityEnv.run(emitStepEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.step.events', + messages: expect.arrayContaining([ + expect.objectContaining({ + value: expect.stringContaining('STEP_APPROVAL_REQUESTED'), + }), + ]), + }) + ); + }); + + it('should emit STEP_REJECTED event with reason', async () => { + const input = { + eventType: 'STEP_REJECTED' as const, + goalRunId: 'goal-rejected', + tenantId: 'tenant-reject', + stepNumber: 1, + payload: { + rejectedBy: 'admin-user', + reason: 'Too risky for production environment', + }, + }; + + await mockActivityEnv.run(emitStepEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.step.events', + messages: expect.arrayContaining([ + expect.objectContaining({ + value: expect.stringContaining('STEP_REJECTED'), + }), + ]), + }) + ); + }); + }); + + describe('emitAuditEvent', () => { + it('should emit audit event with workflow metadata', async () => { + const input = { + eventType: 'goal.started', + goalRunId: 'goal-audit-123', + tenantId: 'tenant-audit', + userId: 'user-abc', + action: 'GOAL_STARTED', + details: { source: 'web-ui' }, + }; + + await mockActivityEnv.run(emitAuditEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + topic: 'bytebot.audit.log', + messages: expect.arrayContaining([ + expect.objectContaining({ + key: 'goal-audit-123', + value: expect.stringContaining('goal.started'), + }), + ]), + }) + ); + }); + + it('should include user information when available', async () => { + const input = { + eventType: 'step.approved', + goalRunId: 'goal-approval-audit', + tenantId: 'tenant-approval', + userId: 'approver-123', + action: 'STEP_APPROVED', + details: { stepNumber: 1, approvalTime: '2025-01-04T10:00:00Z' }, + }; + + await mockActivityEnv.run(emitAuditEvent, input); + + const sentValue = JSON.parse(mockSend.mock.calls[0][0].messages[0].value); + expect(sentValue.userId).toBe('approver-123'); + expect(sentValue.action).toBe('STEP_APPROVED'); + }); + }); + + describe('emitEventBatch', () => { + it('should emit multiple events in a single batch', async () => { + const events = [ + { + topic: 'GOAL_EVENTS' as const, + key: 'goal-batch-1', + event: { + eventId: 'evt-1', + eventType: 'GOAL_STARTED' as const, + goalRunId: 'goal-batch-1', + tenantId: 'tenant-batch', + timestamp: '2025-01-04T10:00:00Z', + payload: {}, + }, + }, + { + topic: 'STEP_EVENTS' as const, + key: 'goal-batch-1-1', + event: { + eventId: 'evt-2', + eventType: 'STEP_STARTED' as const, + goalRunId: 'goal-batch-1', + tenantId: 'tenant-batch', + stepNumber: 1, + timestamp: '2025-01-04T10:00:01Z', + payload: {}, + }, + }, + ]; + + await mockActivityEnv.run(emitEventBatch, events); + + // Should send to both topics + expect(mockSend).toHaveBeenCalled(); + }); + + it('should handle empty batch gracefully', async () => { + await mockActivityEnv.run(emitEventBatch, []); + + expect(mockSend).not.toHaveBeenCalled(); + }); + + it('should group events by topic for efficiency', async () => { + const events = [ + { + topic: 'STEP_EVENTS' as const, + key: 'goal-1-1', + event: { + eventId: 'evt-1', + eventType: 'STEP_STARTED' as const, + goalRunId: 'goal-1', + tenantId: 'tenant-1', + stepNumber: 1, + timestamp: '2025-01-04T10:00:00Z', + payload: {}, + }, + }, + { + topic: 'STEP_EVENTS' as const, + key: 'goal-1-2', + event: { + eventId: 'evt-2', + eventType: 'STEP_COMPLETED' as const, + goalRunId: 'goal-1', + tenantId: 'tenant-1', + stepNumber: 1, + timestamp: '2025-01-04T10:00:01Z', + payload: {}, + }, + }, + ]; + + await mockActivityEnv.run(emitEventBatch, events); + + // Both events to same topic should be in single send + const stepEventCalls = mockSend.mock.calls.filter( + (call) => call[0].topic === 'bytebot.step.events' + ); + expect(stepEventCalls.length).toBeGreaterThan(0); + }); + }); + + describe('Error Handling', () => { + it('should handle Kafka connection errors', async () => { + mockSend.mockRejectedValueOnce(new Error('Connection refused')); + + const input = { + eventType: 'GOAL_STARTED' as const, + goalRunId: 'goal-error', + tenantId: 'tenant-error', + payload: {}, + }; + + await expect(mockActivityEnv.run(emitGoalEvent, input)).rejects.toThrow( + 'Connection refused' + ); + }); + + it('should handle Kafka timeout errors', async () => { + mockSend.mockRejectedValueOnce(new Error('Request timed out')); + + const input = { + eventType: 'STEP_STARTED' as const, + goalRunId: 'goal-timeout', + tenantId: 'tenant-timeout', + stepNumber: 1, + payload: {}, + }; + + await expect(mockActivityEnv.run(emitStepEvent, input)).rejects.toThrow( + 'Request timed out' + ); + }); + }); + + describe('Event Structure', () => { + it('should include all required fields in goal event', async () => { + const input = { + eventType: 'GOAL_STARTED' as const, + goalRunId: 'goal-structure', + tenantId: 'tenant-structure', + payload: { test: 'value' }, + }; + + await mockActivityEnv.run(emitGoalEvent, input); + + const sentValue = JSON.parse(mockSend.mock.calls[0][0].messages[0].value); + expect(sentValue).toHaveProperty('eventId'); + expect(sentValue).toHaveProperty('eventType', 'GOAL_STARTED'); + expect(sentValue).toHaveProperty('goalRunId', 'goal-structure'); + expect(sentValue).toHaveProperty('tenantId', 'tenant-structure'); + expect(sentValue).toHaveProperty('timestamp'); + expect(sentValue).toHaveProperty('payload'); + }); + + it('should use GZIP compression for messages', async () => { + const input = { + eventType: 'GOAL_STARTED' as const, + goalRunId: 'goal-compress', + tenantId: 'tenant-compress', + payload: {}, + }; + + await mockActivityEnv.run(emitGoalEvent, input); + + expect(mockSend).toHaveBeenCalledWith( + expect.objectContaining({ + compression: 1, // GZIP + }) + ); + }); + }); +}); diff --git a/packages/bytebot-temporal-worker/src/activities/kafka.activities.ts b/packages/bytebot-temporal-worker/src/activities/kafka.activities.ts new file mode 100644 index 000000000..5f4588181 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/activities/kafka.activities.ts @@ -0,0 +1,344 @@ +/** + * Kafka Activities - Event emission for observability + * + * These activities emit events to Kafka for: + * - Real-time monitoring dashboards + * - Audit logging and compliance + * - Cross-service event sourcing + * + * Topic structure: + * - bytebot.goal.events: Goal lifecycle events + * - bytebot.step.events: Step execution events + * - bytebot.audit.log: Compliance and audit trail + */ + +import { Context } from '@temporalio/activity'; +import { Kafka, Producer, CompressionTypes, logLevel } from 'kafkajs'; +import { v4 as uuidv4 } from 'uuid'; + +import type { + GoalEvent, + StepEvent, + AuditEvent, +} from '../types/goal-run.types'; + +// ============================================================================ +// Configuration +// ============================================================================ + +// Kafka is in core cluster - accessible via ClusterMesh global service +const KAFKA_BROKERS = (process.env.KAFKA_BROKERS ?? 'core-cluster-kafka-bootstrap.kafka.svc.cluster.local:9092').split(','); +const KAFKA_CLIENT_ID = process.env.KAFKA_CLIENT_ID ?? 'bytebot-temporal-worker'; + +// Topic names following naming convention: .. +const TOPICS = { + GOAL_EVENTS: process.env.KAFKA_TOPIC_GOAL_EVENTS ?? 'bytebot.goal.events', + STEP_EVENTS: process.env.KAFKA_TOPIC_STEP_EVENTS ?? 'bytebot.step.events', + AUDIT_LOG: process.env.KAFKA_TOPIC_AUDIT ?? 'bytebot.audit.log', +}; + +// ============================================================================ +// Kafka Client Singleton +// ============================================================================ + +let kafka: Kafka | null = null; +let producer: Producer | null = null; +let isConnected = false; + +async function getProducer(): Promise { + if (producer && isConnected) { + return producer; + } + + if (!kafka) { + kafka = new Kafka({ + clientId: KAFKA_CLIENT_ID, + brokers: KAFKA_BROKERS, + logLevel: logLevel.WARN, + retry: { + initialRetryTime: 100, + retries: 5, + maxRetryTime: 30000, + }, + }); + } + + if (!producer) { + producer = kafka.producer({ + allowAutoTopicCreation: false, // Topics should be pre-created + transactionTimeout: 30000, + }); + } + + if (!isConnected) { + await producer.connect(); + isConnected = true; + } + + return producer; +} + +// Graceful shutdown +process.on('SIGTERM', async () => { + if (producer && isConnected) { + await producer.disconnect(); + isConnected = false; + } +}); + +// ============================================================================ +// Activity Interface +// ============================================================================ + +export interface KafkaActivities { + emitGoalEvent(input: { + eventType: GoalEvent['eventType']; + goalRunId: string; + tenantId: string; + payload: Record; + }): Promise; + + emitStepEvent(input: { + eventType: StepEvent['eventType']; + goalRunId: string; + tenantId: string; + stepNumber: number; + payload: Record; + }): Promise; + + emitAuditEvent(input: { + eventType: string; + goalRunId: string; + tenantId: string; + userId?: string; + action: string; + details: Record; + }): Promise; +} + +// ============================================================================ +// Activity Implementations +// ============================================================================ + +/** + * Emits a goal lifecycle event to Kafka. + * + * Events: GOAL_STARTED, GOAL_COMPLETED, GOAL_FAILED, GOAL_CANCELLED, GOAL_PAUSED, GOAL_RESUMED + */ +export async function emitGoalEvent(input: { + eventType: GoalEvent['eventType']; + goalRunId: string; + tenantId: string; + payload: Record; +}): Promise { + const context = Context.current(); + context.heartbeat(`Emitting goal event: ${input.eventType}`); + + const event: GoalEvent = { + eventId: uuidv4(), + eventType: input.eventType, + goalRunId: input.goalRunId, + tenantId: input.tenantId, + timestamp: new Date().toISOString(), + payload: input.payload, + }; + + await emitEvent(TOPICS.GOAL_EVENTS, input.goalRunId, event); + + // Also emit to audit log for compliance + await emitAuditEvent({ + eventType: `goal.${input.eventType.toLowerCase()}`, + goalRunId: input.goalRunId, + tenantId: input.tenantId, + action: input.eventType, + details: input.payload, + }); +} + +/** + * Emits a step execution event to Kafka. + * + * Events: STEP_STARTED, STEP_COMPLETED, STEP_FAILED, STEP_SKIPPED, + * STEP_APPROVAL_REQUESTED, STEP_APPROVED, STEP_REJECTED + */ +export async function emitStepEvent(input: { + eventType: StepEvent['eventType']; + goalRunId: string; + tenantId: string; + stepNumber: number; + payload: Record; +}): Promise { + const context = Context.current(); + context.heartbeat(`Emitting step event: ${input.eventType} for step ${input.stepNumber}`); + + const event: StepEvent = { + eventId: uuidv4(), + eventType: input.eventType, + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: input.stepNumber, + timestamp: new Date().toISOString(), + payload: input.payload, + }; + + // Use composite key for ordering: goalRunId-stepNumber + const key = `${input.goalRunId}-${input.stepNumber}`; + await emitEvent(TOPICS.STEP_EVENTS, key, event); +} + +/** + * Emits an audit event for compliance and traceability. + * + * Audit events are immutable and retained for compliance periods. + */ +export async function emitAuditEvent(input: { + eventType: string; + goalRunId: string; + tenantId: string; + userId?: string; + action: string; + details: Record; +}): Promise { + const context = Context.current(); + context.heartbeat(`Emitting audit event: ${input.eventType}`); + + const workflowInfo = getWorkflowMetadata(); + + const event: AuditEvent = { + eventId: uuidv4(), + eventType: input.eventType, + goalRunId: input.goalRunId, + tenantId: input.tenantId, + userId: input.userId, + timestamp: new Date().toISOString(), + action: input.action, + details: input.details, + metadata: workflowInfo, + }; + + await emitEvent(TOPICS.AUDIT_LOG, input.goalRunId, event); +} + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/** + * Emits an event to Kafka with retry handling. + */ +async function emitEvent( + topic: string, + key: string, + event: GoalEvent | StepEvent | AuditEvent +): Promise { + try { + const prod = await getProducer(); + + await prod.send({ + topic, + compression: CompressionTypes.GZIP, + messages: [ + { + key, + value: JSON.stringify(event), + headers: { + 'content-type': 'application/json', + 'event-type': event.eventType, + 'tenant-id': event.tenantId, + 'correlation-id': event.goalRunId, + 'timestamp': event.timestamp, + }, + }, + ], + }); + } catch (error) { + // Log but don't fail the workflow for event emission failures + console.error(`Failed to emit event to ${topic}:`, error); + + // Try to reconnect on next attempt + if (error instanceof Error && error.message.includes('disconnect')) { + isConnected = false; + } + + // Rethrow for Temporal to handle retry + throw error; + } +} + +/** + * Gets workflow metadata for audit context. + */ +function getWorkflowMetadata(): { + workflowId?: string; + runId?: string; + activityId?: string; +} { + try { + const context = Context.current(); + return { + workflowId: context.info.workflowExecution?.workflowId, + runId: context.info.workflowExecution?.runId, + activityId: context.info.activityId, + }; + } catch { + return {}; + } +} + +// ============================================================================ +// Batch Operations (for high-throughput scenarios) +// ============================================================================ + +/** + * Emits multiple events in a single batch for efficiency. + * Used when emitting many events at once (e.g., workflow completion summary). + */ +export async function emitEventBatch( + events: Array<{ + topic: 'GOAL_EVENTS' | 'STEP_EVENTS' | 'AUDIT_LOG'; + key: string; + event: GoalEvent | StepEvent | AuditEvent; + }> +): Promise { + if (events.length === 0) return; + + const context = Context.current(); + context.heartbeat(`Emitting batch of ${events.length} events`); + + try { + const prod = await getProducer(); + + // Group by topic + const byTopic = new Map }>>(); + + for (const { topic, key, event } of events) { + const topicName = TOPICS[topic]; + if (!byTopic.has(topicName)) { + byTopic.set(topicName, []); + } + byTopic.get(topicName)!.push({ + key, + value: JSON.stringify(event), + headers: { + 'content-type': 'application/json', + 'event-type': event.eventType, + 'tenant-id': event.tenantId, + }, + }); + } + + // Send all topics in parallel + await Promise.all( + Array.from(byTopic.entries()).map(([topic, messages]) => + prod.send({ + topic, + compression: CompressionTypes.GZIP, + messages, + }) + ) + ); + } catch (error) { + console.error('Failed to emit event batch:', error); + throw error; + } +} diff --git a/packages/bytebot-temporal-worker/src/activities/planning.activities.spec.ts b/packages/bytebot-temporal-worker/src/activities/planning.activities.spec.ts new file mode 100644 index 000000000..0c566353e --- /dev/null +++ b/packages/bytebot-temporal-worker/src/activities/planning.activities.spec.ts @@ -0,0 +1,243 @@ +/** + * Planning Activities Unit Tests + * + * Tests the planning activity functions with mocked external services. + */ + +import axios from 'axios'; +import { MockActivityEnvironment } from '@temporalio/testing'; +import { planGoal, refinePlan } from './planning.activities'; +import type { PlanGoalInput, PlanGoalOutput } from '../types/goal-run.types'; + +jest.mock('axios'); +const mockedAxios = axios as unknown as jest.MockedFunction; + +describe('Planning Activities', () => { + let mockActivityEnv: MockActivityEnvironment; + + beforeEach(() => { + mockActivityEnv = new MockActivityEnvironment(); + jest.clearAllMocks(); + }); + + describe('planGoal', () => { + const defaultInput: PlanGoalInput = { + goalRunId: 'test-goal-123', + tenantId: 'test-tenant', + goalDescription: 'Create a new API endpoint for user management', + previousFailures: [], + accumulatedKnowledge: [], + constraints: { maxSteps: 10 }, + }; + + it('should successfully plan a goal via orchestrator', async () => { + const mockResponse = { + data: { + kind: 'PLAN', + steps: [ + { stepNumber: 1, description: 'Design API schema', expectedOutcome: 'Schema defined', isHighRisk: false }, + { stepNumber: 2, description: 'Implement endpoint', expectedOutcome: 'Endpoint working', isHighRisk: false }, + { stepNumber: 3, description: 'Write tests', expectedOutcome: 'Tests passing', isHighRisk: false }, + ], + planSummary: 'Create user management API in 3 steps', + estimatedDurationMs: 300000, + confidence: 0.85, + }, + }; + + mockedAxios.mockResolvedValueOnce(mockResponse as any); + + const result = await mockActivityEnv.run(planGoal, defaultInput) as PlanGoalOutput; + + expect(result.kind).toBe('PLAN'); + if (result.kind !== 'PLAN') throw new Error('Expected PLAN result'); + expect(result.steps).toHaveLength(3); + expect(result.planSummary).toBe('Create user management API in 3 steps'); + expect(result.confidence).toBe(0.85); + expect(mockedAxios).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'POST', + url: expect.stringContaining('/api/v1/internal/plan'), + data: expect.objectContaining({ + goalRunId: defaultInput.goalRunId, + tenantId: defaultInput.tenantId, + }), + }), + ); + }); + + it('should include previous failures in planning context', async () => { + const inputWithFailures: PlanGoalInput = { + ...defaultInput, + previousFailures: [ + { stepNumber: 1, error: 'Permission denied when creating file' }, + { stepNumber: 2, error: 'Database connection timeout' }, + ], + }; + + const mockResponse = { + data: { + kind: 'PLAN', + steps: [{ stepNumber: 1, description: 'Revised step', expectedOutcome: 'Done', isHighRisk: false }], + planSummary: 'Revised plan', + confidence: 0.7, + }, + }; + + mockedAxios.mockResolvedValueOnce(mockResponse as any); + + await mockActivityEnv.run(planGoal, inputWithFailures); + + expect(mockedAxios).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'POST', + data: expect.objectContaining({ + context: expect.stringContaining('Previous Attempts'), + }), + }), + ); + }); + + it('should fall back to LLM planning when orchestrator fails', async () => { + // First call fails (orchestrator) + mockedAxios + .mockRejectedValueOnce(new Error('Orchestrator unavailable')) + .mockResolvedValueOnce({ + data: { + choices: [ + { + message: { + content: JSON.stringify({ + steps: [{ stepNumber: 1, description: 'Fallback step', expectedOutcome: 'Done', isHighRisk: false }], + planSummary: 'Fallback plan', + confidence: 0.6, + }), + }, + }, + ], + }, + }); + + const result = await mockActivityEnv.run(planGoal, defaultInput) as PlanGoalOutput; + + expect(result.kind).toBe('PLAN'); + if (result.kind !== 'PLAN') throw new Error('Expected PLAN result'); + expect(result.steps).toHaveLength(1); + expect(result.steps[0].description).toBe('Fallback step'); + expect(mockedAxios).toHaveBeenCalledTimes(2); + }); + + it('should return GOAL_INTAKE_REQUIRED when orchestrator blocks planning', async () => { + mockedAxios.mockResolvedValueOnce({ + data: { + kind: 'GOAL_INTAKE_REQUIRED', + promptId: 'pr-1', + goalSpecId: 'gs-1', + reason: 'GOAL_SPEC_INCOMPLETE', + }, + } as any); + + const result = await mockActivityEnv.run(planGoal, defaultInput) as PlanGoalOutput; + + expect(result.kind).toBe('GOAL_INTAKE_REQUIRED'); + if (result.kind !== 'GOAL_INTAKE_REQUIRED') throw new Error('Expected GOAL_INTAKE_REQUIRED result'); + expect(result.promptId).toBe('pr-1'); + expect(result.goalSpecId).toBe('gs-1'); + expect(result.reason).toBe('GOAL_SPEC_INCOMPLETE'); + }); + + it('should normalize step numbers if not provided', async () => { + const mockResponse = { + data: { + kind: 'PLAN', + steps: [ + { description: 'Step without number', expectedOutcome: 'Done' }, + { description: 'Another step', expectedOutcome: 'Done' }, + ], + planSummary: 'Plan', + }, + }; + + mockedAxios.mockResolvedValueOnce(mockResponse as any); + + const result = await mockActivityEnv.run(planGoal, defaultInput) as PlanGoalOutput; + + expect(result.kind).toBe('PLAN'); + if (result.kind !== 'PLAN') throw new Error('Expected PLAN result'); + expect(result.steps[0].stepNumber).toBe(1); + expect(result.steps[1].stepNumber).toBe(2); + }); + + it('should heartbeat during planning', async () => { + const heartbeatSpy = jest.fn(); + mockActivityEnv = new MockActivityEnvironment(); + mockActivityEnv.on('heartbeat', heartbeatSpy); + + const mockResponse = { + data: { + kind: 'PLAN', + steps: [{ stepNumber: 1, description: 'Step', expectedOutcome: 'Done', isHighRisk: false }], + planSummary: 'Plan', + }, + }; + + mockedAxios.mockResolvedValueOnce(mockResponse as any); + + await mockActivityEnv.run(planGoal, defaultInput); + + expect(heartbeatSpy).toHaveBeenCalled(); + }); + }); + + describe('refinePlan', () => { + it('should refine an existing plan based on feedback', async () => { + const input = { + currentPlan: [ + { stepNumber: 1, description: 'Original step', expectedOutcome: 'Done', isHighRisk: false, dependencies: [] }, + ], + feedback: 'Step 1 is too vague, please be more specific', + goalRunId: 'test-goal-123', + tenantId: 'test-tenant', + }; + + const mockResponse = { + data: { + steps: [ + { stepNumber: 1, description: 'Refined specific step', expectedOutcome: 'Specific outcome', isHighRisk: false }, + ], + planSummary: 'Refined plan', + confidence: 0.8, + }, + }; + + mockedAxios.mockResolvedValueOnce(mockResponse as any); + + const result = await mockActivityEnv.run(refinePlan, input) as PlanGoalOutput; + + expect(result.kind).toBe('PLAN'); + if (result.kind !== 'PLAN') throw new Error('Expected PLAN result'); + expect(result.steps[0].description).toBe('Refined specific step'); + expect(mockedAxios).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'POST', + url: expect.stringContaining('/api/v1/internal/refine-plan'), + data: expect.objectContaining({ + currentPlan: input.currentPlan, + feedback: input.feedback, + }), + }), + ); + }); + + it('should throw error when refinement fails', async () => { + const input = { + currentPlan: [], + feedback: 'Improve plan', + }; + + mockedAxios.mockRejectedValueOnce(new Error('Service unavailable')); + + await expect(mockActivityEnv.run(refinePlan, input)).rejects.toThrow('Plan refinement failed'); + }); + }); +}); diff --git a/packages/bytebot-temporal-worker/src/activities/planning.activities.ts b/packages/bytebot-temporal-worker/src/activities/planning.activities.ts new file mode 100644 index 000000000..9feb1e13e --- /dev/null +++ b/packages/bytebot-temporal-worker/src/activities/planning.activities.ts @@ -0,0 +1,631 @@ +/** + * Planning Activities - LLM-powered goal planning for Butler Vantage + * + * These activities call the Butler Vantage planner service to generate step-by-step + * plans for goal execution. Plans are designed for a DESKTOP-BASED agent that + * operates through computer-use tools (click, type, scroll, screenshot). + * + * Phase 10.1: Enhanced with metrics for LLM call tracking + * Phase 11.0: Added circuit breaker, enhanced heartbeats, in-house model flow + * Phase 13.0: Fixed fallback model configuration and improved circuit breaker recovery + * Phase 13.3: Enhanced planning prompt with desktop agent capability awareness + * Phase 14.2: Fixed overly granular planning - clarified task-level vs action-level steps + * + * Model Flow (Phase 13): + * - Planning (planGoal, refinePlan): Uses PLANNING_MODEL (default: gpt-oss-120b) + * - Execution (via activities): Uses EXECUTION_MODEL (default: claude-sonnet-4-5) + * - Fallback: Uses FALLBACK_MODEL (default: claude-sonnet-4-5 - verified in LiteLLM) + * + * Agent Paradigm (Phase 13.3): + * - Butler Vantage is a desktop-based agent operating in a virtual desktop environment + * - Uses computer-use tools: click, type, key, scroll, screenshot, move, drag, cursor_position + * - Interacts with websites through a browser within the desktop + * - Cannot make direct API calls - all data retrieval is through visual observation + * + * Planning Granularity (Phase 14.2): + * - Steps are TASK-LEVEL (meaningful sub-goals), NOT ACTION-LEVEL (individual mouse/keyboard actions) + * - Example GOOD step: "Navigate to google.com in the browser" + * - Example BAD step: "Click address bar, type URL, press Enter" + * - The execution engine handles translating task-level steps into individual actions + */ + +import { Context } from '@temporalio/activity'; + +import type { + PlanGoalInput, + PlanGoalOutput, + Step, +} from '../types/goal-run.types'; + +// Phase 10.1: Metrics integration +import { getMetricsService } from '../metrics'; + +// Phase 11: Circuit breaker for resilient HTTP calls +import { + resilientRequest, + LLM_CIRCUIT_BREAKER_CONFIG, + ORCHESTRATOR_CIRCUIT_BREAKER_CONFIG, +} from '../utils/circuit-breaker'; + +// ============================================================================ +// Configuration +// ============================================================================ + +const ORCHESTRATOR_URL = process.env.ORCHESTRATOR_URL ?? 'http://bytebot-workflow-orchestrator:3000'; +const LLM_PROXY_URL = process.env.LLM_PROXY_URL ?? 'http://bytebot-llm-proxy:3000'; +const LLM_API_KEY = process.env.LLM_API_KEY ?? process.env.OPENAI_API_KEY ?? ''; +const PLANNING_TIMEOUT_MS = parseInt(process.env.PLANNING_TIMEOUT_MS ?? '120000', 10); + +// Phase 13: In-house model configuration with proper fallback +// Use gpt-oss-120b for planning (oversight), claude-sonnet-4-5 for execution +// Fallback uses claude-sonnet-4-5 which is configured in LiteLLM +const PLANNING_MODEL = process.env.PLANNING_MODEL ?? 'gpt-oss-120b'; +const EXECUTION_MODEL = process.env.EXECUTION_MODEL ?? 'claude-sonnet-4-5'; +// Phase 13: Fixed fallback model - gpt-4 was not in LiteLLM config +// Using claude-sonnet-4-5 as fallback (reliable, available in LiteLLM) +const FALLBACK_MODEL = process.env.FALLBACK_MODEL ?? 'claude-sonnet-4-5'; + +// Phase 11: Heartbeat configuration +const HEARTBEAT_INTERVAL_MS = parseInt(process.env.PLANNING_HEARTBEAT_INTERVAL_MS ?? '30000', 10); + +// ============================================================================ +// Activity Interface +// ============================================================================ + +export interface PlanningActivities { + planGoal(input: PlanGoalInput): Promise; + refinePlan(input: { currentPlan: Step[]; feedback: string }): Promise; +} + +// ============================================================================ +// Activity Implementations +// ============================================================================ + +/** + * Plans the steps required to achieve a goal. + * Uses the Butler Vantage planner service or LLM directly. + * + * Phase 11: Enhanced with circuit breaker and periodic heartbeats + * Phase 13.3: Planning prompt now describes desktop agent capabilities + * + * @param input - Goal planning input including description and context + * @returns Structured plan with steps optimized for desktop agent execution + */ +export async function planGoal(input: PlanGoalInput): Promise { + const context = Context.current(); + const metricsService = getMetricsService(); + const startTime = Date.now(); + + // Phase 11: Set up periodic heartbeat for long-running planning + let heartbeatCount = 0; + const heartbeatInterval = setInterval(() => { + heartbeatCount++; + context.heartbeat({ + phase: 'planning', + goalRunId: input.goalRunId, + status: 'in_progress', + heartbeatCount, + elapsedMs: Date.now() - startTime, + timestamp: new Date().toISOString(), + }); + }, HEARTBEAT_INTERVAL_MS); + + try { + // Initial heartbeat + context.heartbeat({ + phase: 'planning', + goalRunId: input.goalRunId, + status: 'starting', + model: PLANNING_MODEL, + timestamp: new Date().toISOString(), + }); + + // Build planning context with failure history (Manus pattern: "leave wrong turns in context") + const planningContext = buildPlanningContext(input); + + type InternalPlanResponse = + | { + kind: 'PLAN'; + steps: Array<{ + stepNumber: number; + description: string; + expectedOutcome?: string; + isHighRisk?: boolean; + dependencies?: number[]; + estimatedDurationMs?: number; + }>; + planSummary: string; + estimatedDurationMs?: number; + confidence?: number; + } + | { + kind: 'GOAL_INTAKE_REQUIRED'; + promptId: string; + goalSpecId: string; + reason: string; + }; + + // Phase 11: Use circuit breaker for orchestrator call + const response = await resilientRequest( + { + method: 'POST', + url: `${ORCHESTRATOR_URL}/api/v1/internal/plan`, + data: { + goalRunId: input.goalRunId, + tenantId: input.tenantId, + goalDescription: input.goalDescription, + context: planningContext, + constraints: input.constraints, + // Phase 11: Pass model preference to orchestrator + preferredModel: PLANNING_MODEL, + }, + timeout: PLANNING_TIMEOUT_MS, + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Request': 'true', + }, + }, + { ...ORCHESTRATOR_CIRCUIT_BREAKER_CONFIG, enableHeartbeats: false } // We handle heartbeats manually + ); + + clearInterval(heartbeatInterval); + + // Final heartbeat + context.heartbeat({ + phase: 'planning', + goalRunId: input.goalRunId, + status: 'completed', + durationMs: Date.now() - startTime, + timestamp: new Date().toISOString(), + }); + + // Record metrics + const durationMs = Date.now() - startTime; + metricsService?.recordActivityExecution('planning', 'success'); + metricsService?.recordActivityDuration('planning', durationMs); + + if (response.data.kind === 'GOAL_INTAKE_REQUIRED') { + return { + kind: 'GOAL_INTAKE_REQUIRED', + promptId: response.data.promptId, + goalSpecId: response.data.goalSpecId, + reason: response.data.reason, + }; + } + + // Validate and normalize steps + const steps: Step[] = response.data.steps.map((step, index) => ({ + stepNumber: step.stepNumber ?? index + 1, + description: step.description, + expectedOutcome: step.expectedOutcome, + isHighRisk: step.isHighRisk ?? false, + dependencies: step.dependencies ?? [], + estimatedDurationMs: step.estimatedDurationMs, + })); + + return { + kind: 'PLAN', + steps, + planSummary: response.data.planSummary, + estimatedDurationMs: response.data.estimatedDurationMs, + confidence: response.data.confidence, + }; + } catch (error) { + clearInterval(heartbeatInterval); + + context.heartbeat({ + phase: 'planning', + goalRunId: input.goalRunId, + status: 'fallback', + error: error instanceof Error ? error.message : String(error), + timestamp: new Date().toISOString(), + }); + + // Fallback to direct LLM planning if orchestrator fails + return await fallbackLLMPlanning(input); + } +} + +/** + * Refines an existing plan based on feedback. + * Used during replanning after step failures. + * + * Phase 11: Enhanced with circuit breaker and periodic heartbeats + */ +export async function refinePlan(input: { + currentPlan: Step[]; + feedback: string; + goalRunId?: string; + tenantId?: string; +}): Promise { + const context = Context.current(); + const metricsService = getMetricsService(); + const startTime = Date.now(); + + // Phase 11: Set up periodic heartbeat + let heartbeatCount = 0; + const heartbeatInterval = setInterval(() => { + heartbeatCount++; + context.heartbeat({ + phase: 'refinement', + goalRunId: input.goalRunId, + status: 'in_progress', + heartbeatCount, + elapsedMs: Date.now() - startTime, + timestamp: new Date().toISOString(), + }); + }, HEARTBEAT_INTERVAL_MS); + + try { + context.heartbeat({ + phase: 'refinement', + goalRunId: input.goalRunId, + status: 'starting', + model: PLANNING_MODEL, + timestamp: new Date().toISOString(), + }); + + // Phase 11: Use circuit breaker for orchestrator call + const response = await resilientRequest<{ + steps: Array<{ + stepNumber: number; + description: string; + expectedOutcome?: string; + isHighRisk?: boolean; + dependencies?: number[]; + }>; + planSummary: string; + confidence?: number; + }>( + { + method: 'POST', + url: `${ORCHESTRATOR_URL}/api/v1/internal/refine-plan`, + data: { + currentPlan: input.currentPlan, + feedback: input.feedback, + goalRunId: input.goalRunId, + tenantId: input.tenantId, + // Phase 11: Pass model preference + preferredModel: PLANNING_MODEL, + }, + timeout: PLANNING_TIMEOUT_MS, + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Request': 'true', + }, + }, + { ...ORCHESTRATOR_CIRCUIT_BREAKER_CONFIG, enableHeartbeats: false } + ); + + clearInterval(heartbeatInterval); + + // Record metrics + const durationMs = Date.now() - startTime; + metricsService?.recordActivityExecution('refinement', 'success'); + metricsService?.recordActivityDuration('refinement', durationMs); + + context.heartbeat({ + phase: 'refinement', + goalRunId: input.goalRunId, + status: 'completed', + durationMs, + timestamp: new Date().toISOString(), + }); + + const steps: Step[] = response.data.steps.map((step, index) => ({ + stepNumber: step.stepNumber ?? index + 1, + description: step.description, + expectedOutcome: step.expectedOutcome, + isHighRisk: step.isHighRisk ?? false, + dependencies: step.dependencies ?? [], + })); + + return { + kind: 'PLAN', + steps, + planSummary: response.data.planSummary, + confidence: response.data.confidence, + }; + } catch (error) { + clearInterval(heartbeatInterval); + + metricsService?.recordActivityExecution('refinement', 'failure'); + metricsService?.recordError('PLAN_REFINEMENT_FAILED', true); + + throw new Error( + `Plan refinement failed: ${error instanceof Error ? error.message : String(error)}` + ); + } +} + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/** + * Builds a rich planning context including failure history. + * Implements the Manus AI pattern: "leave wrong turns in context" + */ +function buildPlanningContext(input: PlanGoalInput): string { + const parts: string[] = []; + + // Goal description + parts.push(`## Goal\n${input.goalDescription}`); + + // Previous failures (wrong turns to learn from) + if (input.previousFailures && input.previousFailures.length > 0) { + parts.push('\n## Previous Attempts (Learn from these)'); + for (const failure of input.previousFailures) { + parts.push(`- Step ${failure.stepNumber} failed: ${failure.error}`); + } + parts.push('\nPlease create a new plan that avoids these issues.'); + } + + // Accumulated knowledge + if (input.accumulatedKnowledge && input.accumulatedKnowledge.length > 0) { + parts.push('\n## Known Information'); + for (const knowledge of input.accumulatedKnowledge) { + parts.push(`- ${knowledge}`); + } + } + + // Constraints + if (input.constraints) { + parts.push('\n## Constraints'); + parts.push(`- Maximum steps: ${input.constraints.maxSteps}`); + } + + return parts.join('\n'); +} + +/** + * Fallback LLM planning when orchestrator is unavailable. + * Calls LLM proxy directly with a structured prompt. + * + * Phase 10.1: Enhanced with LLM metrics tracking + * Phase 11.0: Updated to use in-house gpt-oss-120b model with circuit breaker + * + * Model Priority: + * 1. PLANNING_MODEL (gpt-oss-120b) - In-house, cost-free + * 2. FALLBACK_MODEL (gpt-4) - External fallback if circuit open + */ +async function fallbackLLMPlanning(input: PlanGoalInput): Promise { + const context = Context.current(); + const metricsService = getMetricsService(); + const startTime = Date.now(); + + // Phase 11: Set up periodic heartbeat for long-running LLM calls + let heartbeatCount = 0; + const heartbeatInterval = setInterval(() => { + heartbeatCount++; + context.heartbeat({ + phase: 'fallback_planning', + goalRunId: input.goalRunId, + status: 'waiting_for_llm', + model: PLANNING_MODEL, + heartbeatCount, + elapsedMs: Date.now() - startTime, + timestamp: new Date().toISOString(), + }); + }, HEARTBEAT_INTERVAL_MS); + + context.heartbeat({ + phase: 'fallback_planning', + goalRunId: input.goalRunId, + status: 'starting', + model: PLANNING_MODEL, + timestamp: new Date().toISOString(), + }); + + // Phase 14.2: Fixed planning prompt with proper granularity guidance + // Key fix: Clarified "task-level" vs "action-level" steps to prevent overly granular planning + // Based on industry best practices from Anthropic computer-use, OpenAI Operator, Manus AI, and ReAct + const planningPrompt = `You are Butler Vantage, a DESKTOP-BASED autonomous agent. You plan and execute tasks by controlling a virtual desktop environment through visual observation and computer-use actions. + +## Your Capabilities + +You operate within a virtual desktop environment and can: +- **Visual Observation**: Take screenshots to see the current screen state +- **Mouse Control**: Click, double-click, right-click, drag, move cursor to coordinates +- **Keyboard Input**: Type text, press keys, use keyboard shortcuts (Ctrl+C, Alt+Tab, etc.) +- **Scrolling**: Scroll up/down/left/right within windows and web pages +- **Application Interaction**: Open applications, switch windows, interact with UI elements +- **Web Browsing**: Navigate websites, fill forms, click buttons, read content from web pages + +## Important Constraints + +You CANNOT: +- Make direct API calls to external services (no REST, GraphQL, or programmatic HTTP requests) +- Execute command-line scripts or terminal commands +- Access databases directly +- Read files outside the desktop environment +- Perform actions that require developer/admin tools not visible in the GUI + +## CRITICAL: Step Granularity Guidelines + +Steps should be **TASK-LEVEL**, not **ACTION-LEVEL**. + +### BAD Examples (Too Granular - DO NOT DO THIS): +- "Move cursor to address bar" +- "Click on the address bar" +- "Type 'https://www.google.com'" +- "Press Enter key" +- "Move cursor to search box" + +### GOOD Examples (Correct Granularity): +- "Navigate to google.com in the browser" +- "Search for 'cheap flights to Paris' on Google" +- "Fill out the login form with provided credentials" +- "Download the PDF report from the dashboard" + +### The Difference: +- **Task-level step**: Accomplishes a meaningful sub-goal (e.g., "Navigate to website") +- **Action-level step**: A single mouse/keyboard action (e.g., "Click", "Type", "Press Enter") + +Your steps should be task-level. The execution engine handles the individual mouse/keyboard actions. + +## Task to Plan + +${buildPlanningContext(input)} + +## Response Format + +Respond with a JSON object containing: +- steps: Array of { stepNumber, description, expectedOutcome, isHighRisk, dependencies } +- planSummary: Brief description of how you will accomplish this through desktop interactions +- confidence: Number between 0 and 1 (lower if task requires capabilities you don't have) +- capabilityAssessment: Brief note if any part of the task may be difficult or impossible + +## Planning Rules + +1. **Task-Level Steps**: Each step should accomplish a meaningful sub-goal, NOT a single mouse/keyboard action +2. **Desktop-First**: Every step must be achievable through visual observation and computer-use actions +3. **No API Calls**: Never plan steps that involve "calling an API", "querying a database", or "running a script" +4. **Verifiable Outcomes**: Each step should have an observable outcome (like "website loaded", NOT "cursor moved") +5. **High-Risk Marking**: Mark steps that submit forms, make purchases, send messages, or modify external data as isHighRisk: true +6. **Dependencies**: Include dependencies where step order matters (e.g., must search before reading results) +7. **2-10 Steps**: Break complex goals into 2-10 logical task-level steps +8. **Honest Assessment**: If a goal cannot be fully achieved through desktop interactions, explain why in capabilityAssessment and set confidence appropriately low +9. **Browser Preference**: For web tasks, prefer well-known websites (Google, official sites) over obscure ones + +## Example Good Plan +Goal: "Search for flights from NYC to Paris and find the cheapest option" + +Good plan: +1. "Navigate to Google Flights in the browser" (NOT: "Click address bar, type URL, press Enter") +2. "Search for flights from NYC to Paris for next week" (NOT: "Click origin field, type NYC, click destination...") +3. "Sort results by price to find cheapest option" (NOT: "Click sort dropdown, click Price option") +4. "Record the cheapest flight details" (NOT: "Move cursor to price, screenshot") + +Generate the plan:`; + + // Phase 11: Use gpt-oss-120b (in-house model) as primary + const modelName = PLANNING_MODEL; + + try { + // Phase 11: Use circuit breaker with LLM configuration + const response = await resilientRequest<{ + choices: Array<{ message: { content: string } }>; + }>( + { + method: 'POST', + url: `${LLM_PROXY_URL}/v1/chat/completions`, + data: { + model: modelName, + messages: [{ role: 'user', content: planningPrompt }], + response_format: { type: 'json_object' }, + max_tokens: 4096, + }, + timeout: PLANNING_TIMEOUT_MS, + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${LLM_API_KEY}`, + 'X-Tenant-Id': input.tenantId, + }, + }, + LLM_CIRCUIT_BREAKER_CONFIG, + // Phase 11: Fallback to external model if circuit is open + async () => { + console.warn(`[Planning] Circuit open for ${modelName}, falling back to ${FALLBACK_MODEL}`); + + context.heartbeat({ + phase: 'fallback_planning', + goalRunId: input.goalRunId, + status: 'circuit_open_fallback', + primaryModel: modelName, + fallbackModel: FALLBACK_MODEL, + timestamp: new Date().toISOString(), + }); + + const fallbackResponse = await resilientRequest<{ + choices: Array<{ message: { content: string } }>; + }>( + { + method: 'POST', + url: `${LLM_PROXY_URL}/v1/chat/completions`, + data: { + model: FALLBACK_MODEL, + messages: [{ role: 'user', content: planningPrompt }], + response_format: { type: 'json_object' }, + max_tokens: 4096, + }, + timeout: PLANNING_TIMEOUT_MS, + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${LLM_API_KEY}`, + 'X-Tenant-Id': input.tenantId, + }, + }, + { ...LLM_CIRCUIT_BREAKER_CONFIG, name: 'llm_proxy_fallback' } + ); + + metricsService?.recordLLMCall(FALLBACK_MODEL, 'success'); + return fallbackResponse.data; + } + ); + + clearInterval(heartbeatInterval); + + // Record successful LLM call metrics + const durationMs = Date.now() - startTime; + metricsService?.recordLLMCall(modelName, 'success'); + metricsService?.recordLLMLatency(modelName, durationMs); + metricsService?.recordActivityExecution('fallback_planning', 'success'); + metricsService?.recordActivityDuration('fallback_planning', durationMs); + + context.heartbeat({ + phase: 'fallback_planning', + goalRunId: input.goalRunId, + status: 'completed', + model: modelName, + durationMs, + timestamp: new Date().toISOString(), + }); + + // Parse OpenAI-compatible response format + const content = response.data.choices[0]?.message?.content ?? '{}'; + const parsed = JSON.parse(content); + + const steps: Step[] = (parsed.steps || []).map( + (step: Partial, index: number) => ({ + stepNumber: step.stepNumber ?? index + 1, + description: step.description ?? '', + expectedOutcome: step.expectedOutcome, + isHighRisk: step.isHighRisk ?? false, + dependencies: step.dependencies ?? [], + }) + ); + + // Phase 13.3: Log capability assessment for monitoring + if (parsed.capabilityAssessment) { + console.info(`[Planning] Capability assessment: ${parsed.capabilityAssessment}`); + } + if (parsed.confidence !== undefined && parsed.confidence < 0.5) { + console.warn(`[Planning] Low confidence plan (${parsed.confidence}): ${parsed.capabilityAssessment || 'No assessment provided'}`); + } + + return { + kind: 'PLAN', + steps, + planSummary: parsed.planSummary ?? 'Generated plan', + confidence: parsed.confidence, + capabilityAssessment: parsed.capabilityAssessment, + }; + } catch (error) { + clearInterval(heartbeatInterval); + + // Record failed LLM call metrics + const durationMs = Date.now() - startTime; + const isTimeout = error instanceof Error && + (error.message.includes('timeout') || error.message.includes('ETIMEDOUT')); + + metricsService?.recordLLMCall(modelName, isTimeout ? 'timeout' : 'error'); + metricsService?.recordLLMLatency(modelName, durationMs); + metricsService?.recordActivityExecution('fallback_planning', isTimeout ? 'timeout' : 'failure'); + metricsService?.recordError('LLM_PLANNING_FAILED', true); + + throw new Error( + `Fallback planning failed: ${error instanceof Error ? error.message : String(error)}` + ); + } +} diff --git a/packages/bytebot-temporal-worker/src/app.module.ts b/packages/bytebot-temporal-worker/src/app.module.ts new file mode 100644 index 000000000..d2763af25 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/app.module.ts @@ -0,0 +1,42 @@ +/** + * Butler Vantage Temporal Worker - NestJS Application Module + * + * This module sets up the NestJS application for: + * - Health check endpoints (liveness, readiness) + * - Prometheus metrics (Phase 10.1: Enhanced Observability) + * - Configuration management + * + * The actual Temporal worker runs separately via worker.ts + * + * Phase 10.1 Enhancements: + * - Custom business metrics for workflows, steps, activities + * - LLM call tracking and latency histograms + * - Human-in-the-loop event metrics + * - Error classification and tracking + */ + +import { Module } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { TerminusModule } from '@nestjs/terminus'; + +import { HealthController } from './health/health.controller'; +import { MetricsModule } from './metrics'; + +@Module({ + imports: [ + // Configuration + ConfigModule.forRoot({ + isGlobal: true, + envFilePath: ['.env.local', '.env'], + }), + + // Health checks + TerminusModule, + + // Phase 10.1: Enhanced Prometheus metrics with custom business metrics + MetricsModule, + ], + controllers: [HealthController], + providers: [], +}) +export class AppModule {} diff --git a/packages/bytebot-temporal-worker/src/config/search-attributes.config.ts b/packages/bytebot-temporal-worker/src/config/search-attributes.config.ts new file mode 100644 index 000000000..d5b004be1 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/config/search-attributes.config.ts @@ -0,0 +1,228 @@ +/** + * Custom Search Attributes Configuration - Phase 10.2 + * + * Defines custom search attributes for ByteBot workflow visibility. + * These attributes enable filtering and searching workflows in Temporal UI. + * + * Best Practices (Temporal 2025): + * - Use for search/visibility, not business logic (use queries for that) + * - Don't store PII in search attributes (not encrypted) + * - Use keyword type for exact matches, text for partial matches + * - Keep cardinality reasonable to avoid performance issues + * + * @see https://docs.temporal.io/search-attribute + * @see https://docs.temporal.io/typescript/search-attributes + */ + +// ============================================================================ +// Search Attribute Type Definitions +// ============================================================================ + +/** + * Custom search attribute names used by ByteBot workflows. + * These must be registered in Temporal before use. + * + * Registration command: + * temporal operator search-attribute create --namespace bytebot --name ByteBotTenantId --type Keyword + * temporal operator search-attribute create --namespace bytebot --name ByteBotGoalRunId --type Keyword + * temporal operator search-attribute create --namespace bytebot --name ByteBotPhase --type Keyword + * temporal operator search-attribute create --namespace bytebot --name ByteBotStepCount --type Int + * temporal operator search-attribute create --namespace bytebot --name ByteBotHasHighRiskSteps --type Bool + * temporal operator search-attribute create --namespace bytebot --name ByteBotIsAwaitingApproval --type Bool + * temporal operator search-attribute create --namespace bytebot --name ByteBotErrorType --type Keyword + */ +export const SEARCH_ATTRIBUTES = { + /** + * Tenant ID for multi-tenant filtering + * Type: Keyword (exact match) + */ + TENANT_ID: 'ByteBotTenantId', + + /** + * Goal Run ID for direct lookup + * Type: Keyword (exact match) + */ + GOAL_RUN_ID: 'ByteBotGoalRunId', + + /** + * Current workflow phase (PLANNING, EXECUTING, VERIFYING, etc.) + * Type: Keyword (exact match) + */ + PHASE: 'ByteBotPhase', + + /** + * Total number of steps in the plan + * Type: Int + */ + STEP_COUNT: 'ByteBotStepCount', + + /** + * Whether the workflow has high-risk steps requiring approval + * Type: Bool + */ + HAS_HIGH_RISK_STEPS: 'ByteBotHasHighRiskSteps', + + /** + * Whether workflow is currently awaiting human approval + * Type: Bool + */ + IS_AWAITING_APPROVAL: 'ByteBotIsAwaitingApproval', + + /** + * Error type if workflow failed (for error analysis) + * Type: Keyword (exact match) + */ + ERROR_TYPE: 'ByteBotErrorType', +} as const; + +// ============================================================================ +// Type-Safe Search Attribute Interfaces +// ============================================================================ + +/** + * Type-safe interface for ByteBot search attributes. + * Used when setting initial attributes or upserting. + * + * Phase 11: Added index signature for Temporal SearchAttributes compatibility + */ +export interface ByteBotSearchAttributes { + [key: string]: string[] | number[] | boolean[] | undefined; + [SEARCH_ATTRIBUTES.TENANT_ID]?: string[]; + [SEARCH_ATTRIBUTES.GOAL_RUN_ID]?: string[]; + [SEARCH_ATTRIBUTES.PHASE]?: string[]; + [SEARCH_ATTRIBUTES.STEP_COUNT]?: number[]; + [SEARCH_ATTRIBUTES.HAS_HIGH_RISK_STEPS]?: boolean[]; + [SEARCH_ATTRIBUTES.IS_AWAITING_APPROVAL]?: boolean[]; + [SEARCH_ATTRIBUTES.ERROR_TYPE]?: string[]; +} + +/** + * Valid phase values for the ByteBotPhase search attribute + */ +export type ByteBotPhaseAttribute = + | 'INITIALIZING' + | 'PLANNING' + | 'EXECUTING' + | 'WAITING_USER_INPUT' + | 'WAITING_PROVIDER' + | 'VERIFYING' + | 'REPLANNING' + | 'PAUSED' + | 'AWAITING_APPROVAL' + | 'COMPLETED' + | 'FAILED' + | 'CANCELLED'; + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/** + * Creates initial search attributes for a new workflow execution. + */ +export function createInitialSearchAttributes( + tenantId: string, + goalRunId: string +): ByteBotSearchAttributes { + return { + [SEARCH_ATTRIBUTES.TENANT_ID]: [tenantId], + [SEARCH_ATTRIBUTES.GOAL_RUN_ID]: [goalRunId], + [SEARCH_ATTRIBUTES.PHASE]: ['INITIALIZING'], + [SEARCH_ATTRIBUTES.STEP_COUNT]: [0], + [SEARCH_ATTRIBUTES.HAS_HIGH_RISK_STEPS]: [false], + [SEARCH_ATTRIBUTES.IS_AWAITING_APPROVAL]: [false], + }; +} + +/** + * Creates search attribute update for phase change. + */ +export function createPhaseUpdate(phase: ByteBotPhaseAttribute): ByteBotSearchAttributes { + return { + [SEARCH_ATTRIBUTES.PHASE]: [phase], + }; +} + +/** + * Creates search attribute update for step plan completion. + */ +export function createPlanSearchAttributes( + stepCount: number, + hasHighRiskSteps: boolean +): ByteBotSearchAttributes { + return { + [SEARCH_ATTRIBUTES.STEP_COUNT]: [stepCount], + [SEARCH_ATTRIBUTES.HAS_HIGH_RISK_STEPS]: [hasHighRiskSteps], + [SEARCH_ATTRIBUTES.PHASE]: ['EXECUTING'], + }; +} + +/** + * Creates search attribute update for approval state. + */ +export function createApprovalStateUpdate(isAwaitingApproval: boolean): ByteBotSearchAttributes { + const update: ByteBotSearchAttributes = { + [SEARCH_ATTRIBUTES.IS_AWAITING_APPROVAL]: [isAwaitingApproval], + }; + + if (isAwaitingApproval) { + update[SEARCH_ATTRIBUTES.PHASE] = ['AWAITING_APPROVAL']; + } + + return update; +} + +/** + * Creates search attribute update for error state. + */ +export function createErrorSearchAttributes(errorType: string): ByteBotSearchAttributes { + return { + [SEARCH_ATTRIBUTES.PHASE]: ['FAILED'], + [SEARCH_ATTRIBUTES.ERROR_TYPE]: [errorType], + }; +} + +// ============================================================================ +// Kubernetes Job for Search Attribute Registration +// ============================================================================ + +/** + * Shell commands to register custom search attributes. + * Run these once per Temporal namespace. + * + * Example job manifest at: + * kubernetes/manifests/temporal-search-attributes/job.yaml + */ +export const REGISTRATION_COMMANDS = ` +# Register ByteBot custom search attributes in Temporal namespace +temporal operator search-attribute create --namespace bytebot --name ${SEARCH_ATTRIBUTES.TENANT_ID} --type Keyword +temporal operator search-attribute create --namespace bytebot --name ${SEARCH_ATTRIBUTES.GOAL_RUN_ID} --type Keyword +temporal operator search-attribute create --namespace bytebot --name ${SEARCH_ATTRIBUTES.PHASE} --type Keyword +temporal operator search-attribute create --namespace bytebot --name ${SEARCH_ATTRIBUTES.STEP_COUNT} --type Int +temporal operator search-attribute create --namespace bytebot --name ${SEARCH_ATTRIBUTES.HAS_HIGH_RISK_STEPS} --type Bool +temporal operator search-attribute create --namespace bytebot --name ${SEARCH_ATTRIBUTES.IS_AWAITING_APPROVAL} --type Bool +temporal operator search-attribute create --namespace bytebot --name ${SEARCH_ATTRIBUTES.ERROR_TYPE} --type Keyword +`.trim(); + +// ============================================================================ +// Example Temporal Queries +// ============================================================================ + +/** + * Example queries for filtering workflows in Temporal UI or CLI. + * + * Find all workflows for a tenant: + * ByteBotTenantId = "tenant-123" + * + * Find workflows awaiting approval: + * ByteBotIsAwaitingApproval = true + * + * Find failed workflows with specific error: + * ByteBotPhase = "FAILED" AND ByteBotErrorType = "MAX_RETRIES_EXCEEDED" + * + * Find workflows with many steps: + * ByteBotStepCount > 10 + * + * Find high-risk workflows in execution: + * ByteBotHasHighRiskSteps = true AND ByteBotPhase = "EXECUTING" + */ diff --git a/packages/bytebot-temporal-worker/src/config/temporal.config.ts b/packages/bytebot-temporal-worker/src/config/temporal.config.ts new file mode 100644 index 000000000..729fae828 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/config/temporal.config.ts @@ -0,0 +1,233 @@ +/** + * Temporal Configuration + * + * Production-ready configuration for Temporal workers. + * Follows Temporal's recommended performance settings. + * + * Phase 10.4: Enhanced with rate limiting configuration + * + * @see https://docs.temporal.io/develop/worker-performance + * @see https://typescript.temporal.io/api/interfaces/worker.WorkerOptions + */ + +import { NativeConnection, Runtime } from '@temporalio/worker'; + +// ============================================================================ +// Environment Configuration +// ============================================================================ + +export interface TemporalConfig { + address: string; + namespace: string; + taskQueue: string; + + // Worker settings + maxConcurrentActivityTaskExecutions: number; + maxConcurrentWorkflowTaskExecutions: number; + maxConcurrentLocalActivityExecutions: number; + maxCachedWorkflows: number; + + // Phase 10.4: Rate Limiting Configuration + maxActivitiesPerSecond?: number; // Per-worker activity rate limit + maxTaskQueueActivitiesPerSecond?: number; // Task queue global rate limit + maxConcurrentActivityTaskPolls: number; // Concurrent activity pollers + maxConcurrentWorkflowTaskPolls: number; // Concurrent workflow pollers + + // Connection settings + enableTLS: boolean; + tlsCertPath?: string; + tlsKeyPath?: string; + + // Observability + enableMetrics: boolean; + metricsPort: number; +} + +export function getTemporalConfig(): TemporalConfig { + return { + // Connection + address: process.env.TEMPORAL_ADDRESS ?? 'temporal-frontend.temporal.svc.cluster.local:7233', + namespace: process.env.TEMPORAL_NAMESPACE ?? 'bytebot', + taskQueue: process.env.TEMPORAL_TASK_QUEUE ?? 'bytebot-goal-runs', + + // Worker concurrency (tune based on pod resources) + // Best practice: Start conservative and tune up based on metrics + maxConcurrentActivityTaskExecutions: parseInt( + process.env.TEMPORAL_MAX_CONCURRENT_ACTIVITIES ?? '50', // Reduced from 100 + 10 + ), + maxConcurrentWorkflowTaskExecutions: parseInt( + process.env.TEMPORAL_MAX_CONCURRENT_WORKFLOWS ?? '100', // Reduced from 200 + 10 + ), + maxConcurrentLocalActivityExecutions: parseInt( + process.env.TEMPORAL_MAX_CONCURRENT_LOCAL_ACTIVITIES ?? '50', // Reduced from 100 + 10 + ), + maxCachedWorkflows: parseInt(process.env.TEMPORAL_MAX_CACHED_WORKFLOWS ?? '500', 10), + + // Phase 10.4: Rate Limiting Configuration + // maxActivitiesPerSecond: Per-worker limit on activities/second + // Protects downstream services (LLM, databases) from overload + maxActivitiesPerSecond: process.env.TEMPORAL_MAX_ACTIVITIES_PER_SECOND + ? parseFloat(process.env.TEMPORAL_MAX_ACTIVITIES_PER_SECOND) + : 10, // Conservative default: 10 activities/sec per worker + + // maxTaskQueueActivitiesPerSecond: Global task queue rate limit + // Applied server-side across all workers on this task queue + maxTaskQueueActivitiesPerSecond: process.env.TEMPORAL_MAX_TASK_QUEUE_ACTIVITIES_PER_SECOND + ? parseFloat(process.env.TEMPORAL_MAX_TASK_QUEUE_ACTIVITIES_PER_SECOND) + : undefined, // No global limit by default (let workers self-regulate) + + // Poller configuration: Controls parallelism of task fetching + // Recommended: half of max concurrent executions + maxConcurrentActivityTaskPolls: parseInt( + process.env.TEMPORAL_MAX_ACTIVITY_POLLERS ?? '25', + 10 + ), + maxConcurrentWorkflowTaskPolls: parseInt( + process.env.TEMPORAL_MAX_WORKFLOW_POLLERS ?? '50', + 10 + ), + + // TLS (for Temporal Cloud or mTLS) + enableTLS: process.env.TEMPORAL_TLS_ENABLED === 'true', + tlsCertPath: process.env.TEMPORAL_TLS_CERT_PATH, + tlsKeyPath: process.env.TEMPORAL_TLS_KEY_PATH, + + // Observability + enableMetrics: process.env.TEMPORAL_METRICS_ENABLED !== 'false', + metricsPort: parseInt(process.env.TEMPORAL_METRICS_PORT ?? '9464', 10), + }; +} + +// ============================================================================ +// Connection Factory +// ============================================================================ + +let connection: NativeConnection | null = null; + +export async function createTemporalConnection(): Promise { + if (connection) { + return connection; + } + + const config = getTemporalConfig(); + + // Configure runtime for production + Runtime.install({ + telemetryOptions: { + metrics: config.enableMetrics + ? { + prometheus: { + bindAddress: `0.0.0.0:${config.metricsPort}`, + }, + } + : undefined, + }, + }); + + // Create connection with optional TLS + if (config.enableTLS && config.tlsCertPath && config.tlsKeyPath) { + const fs = await import('fs'); + connection = await NativeConnection.connect({ + address: config.address, + tls: { + clientCertPair: { + crt: fs.readFileSync(config.tlsCertPath), + key: fs.readFileSync(config.tlsKeyPath), + }, + }, + }); + } else { + connection = await NativeConnection.connect({ + address: config.address, + }); + } + + return connection; +} + +export async function closeTemporalConnection(): Promise { + if (connection) { + await connection.close(); + connection = null; + } +} + +// ============================================================================ +// Task Queue Configuration +// ============================================================================ + +export const TASK_QUEUES = { + // Main task queue for goal run workflows + GOAL_RUNS: 'bytebot-goal-runs', + + // Separate queue for planning activities (LLM-heavy) + PLANNING: 'bytebot-planning', + + // Separate queue for execution activities (agent-heavy) + EXECUTION: 'bytebot-execution', + + // Low-priority queue for non-critical activities + LOW_PRIORITY: 'bytebot-low-priority', +} as const; + +// ============================================================================ +// Retry Policies +// ============================================================================ + +export const RETRY_POLICIES = { + // For transient failures (network, rate limits) + TRANSIENT: { + initialInterval: '1s', + backoffCoefficient: 2, + maximumInterval: '60s', + maximumAttempts: 5, + }, + + // For LLM calls (may timeout) + LLM: { + initialInterval: '2s', + backoffCoefficient: 2, + maximumInterval: '120s', + maximumAttempts: 3, + }, + + // For critical operations (database, external APIs) + CRITICAL: { + initialInterval: '500ms', + backoffCoefficient: 2, + maximumInterval: '30s', + maximumAttempts: 10, + }, + + // For non-critical operations (metrics, logs) + NON_CRITICAL: { + initialInterval: '100ms', + backoffCoefficient: 1.5, + maximumInterval: '5s', + maximumAttempts: 3, + }, +} as const; + +// ============================================================================ +// Workflow Timeouts +// ============================================================================ + +export const WORKFLOW_TIMEOUTS = { + // Default workflow execution timeout + DEFAULT_EXECUTION: '24h', + + // Default workflow run timeout (single run) + DEFAULT_RUN: '1h', + + // Maximum workflow execution timeout + MAX_EXECUTION: '7d', + + // Activity timeouts + PLANNING_ACTIVITY: '5m', + EXECUTION_ACTIVITY: '10m', + VERIFICATION_ACTIVITY: '2m', + KAFKA_ACTIVITY: '10s', +} as const; diff --git a/packages/bytebot-temporal-worker/src/config/workflow-versions.ts b/packages/bytebot-temporal-worker/src/config/workflow-versions.ts new file mode 100644 index 000000000..d4e2bb8c7 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/config/workflow-versions.ts @@ -0,0 +1,167 @@ +/** + * Workflow Versioning Configuration - Phase 10.5 + * + * This module defines patch IDs and version constants for workflow evolution. + * Using Temporal's patched() API enables safe deployment of workflow changes + * without breaking running workflow executions. + * + * Best Practices (Temporal 2025): + * - Always put newest code at the top of if-patched blocks + * - Use semantic patch IDs that describe the change + * - Keep patch code until all affected workflows complete + * - Use replay testing to verify patch correctness + * - Consider Worker Versioning for major changes + * + * @see https://docs.temporal.io/develop/typescript/versioning + * @see https://docs.temporal.io/patching + */ + +// ============================================================================ +// Patch IDs +// ============================================================================ + +/** + * Patch IDs for workflow versioning. + * + * Naming convention: -- + * Example: v1.1-search-attributes + * + * Once a patch is deployed and running workflows have used it, + * the patch code must remain until all affected workflows complete. + */ +export const WORKFLOW_PATCHES = { + /** + * Phase 10.2: Added search attribute upserts + * Affects: goalRunWorkflow + * Added: 2026-01-05 + * Safe to remove after: All workflows started before this date complete + */ + V1_1_SEARCH_ATTRIBUTES: 'v1.1-search-attributes', + + /** + * Phase 10.3: Added update handlers + * Affects: goalRunWorkflow + * Added: 2026-01-05 + * Safe to remove after: All workflows started before this date complete + */ + V1_1_UPDATE_HANDLERS: 'v1.1-update-handlers', + + /** + * Phase 10.1: Enhanced metrics collection + * Affects: Activities + * Added: 2026-01-05 + * Note: Activity changes don't require patching (only workflow changes do) + */ + V1_1_ENHANCED_METRICS: 'v1.1-enhanced-metrics', + + /** + * Future: Example of how to add new patches + * When adding workflow changes, add a new patch ID here + */ + // V1_2_NEW_FEATURE: 'v1.2-new-feature', +} as const; + +// ============================================================================ +// Version Constants +// ============================================================================ + +/** + * Current workflow version for logging and debugging. + * Update this when deploying significant workflow changes. + */ +export const WORKFLOW_VERSION = '1.1.0'; + +/** + * Minimum compatible workflow version. + * Workflows started with versions below this may have issues. + */ +export const MIN_COMPATIBLE_VERSION = '1.0.0'; + +// ============================================================================ +// Version Metadata +// ============================================================================ + +/** + * Version history for documentation and debugging. + */ +export const VERSION_HISTORY = [ + { + version: '1.0.0', + date: '2026-01-01', + changes: ['Initial release with PEVR workflow'], + patches: [], + }, + { + version: '1.0.5', + date: '2026-01-05', + changes: ['Phase 9.2d: Fixed LLM proxy endpoint and authentication'], + patches: [], + }, + { + version: '1.1.0', + date: '2026-01-05', + changes: [ + 'Phase 10.1: Enhanced observability with custom Prometheus metrics', + 'Phase 10.2: Custom search attributes for workflow visibility', + 'Phase 10.3: Update handlers for synchronous operations', + 'Phase 10.4: Worker rate limiting configuration', + 'Phase 10.5: Workflow versioning support', + ], + patches: [ + WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES, + WORKFLOW_PATCHES.V1_1_UPDATE_HANDLERS, + ], + }, +] as const; + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/** + * Gets the current version info. + */ +export function getCurrentVersionInfo() { + return VERSION_HISTORY[VERSION_HISTORY.length - 1]; +} + +/** + * Checks if a version is compatible with the current workflow. + */ +export function isVersionCompatible(version: string): boolean { + const [major, minor] = version.split('.').map(Number); + const [minMajor, minMinor] = MIN_COMPATIBLE_VERSION.split('.').map(Number); + + if (major > minMajor) return true; + if (major === minMajor && minor >= minMinor) return true; + return false; +} + +// ============================================================================ +// Deprecation Tracking +// ============================================================================ + +/** + * Tracks which patches can be safely removed. + * + * To remove a patch: + * 1. Query Temporal for workflows with the patch marker + * 2. Wait for all such workflows to complete + * 3. Remove the old code path (keep only new code) + * 4. Deploy and test + * + * Query example: + * temporal workflow list -q "TemporalChangeVersion CONTAINS 'v1.1-search-attributes'" + */ +export const DEPRECATION_STATUS = { + [WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES]: { + canRemove: false, + reason: 'Recently deployed - wait for workflow completion', + checkAfter: '2026-02-05', // 1 month after deployment + }, + [WORKFLOW_PATCHES.V1_1_UPDATE_HANDLERS]: { + canRemove: false, + reason: 'Recently deployed - wait for workflow completion', + checkAfter: '2026-02-05', + }, +} as const; diff --git a/packages/bytebot-temporal-worker/src/health/health.controller.ts b/packages/bytebot-temporal-worker/src/health/health.controller.ts new file mode 100644 index 000000000..949ccd231 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/health/health.controller.ts @@ -0,0 +1,140 @@ +/** + * Health Controller + * + * Kubernetes health check endpoints for Temporal worker. + * Implements standard liveness, readiness, and startup probes. + */ + +import { Controller, Get } from '@nestjs/common'; +import { + HealthCheck, + HealthCheckService, + HealthCheckResult, + HealthIndicator, + HealthIndicatorResult, +} from '@nestjs/terminus'; +import { NativeConnection } from '@temporalio/worker'; + +// ============================================================================ +// Custom Health Indicators +// ============================================================================ + +export class TemporalHealthIndicator extends HealthIndicator { + private connection: NativeConnection | null = null; + private isWorkerRunning = false; + + setConnection(connection: NativeConnection): void { + this.connection = connection; + } + + setWorkerRunning(running: boolean): void { + this.isWorkerRunning = running; + } + + async isHealthy(key: string): Promise { + const isHealthy = this.connection !== null && this.isWorkerRunning; + + const result = this.getStatus(key, isHealthy, { + connected: this.connection !== null, + workerRunning: this.isWorkerRunning, + }); + + if (isHealthy) { + return result; + } + throw new Error('Temporal worker is not healthy'); + } +} + +// ============================================================================ +// Health Controller +// ============================================================================ + +@Controller('health') +export class HealthController { + private startTime: Date; + private isReady = false; + private temporalIndicator: TemporalHealthIndicator; + + constructor(private health: HealthCheckService) { + this.startTime = new Date(); + this.temporalIndicator = new TemporalHealthIndicator(); + } + + getTemporalIndicator(): TemporalHealthIndicator { + return this.temporalIndicator; + } + + setReady(ready: boolean): void { + this.isReady = ready; + } + + /** + * Liveness probe - is the process alive? + * Kubernetes uses this to determine if the container should be restarted. + */ + @Get('live') + @HealthCheck() + async liveness(): Promise { + return this.health.check([ + // Simple memory check - if we're running, we're alive + async () => ({ + memory: { + status: 'up', + rss: process.memoryUsage().rss, + heapUsed: process.memoryUsage().heapUsed, + uptime: process.uptime(), + }, + }), + ]); + } + + /** + * Readiness probe - is the worker ready to accept work? + * Kubernetes uses this to determine if the pod should receive traffic. + */ + @Get('ready') + @HealthCheck() + async readiness(): Promise { + if (!this.isReady) { + throw new Error('Worker not ready'); + } + + return this.health.check([ + () => this.temporalIndicator.isHealthy('temporal'), + ]); + } + + /** + * Startup probe - has the worker finished starting? + * Kubernetes uses this to determine if liveness/readiness probes should start. + */ + @Get('startup') + async startup(): Promise<{ status: string; startTime: string; uptime: number }> { + return { + status: this.isReady ? 'started' : 'starting', + startTime: this.startTime.toISOString(), + uptime: process.uptime(), + }; + } + + /** + * Detailed health check for monitoring dashboards. + */ + @Get() + @HealthCheck() + async check(): Promise { + return this.health.check([ + () => this.temporalIndicator.isHealthy('temporal'), + async () => ({ + process: { + status: 'up', + uptime: process.uptime(), + memory: process.memoryUsage(), + cpu: process.cpuUsage(), + version: process.version, + }, + }), + ]); + } +} diff --git a/packages/bytebot-temporal-worker/src/main.ts b/packages/bytebot-temporal-worker/src/main.ts new file mode 100644 index 000000000..c8a7314ad --- /dev/null +++ b/packages/bytebot-temporal-worker/src/main.ts @@ -0,0 +1,37 @@ +/** + * ByteBot Temporal Worker - Main Entry Point + * + * This starts both: + * 1. NestJS HTTP server for health checks and metrics + * 2. Temporal worker for workflow/activity execution + */ + +import { NestFactory } from '@nestjs/core'; +import { Logger } from '@nestjs/common'; +import { AppModule } from './app.module'; + +async function bootstrap(): Promise { + const logger = new Logger('Bootstrap'); + + // Create NestJS application for health/metrics + const app = await NestFactory.create(AppModule, { + logger: ['error', 'warn', 'log'], + }); + + // Get port from environment + const port = parseInt(process.env.HTTP_PORT ?? '3000', 10); + + // Start HTTP server + await app.listen(port); + logger.log(`Health/metrics server running on port ${port}`); + + // Import and start Temporal worker + // Note: The worker is started in a separate process via worker.ts + // This file is only for the HTTP health/metrics server + logger.log('To start Temporal worker, run: npm run start:worker'); +} + +bootstrap().catch((error) => { + console.error('Failed to start application:', error); + process.exit(1); +}); diff --git a/packages/bytebot-temporal-worker/src/metrics/index.ts b/packages/bytebot-temporal-worker/src/metrics/index.ts new file mode 100644 index 000000000..fb3bdbbaa --- /dev/null +++ b/packages/bytebot-temporal-worker/src/metrics/index.ts @@ -0,0 +1,19 @@ +/** + * Metrics Module - Public API + */ + +export { MetricsModule } from './metrics.module'; +export { + MetricsService, + setMetricsServiceInstance, + getMetricsService, + WorkflowStatus, + StepStatus, + ActivityType, + ActivityStatus, + LLMStatus, + ReplanReason, + WorkflowPhase, + HITLEventType, + ApprovalResult, +} from './metrics.service'; diff --git a/packages/bytebot-temporal-worker/src/metrics/metrics.module.ts b/packages/bytebot-temporal-worker/src/metrics/metrics.module.ts new file mode 100644 index 000000000..b54e07262 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/metrics/metrics.module.ts @@ -0,0 +1,191 @@ +/** + * Metrics Module - Phase 10.1: Enhanced Observability + * + * Provides custom Prometheus metrics for ByteBot Temporal Worker: + * - Workflow execution counts and durations + * - Step execution timing histograms + * - Activity performance metrics + * - Error rates by type + * - LLM call latencies + * + * Best Practices (Temporal 2025-2026): + * - Use labels sparingly to avoid cardinality explosion + * - Prefer histograms over summaries for aggregatable percentiles + * - Export business metrics alongside Temporal SDK metrics + */ + +import { Module, Global } from '@nestjs/common'; +import { PrometheusModule, makeCounterProvider, makeHistogramProvider, makeGaugeProvider } from '@willsoto/nestjs-prometheus'; +import { MetricsService } from './metrics.service'; + +// ============================================================================ +// Metric Definitions +// ============================================================================ + +/** + * Counter: Total number of workflow executions + * Labels: status (completed, failed, cancelled), tenant_id + */ +const workflowExecutionsCounter = makeCounterProvider({ + name: 'bytebot_workflow_executions_total', + help: 'Total number of workflow executions', + labelNames: ['status', 'tenant_id'], +}); + +/** + * Histogram: Workflow execution duration in seconds + * Labels: status, tenant_id + */ +const workflowDurationHistogram = makeHistogramProvider({ + name: 'bytebot_workflow_duration_seconds', + help: 'Workflow execution duration in seconds', + labelNames: ['status', 'tenant_id'], + buckets: [1, 5, 10, 30, 60, 120, 300, 600, 1800, 3600], +}); + +/** + * Counter: Total number of step executions + * Labels: status (completed, failed, skipped, retried), high_risk + */ +const stepExecutionsCounter = makeCounterProvider({ + name: 'bytebot_step_executions_total', + help: 'Total number of step executions', + labelNames: ['status', 'high_risk'], +}); + +/** + * Histogram: Step execution duration in seconds + * Labels: status + */ +const stepDurationHistogram = makeHistogramProvider({ + name: 'bytebot_step_duration_seconds', + help: 'Step execution duration in seconds', + labelNames: ['status'], + buckets: [0.1, 0.5, 1, 2, 5, 10, 30, 60, 120, 300], +}); + +/** + * Counter: Total number of activity executions + * Labels: activity_type (planning, execution, verification, kafka), status + */ +const activityExecutionsCounter = makeCounterProvider({ + name: 'bytebot_activity_executions_total', + help: 'Total number of activity executions', + labelNames: ['activity_type', 'status'], +}); + +/** + * Histogram: Activity execution duration in seconds + * Labels: activity_type + */ +const activityDurationHistogram = makeHistogramProvider({ + name: 'bytebot_activity_duration_seconds', + help: 'Activity execution duration in seconds', + labelNames: ['activity_type'], + buckets: [0.01, 0.05, 0.1, 0.5, 1, 2, 5, 10, 30, 60, 120], +}); + +/** + * Counter: Total LLM API calls + * Labels: model, status (success, error, timeout) + */ +const llmCallsCounter = makeCounterProvider({ + name: 'bytebot_llm_calls_total', + help: 'Total number of LLM API calls', + labelNames: ['model', 'status'], +}); + +/** + * Histogram: LLM API call latency in seconds + * Labels: model + */ +const llmLatencyHistogram = makeHistogramProvider({ + name: 'bytebot_llm_latency_seconds', + help: 'LLM API call latency in seconds', + labelNames: ['model'], + buckets: [0.5, 1, 2, 5, 10, 20, 30, 60, 90, 120], +}); + +/** + * Counter: Total replanning events + * Labels: reason (step_failed, verification_failed, steering) + */ +const replanEventsCounter = makeCounterProvider({ + name: 'bytebot_replan_events_total', + help: 'Total number of replanning events', + labelNames: ['reason'], +}); + +/** + * Gauge: Current active workflows + * Labels: phase (planning, executing, verifying, paused) + */ +const activeWorkflowsGauge = makeGaugeProvider({ + name: 'bytebot_active_workflows', + help: 'Number of currently active workflows', + labelNames: ['phase'], +}); + +/** + * Counter: Human-in-the-loop events + * Labels: event_type (approval_requested, approved, rejected) + */ +const hitlEventsCounter = makeCounterProvider({ + name: 'bytebot_hitl_events_total', + help: 'Human-in-the-loop events', + labelNames: ['event_type'], +}); + +/** + * Histogram: Approval wait time in seconds + * Labels: result (approved, rejected, timeout) + */ +const approvalWaitHistogram = makeHistogramProvider({ + name: 'bytebot_approval_wait_seconds', + help: 'Time waiting for human approval in seconds', + labelNames: ['result'], + buckets: [10, 30, 60, 300, 600, 1800, 3600, 7200, 14400, 28800], +}); + +/** + * Counter: Error events by type + * Labels: error_type, recoverable + */ +const errorEventsCounter = makeCounterProvider({ + name: 'bytebot_errors_total', + help: 'Total error events', + labelNames: ['error_type', 'recoverable'], +}); + +@Global() +@Module({ + imports: [ + PrometheusModule.register({ + defaultMetrics: { + enabled: true, + config: { + prefix: 'bytebot_', + }, + }, + path: '/metrics', + }), + ], + providers: [ + MetricsService, + workflowExecutionsCounter, + workflowDurationHistogram, + stepExecutionsCounter, + stepDurationHistogram, + activityExecutionsCounter, + activityDurationHistogram, + llmCallsCounter, + llmLatencyHistogram, + replanEventsCounter, + activeWorkflowsGauge, + hitlEventsCounter, + approvalWaitHistogram, + errorEventsCounter, + ], + exports: [MetricsService], +}) +export class MetricsModule {} diff --git a/packages/bytebot-temporal-worker/src/metrics/metrics.service.ts b/packages/bytebot-temporal-worker/src/metrics/metrics.service.ts new file mode 100644 index 000000000..05e3a4b3a --- /dev/null +++ b/packages/bytebot-temporal-worker/src/metrics/metrics.service.ts @@ -0,0 +1,325 @@ +/** + * Metrics Service - Phase 10.1: Enhanced Observability + * + * Provides a clean API for recording business metrics from workflows and activities. + * Follows best practices for Prometheus metrics collection: + * - Thread-safe counter/histogram updates + * - Consistent label values + * - Timer utilities for measuring durations + */ + +import { Injectable } from '@nestjs/common'; +import { InjectMetric } from '@willsoto/nestjs-prometheus'; +import { Counter, Histogram, Gauge } from 'prom-client'; + +// ============================================================================ +// Types +// ============================================================================ + +export type WorkflowStatus = 'completed' | 'failed' | 'cancelled'; +export type StepStatus = 'completed' | 'failed' | 'skipped' | 'retried'; +// Phase 11: Extended ActivityType to include new activity types and circuit breaker patterns +export type ActivityType = + | 'planning' + | 'execution' + | 'verification' + | 'kafka' + | 'refinement' + | 'fallback_planning' + | `circuit_${string}`; // Dynamic circuit breaker activity types +export type ActivityStatus = 'success' | 'failure' | 'timeout' | 'fallback'; +export type LLMStatus = 'success' | 'error' | 'timeout'; +export type ReplanReason = 'step_failed' | 'verification_failed' | 'steering' | 'manual'; +export type WorkflowPhase = 'planning' | 'executing' | 'verifying' | 'paused' | 'waiting_approval'; +export type HITLEventType = 'approval_requested' | 'approved' | 'rejected' | 'timeout'; +export type ApprovalResult = 'approved' | 'rejected' | 'timeout'; + +// ============================================================================ +// Metrics Service +// ============================================================================ + +@Injectable() +export class MetricsService { + constructor( + @InjectMetric('bytebot_workflow_executions_total') + private readonly workflowExecutions: Counter, + + @InjectMetric('bytebot_workflow_duration_seconds') + private readonly workflowDuration: Histogram, + + @InjectMetric('bytebot_step_executions_total') + private readonly stepExecutions: Counter, + + @InjectMetric('bytebot_step_duration_seconds') + private readonly stepDuration: Histogram, + + @InjectMetric('bytebot_activity_executions_total') + private readonly activityExecutions: Counter, + + @InjectMetric('bytebot_activity_duration_seconds') + private readonly activityDuration: Histogram, + + @InjectMetric('bytebot_llm_calls_total') + private readonly llmCalls: Counter, + + @InjectMetric('bytebot_llm_latency_seconds') + private readonly llmLatency: Histogram, + + @InjectMetric('bytebot_replan_events_total') + private readonly replanEvents: Counter, + + @InjectMetric('bytebot_active_workflows') + private readonly activeWorkflows: Gauge, + + @InjectMetric('bytebot_hitl_events_total') + private readonly hitlEvents: Counter, + + @InjectMetric('bytebot_approval_wait_seconds') + private readonly approvalWait: Histogram, + + @InjectMetric('bytebot_errors_total') + private readonly errorEvents: Counter, + ) {} + + // ========================================================================== + // Workflow Metrics + // ========================================================================== + + /** + * Record a completed workflow execution + */ + recordWorkflowExecution(status: WorkflowStatus, tenantId: string): void { + this.workflowExecutions.inc({ status, tenant_id: tenantId }); + } + + /** + * Record workflow duration + */ + recordWorkflowDuration(durationMs: number, status: WorkflowStatus, tenantId: string): void { + this.workflowDuration.observe( + { status, tenant_id: tenantId }, + durationMs / 1000 + ); + } + + /** + * Start timing a workflow (returns end timer function) + */ + startWorkflowTimer(status: WorkflowStatus, tenantId: string): () => number { + return this.workflowDuration.startTimer({ status, tenant_id: tenantId }); + } + + /** + * Update active workflows count + */ + setActiveWorkflows(phase: WorkflowPhase, count: number): void { + this.activeWorkflows.set({ phase }, count); + } + + incrementActiveWorkflows(phase: WorkflowPhase): void { + this.activeWorkflows.inc({ phase }); + } + + decrementActiveWorkflows(phase: WorkflowPhase): void { + this.activeWorkflows.dec({ phase }); + } + + // ========================================================================== + // Step Metrics + // ========================================================================== + + /** + * Record a step execution + */ + recordStepExecution(status: StepStatus, isHighRisk: boolean): void { + this.stepExecutions.inc({ + status, + high_risk: isHighRisk ? 'true' : 'false', + }); + } + + /** + * Record step duration + */ + recordStepDuration(durationMs: number, status: StepStatus): void { + this.stepDuration.observe({ status }, durationMs / 1000); + } + + /** + * Start timing a step execution + */ + startStepTimer(status: StepStatus): () => number { + return this.stepDuration.startTimer({ status }); + } + + // ========================================================================== + // Activity Metrics + // ========================================================================== + + /** + * Record an activity execution + */ + recordActivityExecution(activityType: ActivityType, status: ActivityStatus): void { + this.activityExecutions.inc({ activity_type: activityType, status }); + } + + /** + * Record activity duration + */ + recordActivityDuration(activityType: ActivityType, durationMs: number): void { + this.activityDuration.observe({ activity_type: activityType }, durationMs / 1000); + } + + /** + * Start timing an activity execution + */ + startActivityTimer(activityType: ActivityType): () => number { + return this.activityDuration.startTimer({ activity_type: activityType }); + } + + /** + * Convenience method to time and record an activity + */ + async timeActivity( + activityType: ActivityType, + fn: () => Promise + ): Promise { + const endTimer = this.startActivityTimer(activityType); + try { + const result = await fn(); + endTimer(); + this.recordActivityExecution(activityType, 'success'); + return result; + } catch (error) { + endTimer(); + const status = this.isTimeoutError(error) ? 'timeout' : 'failure'; + this.recordActivityExecution(activityType, status); + throw error; + } + } + + // ========================================================================== + // LLM Metrics + // ========================================================================== + + /** + * Record an LLM API call + */ + recordLLMCall(model: string, status: LLMStatus): void { + this.llmCalls.inc({ model, status }); + } + + /** + * Record LLM latency + */ + recordLLMLatency(model: string, durationMs: number): void { + this.llmLatency.observe({ model }, durationMs / 1000); + } + + /** + * Start timing an LLM call + */ + startLLMTimer(model: string): () => number { + return this.llmLatency.startTimer({ model }); + } + + /** + * Convenience method to time and record an LLM call + */ + async timeLLMCall(model: string, fn: () => Promise): Promise { + const endTimer = this.startLLMTimer(model); + try { + const result = await fn(); + endTimer(); + this.recordLLMCall(model, 'success'); + return result; + } catch (error) { + endTimer(); + const status = this.isTimeoutError(error) ? 'timeout' : 'error'; + this.recordLLMCall(model, status); + throw error; + } + } + + // ========================================================================== + // Replan Metrics + // ========================================================================== + + /** + * Record a replanning event + */ + recordReplanEvent(reason: ReplanReason): void { + this.replanEvents.inc({ reason }); + } + + // ========================================================================== + // Human-in-the-Loop Metrics + // ========================================================================== + + /** + * Record a HITL event + */ + recordHITLEvent(eventType: HITLEventType): void { + this.hitlEvents.inc({ event_type: eventType }); + } + + /** + * Record approval wait time + */ + recordApprovalWait(durationMs: number, result: ApprovalResult): void { + this.approvalWait.observe({ result }, durationMs / 1000); + } + + // ========================================================================== + // Error Metrics + // ========================================================================== + + /** + * Record an error event + */ + recordError(errorType: string, recoverable: boolean): void { + this.errorEvents.inc({ + error_type: this.normalizeErrorType(errorType), + recoverable: recoverable ? 'true' : 'false', + }); + } + + // ========================================================================== + // Helper Methods + // ========================================================================== + + private isTimeoutError(error: unknown): boolean { + if (error instanceof Error) { + return ( + error.message.includes('timeout') || + error.message.includes('ETIMEDOUT') || + error.message.includes('ESOCKETTIMEDOUT') + ); + } + return false; + } + + private normalizeErrorType(errorType: string): string { + // Normalize error types to prevent cardinality explosion + const normalized = errorType + .toUpperCase() + .replace(/[^A-Z_]/g, '_') + .replace(/_+/g, '_') + .slice(0, 50); // Limit length + return normalized || 'UNKNOWN'; + } +} + +// ============================================================================ +// Singleton instance for use in activities (non-DI context) +// ============================================================================ + +let metricsServiceInstance: MetricsService | null = null; + +export function setMetricsServiceInstance(service: MetricsService): void { + metricsServiceInstance = service; +} + +export function getMetricsService(): MetricsService | null { + return metricsServiceInstance; +} diff --git a/packages/bytebot-temporal-worker/src/types/goal-run.types.ts b/packages/bytebot-temporal-worker/src/types/goal-run.types.ts new file mode 100644 index 000000000..afc1c9496 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/types/goal-run.types.ts @@ -0,0 +1,397 @@ +/** + * ByteBot Temporal Worker - Type Definitions + * + * Industry-standard type definitions for durable workflow execution. + * Follows patterns from OpenAI, Anthropic, and Manus AI. + */ + +import { z } from 'zod'; + +// ============================================================================ +// Workflow Input/Output Types +// ============================================================================ + +export const GoalRunInputSchema = z.object({ + goalRunId: z.string().min(1), + tenantId: z.string().min(1), + userId: z.string().min(1), + goalDescription: z.string().min(1), + workspaceId: z.string().optional(), + constraints: z.object({ + maxSteps: z.number().int().positive().default(50), + maxRetries: z.number().int().nonnegative().default(3), + maxReplans: z.number().int().nonnegative().default(3), + timeoutMs: z.number().int().positive().default(3600000), // 1 hour + requireApprovalForHighRisk: z.boolean().default(true), + }).optional(), + context: z.object({ + previousAttempts: z.number().int().nonnegative().default(0), + parentGoalRunId: z.string().optional(), + inheritedKnowledge: z.array(z.string()).default([]), + }).optional(), + // Runtime-only: allows starting a "capability probe" workflow execution that does not + // call activities or mutate external systems, but still validates Update handler registration. + mode: z.enum(['NORMAL', 'CAPABILITY_PROBE']).optional(), +}); + +export type GoalRunInput = z.infer; + +export const GoalRunResultSchema = z.object({ + goalRunId: z.string(), + status: z.enum(['COMPLETED', 'FAILED', 'CANCELLED', 'TIMEOUT']), + completedAt: z.string().datetime(), + summary: z.string(), + stepsCompleted: z.number().int().nonnegative(), + totalDurationMs: z.number().int().nonnegative(), + finalOutcome: z.string().optional(), + errorDetails: z.object({ + errorType: z.string(), + errorMessage: z.string(), + failedStep: z.number().int().optional(), + recoverable: z.boolean(), + }).optional(), + artifacts: z.array(z.object({ + type: z.string(), + path: z.string(), + description: z.string().optional(), + })).default([]), + knowledgeGained: z.array(z.string()).default([]), +}); + +export type GoalRunResult = z.infer; + +// ============================================================================ +// Workflow State Types +// ============================================================================ + +export enum GoalRunPhase { + INITIALIZING = 'INITIALIZING', + PLANNING = 'PLANNING', + EXECUTING = 'EXECUTING', + WAITING_USER_INPUT = 'WAITING_USER_INPUT', + WAITING_PROVIDER = 'WAITING_PROVIDER', + VERIFYING = 'VERIFYING', + REPLANNING = 'REPLANNING', + AWAITING_APPROVAL = 'AWAITING_APPROVAL', + PAUSED = 'PAUSED', + COMPLETED = 'COMPLETED', + FAILED = 'FAILED', + CANCELLED = 'CANCELLED', +} + +export const StepSchema = z.object({ + stepNumber: z.number().int().positive(), + description: z.string(), + expectedOutcome: z.string().optional(), + isHighRisk: z.boolean().default(false), + dependencies: z.array(z.number().int().positive()).default([]), + estimatedDurationMs: z.number().int().positive().optional(), +}); + +export type Step = z.infer; + +export const StepResultSchema = z.object({ + stepNumber: z.number().int().positive(), + status: z.enum(['PENDING', 'IN_PROGRESS', 'COMPLETED', 'FAILED', 'SKIPPED']), + startedAt: z.string().datetime().optional(), + completedAt: z.string().datetime().optional(), + actualOutcome: z.string().optional(), + error: z.string().optional(), + retryCount: z.number().int().nonnegative().default(0), + artifacts: z.array(z.string()).default([]), +}); + +export type StepResult = z.infer; + +export const GoalProgressSchema = z.object({ + goalRunId: z.string(), + phase: z.nativeEnum(GoalRunPhase), + currentStep: z.number().int().nonnegative(), + totalSteps: z.number().int().nonnegative(), + completedSteps: z.number().int().nonnegative(), + failedSteps: z.number().int().nonnegative(), + percentComplete: z.number().min(0).max(100), + startedAt: z.string().datetime(), + lastUpdatedAt: z.string().datetime(), + estimatedCompletionAt: z.string().datetime().optional(), + isPaused: z.boolean().default(false), + isAwaitingApproval: z.boolean().default(false), + isWaitingUserInput: z.boolean().default(false), + isWaitingProvider: z.boolean().default(false), +}); + +export type GoalProgress = z.infer; + +// ============================================================================ +// Checkpoint Types (Manus-style persistence) +// ============================================================================ + +export const GoalCheckpointSchema = z.object({ + goalRunId: z.string(), + version: z.number().int().positive(), + checkpointedAt: z.string().datetime(), + phase: z.nativeEnum(GoalRunPhase), + + progressSummary: z.object({ + totalSteps: z.number().int().nonnegative(), + completedSteps: z.number().int().nonnegative(), + failedSteps: z.number().int().nonnegative(), + percentComplete: z.number().min(0).max(100), + }), + + completedWork: z.array(z.object({ + stepNumber: z.number().int().positive(), + description: z.string(), + outcome: z.string(), + completedAt: z.string().datetime(), + })), + + currentContext: z.object({ + lastSuccessfulStep: z.string().optional(), + lastSuccessfulOutcome: z.string().optional(), + currentStep: z.string().optional(), + failureReason: z.string().optional(), // "Leave wrong turns in context" - Manus pattern + accumulatedKnowledge: z.array(z.string()), + }), + + remainingSteps: z.array(StepSchema), + + plan: z.object({ + originalPlan: z.string(), + currentPlan: z.string(), + replanCount: z.number().int().nonnegative(), + }).optional(), +}); + +export type GoalCheckpoint = z.infer; + +// ============================================================================ +// Signal Payload Types +// ============================================================================ + +export const ApproveStepPayloadSchema = z.object({ + stepId: z.string(), + approver: z.string(), + comment: z.string().optional(), + approvedAt: z.string().datetime().optional(), +}); + +export type ApproveStepPayload = z.infer; + +export const RejectStepPayloadSchema = z.object({ + stepId: z.string(), + reason: z.string(), + rejector: z.string().optional(), + rejectedAt: z.string().datetime().optional(), +}); + +export type RejectStepPayload = z.infer; + +export const CancelGoalPayloadSchema = z.object({ + reason: z.string(), + cancelledBy: z.string().optional(), + cancelledAt: z.string().datetime().optional(), +}); + +export type CancelGoalPayload = z.infer; + +export const SteerPayloadSchema = z.object({ + instruction: z.string(), + priority: z.enum(['LOW', 'NORMAL', 'HIGH', 'URGENT']).default('NORMAL'), + addToContext: z.boolean().default(true), +}); + +export type SteerPayload = z.infer; + +// ============================================================================ +// Activity Input/Output Types +// ============================================================================ + +export const PlanGoalInputSchema = z.object({ + goalRunId: z.string(), + tenantId: z.string(), + goalDescription: z.string(), + previousFailures: z.array(z.object({ + stepNumber: z.number().int(), + error: z.string(), + })).default([]), + accumulatedKnowledge: z.array(z.string()).default([]), + constraints: z.object({ + maxSteps: z.number().int().positive(), + }).optional(), +}); + +export type PlanGoalInput = z.infer; + +export const PlanGoalOutputPlanSchema = z.object({ + kind: z.literal('PLAN'), + steps: z.array(StepSchema), + planSummary: z.string(), + estimatedDurationMs: z.number().int().positive().optional(), + confidence: z.number().min(0).max(1).optional(), + /** Phase 13.3: Assessment of whether the task is achievable with desktop agent capabilities */ + capabilityAssessment: z.string().optional(), +}); + +export const PlanGoalOutputGoalIntakeSchema = z.object({ + kind: z.literal('GOAL_INTAKE_REQUIRED'), + promptId: z.string().min(1), + goalSpecId: z.string().min(1), + reason: z.string().min(1), +}); + +// Stark contract: planning may return a durable Goal Intake request instead of steps. +export const PlanGoalOutputSchema = z.union([ + PlanGoalOutputPlanSchema, + PlanGoalOutputGoalIntakeSchema, +]); + +export type PlanGoalOutput = z.infer; + +export const ExecuteStepInputSchema = z.object({ + goalRunId: z.string(), + tenantId: z.string(), + step: StepSchema, + workspaceId: z.string().optional(), + context: z.object({ + previousStepOutcome: z.string().optional(), + accumulatedKnowledge: z.array(z.string()), + }).optional(), +}); + +export type ExecuteStepInput = z.infer; + +export const ExecuteStepOutputSchema = z.object({ + success: z.boolean(), + outcome: z.string(), + artifacts: z.array(z.string()).default([]), + knowledgeGained: z.array(z.string()).default([]), + needsApproval: z.boolean().default(false), + waitingForUserInput: z.boolean().default(false), + waitingForProvider: z.boolean().default(false), + error: z.string().optional(), +}); + +export type ExecuteStepOutput = z.infer; + +export const VerifyStepInputSchema = z.object({ + goalRunId: z.string(), + tenantId: z.string(), + step: StepSchema, + executionResult: ExecuteStepOutputSchema, +}); + +export type VerifyStepInput = z.infer; + +export const VerifyStepOutputSchema = z.object({ + verified: z.boolean(), + verificationDetails: z.string(), + suggestReplan: z.boolean().default(false), + replanReason: z.string().optional(), +}); + +export type VerifyStepOutput = z.infer; + +// ============================================================================ +// Kafka Event Types +// ============================================================================ + +export const GoalEventSchema = z.object({ + eventId: z.string().uuid(), + eventType: z.enum([ + 'GOAL_STARTED', + 'GOAL_COMPLETED', + 'GOAL_FAILED', + 'GOAL_CANCELLED', + 'GOAL_PAUSED', + 'GOAL_RESUMED', + ]), + goalRunId: z.string(), + tenantId: z.string(), + timestamp: z.string().datetime(), + payload: z.record(z.unknown()), +}); + +export type GoalEvent = z.infer; + +export const StepEventSchema = z.object({ + eventId: z.string().uuid(), + eventType: z.enum([ + 'STEP_STARTED', + 'STEP_COMPLETED', + 'STEP_FAILED', + 'STEP_SKIPPED', + 'STEP_APPROVAL_REQUESTED', + 'STEP_APPROVED', + 'STEP_REJECTED', + ]), + goalRunId: z.string(), + tenantId: z.string(), + stepNumber: z.number().int(), + timestamp: z.string().datetime(), + payload: z.record(z.unknown()), +}); + +export type StepEvent = z.infer; + +export const AuditEventSchema = z.object({ + eventId: z.string().uuid(), + eventType: z.string(), + goalRunId: z.string(), + tenantId: z.string(), + userId: z.string().optional(), + timestamp: z.string().datetime(), + action: z.string(), + details: z.record(z.unknown()), + metadata: z.object({ + workflowId: z.string().optional(), + runId: z.string().optional(), + activityId: z.string().optional(), + }).optional(), +}); + +export type AuditEvent = z.infer; + +// ============================================================================ +// Failure Classification Types (Google SRE pattern) +// ============================================================================ + +export enum FailureCategory { + TRANSIENT = 'TRANSIENT', + SEMANTIC = 'SEMANTIC', + PERMANENT = 'PERMANENT', +} + +export enum FailureType { + // Transient + HEARTBEAT_TIMEOUT = 'HEARTBEAT_TIMEOUT', + NETWORK_ERROR = 'NETWORK_ERROR', + SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE', + RATE_LIMITED = 'RATE_LIMITED', + AGENT_UNREACHABLE = 'AGENT_UNREACHABLE', + + // Semantic + STEP_FAILED = 'STEP_FAILED', + VALIDATION_ERROR = 'VALIDATION_ERROR', + ASSERTION_FAILED = 'ASSERTION_FAILED', + NEEDS_HELP = 'NEEDS_HELP', + WRONG_APPROACH = 'WRONG_APPROACH', + + // Permanent + RESOURCE_DELETED = 'RESOURCE_DELETED', + PERMISSION_DENIED = 'PERMISSION_DENIED', + BUDGET_EXHAUSTED = 'BUDGET_EXHAUSTED', + GOAL_CANCELLED = 'GOAL_CANCELLED', + FATAL_ERROR = 'FATAL_ERROR', +} + +export const FailureClassificationSchema = z.object({ + category: z.nativeEnum(FailureCategory), + type: z.nativeEnum(FailureType), + retryable: z.boolean(), + suggestedAction: z.enum(['RETRY', 'REPLAN', 'FAIL', 'ASK_HUMAN']), + maxRetries: z.number().int().nonnegative().optional(), + backoffMs: z.number().int().nonnegative().optional(), +}); + +export type FailureClassification = z.infer; diff --git a/packages/bytebot-temporal-worker/src/types/index.ts b/packages/bytebot-temporal-worker/src/types/index.ts new file mode 100644 index 000000000..c87e3586a --- /dev/null +++ b/packages/bytebot-temporal-worker/src/types/index.ts @@ -0,0 +1,7 @@ +/** + * Type Exports + * + * Export all types for the Temporal worker. + */ + +export * from './goal-run.types'; diff --git a/packages/bytebot-temporal-worker/src/utils/circuit-breaker.ts b/packages/bytebot-temporal-worker/src/utils/circuit-breaker.ts new file mode 100644 index 000000000..157ce0ab3 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/utils/circuit-breaker.ts @@ -0,0 +1,452 @@ +/** + * Circuit Breaker Utility - Phase 13 + * + * Provides resilient HTTP calls with circuit breaker pattern using opossum. + * Follows industry best practices for protecting downstream services. + * + * Phase 13 Improvements: + * - EPERM error detection and automatic retry for Cilium identity sync issues + * - Improved circuit breaker recovery timing based on industry research + * - Better half-open state handling with gradual recovery + * - Enhanced metrics for circuit breaker state monitoring + * + * Features: + * - Circuit breaker with configurable thresholds + * - Automatic fallback handling + * - Prometheus metrics integration + * - Heartbeat support for long-running operations + * - EPERM/network transient error retry + * + * @see https://github.com/nodeshift/opossum + * @see https://martinfowler.com/bliki/CircuitBreaker.html + */ + +import CircuitBreaker from 'opossum'; +import axios, { AxiosRequestConfig, AxiosResponse, AxiosError } from 'axios'; +import { Context } from '@temporalio/activity'; +import { getMetricsService } from '../metrics'; + +// ============================================================================ +// Configuration +// ============================================================================ + +export interface CircuitBreakerConfig { + /** Time in ms before a call times out (default: 120000 = 2 min) */ + timeout: number; + /** Percentage of failures that trips the circuit (default: 50) */ + errorThresholdPercentage: number; + /** Time in ms to wait before testing circuit again (default: 30000 = 30s) */ + resetTimeout: number; + /** Name for metrics and logging */ + name: string; + /** Volume threshold before circuit trips (default: 5) */ + volumeThreshold?: number; + /** Whether to enable heartbeats during requests (default: true) */ + enableHeartbeats?: boolean; + /** Heartbeat interval in ms (default: 30000 = 30s) */ + heartbeatIntervalMs?: number; +} + +const DEFAULT_CONFIG: Partial = { + timeout: 120000, + errorThresholdPercentage: 50, + resetTimeout: 30000, + volumeThreshold: 5, + enableHeartbeats: true, + heartbeatIntervalMs: 30000, +}; + +// ============================================================================ +// Phase 13: EPERM Retry Configuration +// ============================================================================ + +/** + * Configuration for retrying requests that fail due to transient network issues + * like Cilium identity synchronization (EPERM errors). + */ +export interface TransientRetryConfig { + /** Maximum number of retries for transient errors (default: 3) */ + maxRetries: number; + /** Base delay between retries in ms (default: 200) */ + baseDelayMs: number; + /** Maximum delay between retries in ms (default: 2000) */ + maxDelayMs: number; + /** Whether to use exponential backoff (default: true) */ + exponentialBackoff: boolean; +} + +const DEFAULT_TRANSIENT_RETRY_CONFIG: TransientRetryConfig = { + maxRetries: 3, + baseDelayMs: 200, + maxDelayMs: 2000, + exponentialBackoff: true, +}; + +/** + * Checks if an error is a transient network error that should be retried. + * These errors typically occur during Cilium identity synchronization. + * + * @param error - The error to check + * @returns true if the error is transient and should be retried + */ +function isTransientNetworkError(error: unknown): boolean { + if (!error) return false; + + // Check for Axios error + if (axios.isAxiosError(error)) { + const axiosError = error as AxiosError; + + // EPERM - Operation not permitted (Cilium identity not synced) + if (axiosError.code === 'EPERM' || axiosError.message?.includes('EPERM')) { + console.warn('[CircuitBreaker] Detected EPERM error - likely Cilium identity sync issue'); + return true; + } + + // ECONNREFUSED during identity sync can also occur + if (axiosError.code === 'ECONNREFUSED') { + console.warn('[CircuitBreaker] Detected ECONNREFUSED - may be transient'); + return true; + } + + // ETIMEDOUT can be transient + if (axiosError.code === 'ETIMEDOUT' || axiosError.code === 'ECONNRESET') { + console.warn(`[CircuitBreaker] Detected ${axiosError.code} - may be transient`); + return true; + } + + // Check for network errors in the message + const transientPatterns = [ + 'Operation not permitted', + 'EPERM', + 'network', + 'ENOTFOUND', + 'EAI_AGAIN', + ]; + for (const pattern of transientPatterns) { + if (axiosError.message?.includes(pattern)) { + return true; + } + } + } + + // Check error message for common transient patterns + if (error instanceof Error) { + const message = error.message.toLowerCase(); + if ( + message.includes('eperm') || + message.includes('operation not permitted') || + message.includes('econnrefused') || + message.includes('econnreset') + ) { + return true; + } + } + + return false; +} + +/** + * Sleep for a specified duration + */ +function sleep(ms: number): Promise { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +/** + * Calculate delay with exponential backoff + */ +function calculateBackoffDelay( + attempt: number, + config: TransientRetryConfig +): number { + if (!config.exponentialBackoff) { + return config.baseDelayMs; + } + const delay = config.baseDelayMs * Math.pow(2, attempt); + // Add jitter (±25%) + const jitter = delay * 0.25 * (Math.random() * 2 - 1); + return Math.min(delay + jitter, config.maxDelayMs); +} + +// ============================================================================ +// Circuit Breaker Factory +// ============================================================================ + +const circuitBreakers = new Map>(); + +/** + * Creates or retrieves a circuit breaker for the given configuration. + * Circuit breakers are cached by name for reuse. + */ +export function getCircuitBreaker( + config: CircuitBreakerConfig +): CircuitBreaker<[AxiosRequestConfig], AxiosResponse> { + const fullConfig = { ...DEFAULT_CONFIG, ...config }; + + if (circuitBreakers.has(config.name)) { + return circuitBreakers.get(config.name)!; + } + + const breaker = new CircuitBreaker( + async (axiosConfig: AxiosRequestConfig): Promise => { + return axios(axiosConfig); + }, + { + timeout: fullConfig.timeout, + errorThresholdPercentage: fullConfig.errorThresholdPercentage, + resetTimeout: fullConfig.resetTimeout, + volumeThreshold: fullConfig.volumeThreshold, + name: config.name, + } + ); + + // Register event handlers for observability + const metricsService = getMetricsService(); + + breaker.on('success', () => { + metricsService?.recordActivityExecution(`circuit_${config.name}`, 'success'); + }); + + breaker.on('failure', () => { + metricsService?.recordActivityExecution(`circuit_${config.name}`, 'failure'); + }); + + breaker.on('timeout', () => { + metricsService?.recordActivityExecution(`circuit_${config.name}`, 'timeout'); + }); + + breaker.on('open', () => { + console.warn(`[CircuitBreaker] ${config.name} circuit OPENED (failures exceeded threshold)`); + }); + + breaker.on('halfOpen', () => { + console.info(`[CircuitBreaker] ${config.name} circuit HALF-OPEN (testing recovery)`); + }); + + breaker.on('close', () => { + console.info(`[CircuitBreaker] ${config.name} circuit CLOSED (recovered)`); + }); + + breaker.on('fallback', () => { + metricsService?.recordActivityExecution(`circuit_${config.name}`, 'fallback'); + }); + + circuitBreakers.set(config.name, breaker); + return breaker; +} + +// ============================================================================ +// Resilient HTTP Client +// ============================================================================ + +/** + * Makes an HTTP request with circuit breaker protection, heartbeat support, + * and automatic retry for transient network errors (like EPERM from Cilium). + * + * Phase 13: Added EPERM retry logic to handle Cilium identity sync issues. + * + * @param axiosConfig - Axios request configuration + * @param breakerConfig - Circuit breaker configuration + * @param fallback - Optional fallback function if circuit is open or request fails + * @param retryConfig - Optional configuration for transient error retry + * @returns Axios response + */ +export async function resilientRequest( + axiosConfig: AxiosRequestConfig, + breakerConfig: CircuitBreakerConfig, + fallback?: () => Promise, + retryConfig?: Partial +): Promise> { + const fullConfig = { ...DEFAULT_CONFIG, ...breakerConfig }; + const fullRetryConfig = { ...DEFAULT_TRANSIENT_RETRY_CONFIG, ...retryConfig }; + const breaker = getCircuitBreaker(breakerConfig); + const metricsService = getMetricsService(); + + // Set up heartbeat interval if enabled and in activity context + let heartbeatInterval: NodeJS.Timeout | undefined; + let heartbeatCount = 0; + + if (fullConfig.enableHeartbeats) { + try { + const context = Context.current(); + heartbeatInterval = setInterval(() => { + heartbeatCount++; + context.heartbeat({ + operation: breakerConfig.name, + status: 'in_progress', + heartbeatCount, + timestamp: new Date().toISOString(), + }); + }, fullConfig.heartbeatIntervalMs); + } catch { + // Not in activity context, skip heartbeats + } + } + + // Configure fallback if provided + if (fallback) { + breaker.fallback(async () => { + const result = await fallback(); + return { data: result } as AxiosResponse; + }); + } + + try { + // Ensure axios config has timeout set + const configWithTimeout: AxiosRequestConfig = { + ...axiosConfig, + timeout: axiosConfig.timeout ?? fullConfig.timeout, + }; + + // Phase 13: Implement retry logic for transient errors + let lastError: unknown; + for (let attempt = 0; attempt <= fullRetryConfig.maxRetries; attempt++) { + try { + const response = await breaker.fire(configWithTimeout); + + // If we retried and succeeded, log it + if (attempt > 0) { + console.info( + `[CircuitBreaker] ${breakerConfig.name} succeeded after ${attempt} retries` + ); + metricsService?.recordActivityExecution( + `circuit_${breakerConfig.name}_retry_success`, + 'success' + ); + } + + return response as AxiosResponse; + } catch (error) { + lastError = error; + + // Check if this is a transient error that should be retried + if (isTransientNetworkError(error) && attempt < fullRetryConfig.maxRetries) { + const delay = calculateBackoffDelay(attempt, fullRetryConfig); + console.warn( + `[CircuitBreaker] ${breakerConfig.name} transient error (attempt ${attempt + 1}/${fullRetryConfig.maxRetries + 1}), ` + + `retrying in ${delay}ms: ${error instanceof Error ? error.message : String(error)}` + ); + + metricsService?.recordActivityExecution( + `circuit_${breakerConfig.name}_transient_retry`, + 'fallback' + ); + + await sleep(delay); + continue; + } + + // Not a transient error or max retries exceeded, rethrow + throw error; + } + } + + // Should not reach here, but just in case + throw lastError; + } finally { + if (heartbeatInterval) { + clearInterval(heartbeatInterval); + } + } +} + +// ============================================================================ +// Pre-configured Circuit Breakers for Butler Vantage Services +// ============================================================================ + +/** + * Circuit breaker for LLM proxy calls (planning, model calls) + * + * Phase 13 improvements: + * - Increased resetTimeout to 120s for better recovery from transient issues + * - Increased volumeThreshold to 5 to avoid premature circuit opening + * - LLM services can have variable latency, so more tolerance is needed + */ +export const LLM_CIRCUIT_BREAKER_CONFIG: CircuitBreakerConfig = { + name: 'llm_proxy', + timeout: 180000, // 3 minutes for LLM calls (increased from 2 min for large models) + errorThresholdPercentage: 50, // Trip at 50% failure rate (more tolerant) + resetTimeout: 120000, // 2 minutes before retry (increased for better recovery) + volumeThreshold: 5, // Trip after 5 failures (increased from 3) + enableHeartbeats: true, + heartbeatIntervalMs: 30000, // Heartbeat every 30s +}; + +/** Circuit breaker for orchestrator calls (internal services) */ +export const ORCHESTRATOR_CIRCUIT_BREAKER_CONFIG: CircuitBreakerConfig = { + name: 'orchestrator', + timeout: 30000, // 30 seconds for internal calls + errorThresholdPercentage: 50, + resetTimeout: 30000, + volumeThreshold: 5, + enableHeartbeats: false, // Internal calls are fast +}; + +/** Circuit breaker for task controller calls */ +export const TASK_CONTROLLER_CIRCUIT_BREAKER_CONFIG: CircuitBreakerConfig = { + name: 'task_controller', + timeout: 30000, + errorThresholdPercentage: 50, + resetTimeout: 30000, + volumeThreshold: 5, + enableHeartbeats: false, +}; + +// ============================================================================ +// Utility Functions +// ============================================================================ + +/** + * Gets the current state of all circuit breakers. + * Useful for health checks and monitoring. + */ +export function getCircuitBreakerStates(): Record { + const states: Record = {}; + + for (const [name, breaker] of circuitBreakers) { + let state: 'CLOSED' | 'OPEN' | 'HALF_OPEN' = 'CLOSED'; + if (breaker.opened) { + state = 'OPEN'; + } else if (breaker.halfOpen) { + state = 'HALF_OPEN'; + } + + states[name] = { + state, + stats: { + failures: breaker.stats.failures, + successes: breaker.stats.successes, + timeouts: breaker.stats.timeouts, + fallbacks: breaker.stats.fallbacks, + }, + }; + } + + return states; +} + +/** + * Resets a specific circuit breaker (useful for testing/recovery). + */ +export function resetCircuitBreaker(name: string): void { + const breaker = circuitBreakers.get(name); + if (breaker) { + breaker.close(); + console.info(`[CircuitBreaker] ${name} manually reset to CLOSED`); + } +} + +/** + * Clears all circuit breaker instances (useful for testing). + */ +export function clearAllCircuitBreakers(): void { + circuitBreakers.clear(); +} diff --git a/packages/bytebot-temporal-worker/src/utils/index.ts b/packages/bytebot-temporal-worker/src/utils/index.ts new file mode 100644 index 000000000..c6953e256 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/utils/index.ts @@ -0,0 +1,5 @@ +/** + * Utility Exports - Phase 11 + */ + +export * from './circuit-breaker'; diff --git a/packages/bytebot-temporal-worker/src/worker.ts b/packages/bytebot-temporal-worker/src/worker.ts new file mode 100644 index 000000000..eede02446 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/worker.ts @@ -0,0 +1,264 @@ +/** + * Temporal Worker Entry Point + * + * This is the main entry point for the Temporal worker process. + * It creates and runs a worker that executes Butler Vantage workflows. + * + * Production features: + * - HTTP server starts FIRST for health check endpoints + * - Graceful shutdown handling + * - Prometheus metrics export + * - Connection retry with backoff + * - Health status updates throughout startup + */ + +import { NestFactory, HttpAdapterHost } from '@nestjs/core'; +import { INestApplication } from '@nestjs/common'; +import { Worker, bundleWorkflowCode, NativeConnection } from '@temporalio/worker'; +import { AppModule } from './app.module'; +import { HealthController } from './health/health.controller'; +import { + createTemporalConnection, + closeTemporalConnection, + getTemporalConfig, +} from './config/temporal.config'; + +// Phase 10.1: Metrics service for enhanced observability +import { MetricsService, setMetricsServiceInstance } from './metrics'; + +// Import activities +import * as planningActivities from './activities/planning.activities'; +import * as executionActivities from './activities/execution.activities'; +import * as kafkaActivities from './activities/kafka.activities'; + +// ============================================================================ +// Worker State +// ============================================================================ + +let worker: Worker | null = null; +let nestApp: INestApplication | null = null; +let healthController: HealthController | null = null; +let isShuttingDown = false; + +// ============================================================================ +// Main Entry Point +// ============================================================================ + +async function main(): Promise { + const config = getTemporalConfig(); + + console.log('Starting Butler Vantage Temporal Worker...'); + console.log(` Address: ${config.address}`); + console.log(` Namespace: ${config.namespace}`); + console.log(` Task Queue: ${config.taskQueue}`); + console.log(` Max Concurrent Activities: ${config.maxConcurrentActivityTaskExecutions}`); + console.log(` Max Concurrent Workflows: ${config.maxConcurrentWorkflowTaskExecutions}`); + + // ========================================================================= + // STEP 1: Start HTTP server FIRST for health checks + // This ensures Kubernetes startup probes succeed immediately + // ========================================================================= + console.log('Starting HTTP health server...'); + const httpPort = parseInt(process.env.HTTP_PORT ?? '3000', 10); + + nestApp = await NestFactory.create(AppModule, { + logger: ['error', 'warn', 'log'], + }); + + await nestApp.listen(httpPort); + console.log(`HTTP health server running on port ${httpPort}`); + + // Get reference to health controller to update state + healthController = nestApp.get(HealthController); + + // Phase 10.1: Initialize metrics service for activities + try { + const metricsService = nestApp.get(MetricsService); + setMetricsServiceInstance(metricsService); + console.log('Metrics service initialized for activities'); + } catch (e) { + console.warn('MetricsService not available, continuing without activity metrics'); + } + + // ========================================================================= + // STEP 2: Connect to Temporal with retry + // Health endpoints are now available during this phase + // ========================================================================= + let connection: NativeConnection | undefined; + let retryCount = 0; + const maxRetries = 10; + const baseDelay = 1000; + + while (retryCount < maxRetries) { + try { + connection = await createTemporalConnection(); + console.log('Connected to Temporal server'); + + // Update health controller with connection + healthController!.getTemporalIndicator().setConnection(connection); + break; + } catch (error) { + retryCount++; + const delay = Math.min(baseDelay * Math.pow(2, retryCount), 30000); + console.error( + `Failed to connect to Temporal (attempt ${retryCount}/${maxRetries}):`, + error instanceof Error ? error.message : error + ); + + if (retryCount >= maxRetries) { + console.error('Max retries exceeded, exiting'); + process.exit(1); + } + + console.log(`Retrying in ${delay}ms...`); + await new Promise((resolve) => setTimeout(resolve, delay)); + } + } + + if (!connection) { + console.error('Failed to establish connection'); + process.exit(1); + } + + // ========================================================================= + // STEP 3: Bundle workflow code + // ========================================================================= + console.log('Bundling workflow code...'); + const workflowBundle = await bundleWorkflowCode({ + workflowsPath: require.resolve('./workflows/goal-run.workflow'), + }); + + // ========================================================================= + // STEP 4: Create and start worker + // ========================================================================= + worker = await Worker.create({ + connection, + namespace: config.namespace, + taskQueue: config.taskQueue, + workflowBundle, + + // Register activities + activities: { + ...planningActivities, + ...executionActivities, + ...kafkaActivities, + }, + + // Performance tuning + maxConcurrentActivityTaskExecutions: config.maxConcurrentActivityTaskExecutions, + maxConcurrentWorkflowTaskExecutions: config.maxConcurrentWorkflowTaskExecutions, + maxConcurrentLocalActivityExecutions: config.maxConcurrentLocalActivityExecutions, + maxCachedWorkflows: config.maxCachedWorkflows, + + // Phase 10.4: Rate Limiting Configuration + // Per-worker rate limit to protect downstream services + maxActivitiesPerSecond: config.maxActivitiesPerSecond, + // Task queue global rate limit (server-side) + maxTaskQueueActivitiesPerSecond: config.maxTaskQueueActivitiesPerSecond, + // Poller concurrency (controls task fetching parallelism) + maxConcurrentActivityTaskPolls: config.maxConcurrentActivityTaskPolls, + maxConcurrentWorkflowTaskPolls: config.maxConcurrentWorkflowTaskPolls, + + // Graceful shutdown + shutdownGraceTime: '30s', + + // Enable sticky execution for better performance + enableSDKTracing: true, + stickyQueueScheduleToStartTimeout: '10s', + }); + + console.log('Worker created, starting...'); + + // Update health controller - worker is now running + healthController!.getTemporalIndicator().setWorkerRunning(true); + healthController!.setReady(true); + console.log('Worker is now READY - health checks will pass'); + + // Setup graceful shutdown + setupShutdownHandlers(); + + // Run worker (blocks until shutdown) + try { + await worker.run(); + } catch (error) { + if (!isShuttingDown) { + console.error('Worker error:', error); + process.exit(1); + } + } + + console.log('Worker stopped'); +} + +// ============================================================================ +// Graceful Shutdown +// ============================================================================ + +function setupShutdownHandlers(): void { + const shutdown = async (signal: string) => { + if (isShuttingDown) { + console.log('Shutdown already in progress...'); + return; + } + + isShuttingDown = true; + console.log(`Received ${signal}, initiating graceful shutdown...`); + + // Update health status to not ready + if (healthController) { + healthController.setReady(false); + healthController.getTemporalIndicator().setWorkerRunning(false); + } + + try { + // Stop accepting new tasks + if (worker) { + console.log('Stopping worker...'); + worker.shutdown(); + } + + // Wait for in-flight tasks to complete (up to 30s) + await new Promise((resolve) => setTimeout(resolve, 5000)); + + // Close Temporal connection + console.log('Closing Temporal connection...'); + await closeTemporalConnection(); + + // Close NestJS application + if (nestApp) { + console.log('Closing HTTP server...'); + await nestApp.close(); + } + + console.log('Graceful shutdown complete'); + process.exit(0); + } catch (error) { + console.error('Error during shutdown:', error); + process.exit(1); + } + }; + + process.on('SIGTERM', () => shutdown('SIGTERM')); + process.on('SIGINT', () => shutdown('SIGINT')); + process.on('SIGHUP', () => shutdown('SIGHUP')); + + // Handle uncaught errors + process.on('uncaughtException', (error) => { + console.error('Uncaught exception:', error); + shutdown('uncaughtException'); + }); + + process.on('unhandledRejection', (reason) => { + console.error('Unhandled rejection:', reason); + shutdown('unhandledRejection'); + }); +} + +// ============================================================================ +// Run +// ============================================================================ + +main().catch((error) => { + console.error('Fatal error:', error); + process.exit(1); +}); diff --git a/packages/bytebot-temporal-worker/src/workflows/goal-run.workflow.ts b/packages/bytebot-temporal-worker/src/workflows/goal-run.workflow.ts new file mode 100644 index 000000000..f4a5ca638 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/workflows/goal-run.workflow.ts @@ -0,0 +1,1153 @@ +/** + * GoalRunWorkflow - Durable Workflow for ByteBot Goal Execution + * + * Industry-standard patterns: + * - OpenAI: Exponential backoff, background execution + * - Anthropic: Git-style checkpointing, context engineering + * - Manus AI: "Leave wrong turns in context", todo.md persistence + * - Google SRE: Failure classification, retry budgets + * + * Key Features: + * - Durable PEVR cycle (Plan → Execute → Verify → Replan) + * - Automatic retries with exponential backoff + * - Signal-based human-in-the-loop (approve, reject, steer) + * - Query-based progress inspection + * - Kafka event emission for observability + */ + +import { + proxyActivities, + defineSignal, + defineQuery, + defineUpdate, + setHandler, + condition, + sleep, + workflowInfo, + patched, + CancellationScope, + isCancellation, + ApplicationFailure, + upsertSearchAttributes, +} from '@temporalio/workflow'; + +// Phase 10.2: Import search attribute configuration +import { + SEARCH_ATTRIBUTES, + createPhaseUpdate, + createPlanSearchAttributes, + createApprovalStateUpdate, + createErrorSearchAttributes, +} from '../config/search-attributes.config'; + +// Phase 10.5: Import workflow versioning +import { WORKFLOW_PATCHES, WORKFLOW_VERSION } from '../config/workflow-versions'; + +import type { PlanningActivities } from '../activities/planning.activities'; +import type { ExecutionActivities } from '../activities/execution.activities'; +import type { KafkaActivities } from '../activities/kafka.activities'; + +import type { + GoalRunInput, + GoalRunResult, + GoalProgress, + GoalCheckpoint, + Step, + StepResult, + ApproveStepPayload, + RejectStepPayload, + CancelGoalPayload, + SteerPayload, + GoalRunPhase, +} from '../types/goal-run.types'; + +// ============================================================================ +// Activity Proxies with Retry Policies +// ============================================================================ + +const planningActivities = proxyActivities({ + startToCloseTimeout: '5m', + retry: { + initialInterval: '1s', + backoffCoefficient: 2, + maximumInterval: '30s', + maximumAttempts: 5, + }, +}); + +const executionActivities = proxyActivities({ + startToCloseTimeout: '10m', + heartbeatTimeout: '30s', // Activity must heartbeat every 30s + retry: { + initialInterval: '2s', + backoffCoefficient: 2, + maximumInterval: '60s', + maximumAttempts: 3, + }, +}); + +const kafkaActivities = proxyActivities({ + startToCloseTimeout: '10s', + retry: { + initialInterval: '500ms', + backoffCoefficient: 2, + maximumInterval: '5s', + maximumAttempts: 3, + }, +}); + +// ============================================================================ +// Signal Definitions +// ============================================================================ + +export const approveStepSignal = defineSignal<[ApproveStepPayload]>('approveStep'); +export const rejectStepSignal = defineSignal<[RejectStepPayload]>('rejectStep'); +export const pauseGoalSignal = defineSignal('pauseGoal'); +export const resumeGoalSignal = defineSignal('resumeGoal'); +export const cancelGoalSignal = defineSignal<[CancelGoalPayload]>('cancelGoal'); +export const steerSignal = defineSignal<[SteerPayload]>('steer'); +export const userPromptResolvedSignal = defineSignal<[UserPromptResolvedPayload]>('userPromptResolved'); + +export interface UserPromptResolvedPayload { + promptId: string; + answers: Record; +} + +// ============================================================================ +// Query Definitions +// ============================================================================ + +export const getProgressQuery = defineQuery('getProgress'); +export const getCurrentStepQuery = defineQuery('getCurrentStep'); +export const getCheckpointQuery = defineQuery('getCheckpoint'); +export const getStepResultsQuery = defineQuery('getStepResults'); + +// ============================================================================ +// Update Definitions - Phase 10.3: Synchronous Operations +// ============================================================================ + +/** + * Add context/knowledge to the workflow. + * Returns the updated list of accumulated knowledge. + * Unlike signals, updates provide synchronous confirmation. + */ +export interface AddKnowledgePayload { + knowledge: string[]; + source?: string; +} +export const addKnowledgeUpdate = defineUpdate('addKnowledge'); + +/** + * Modify step priority/order during execution. + * Returns true if modification was accepted. + */ +export interface ModifyStepsPayload { + skipSteps?: number[]; // Step numbers to skip + prioritizeStep?: number; // Move this step to next + addSteps?: Array<{ description: string; insertAfter: number }>; +} +export interface ModifyStepsResult { + accepted: boolean; + message: string; + newStepOrder?: number[]; +} +export const modifyStepsUpdate = defineUpdate('modifySteps'); + +/** + * Force immediate checkpoint/snapshot of workflow state. + * Returns the checkpoint data synchronously. + */ +export const forceCheckpointUpdate = defineUpdate('forceCheckpoint'); + +/** + * Request workflow to pause at next safe point. + * Returns current phase and expected pause point. + */ +export interface PauseRequestResult { + willPauseAt: string; + currentPhase: GoalRunPhase; + acknowledgment: string; +} +export const requestPauseUpdate = defineUpdate('requestPause'); + +/** + * Stark Fix: Resume from an External Input Request (prompt) with synchronous confirmation. + * The orchestrator uses this Update (with a stable updateId) to achieve replay-safe, idempotent resume. + */ +export interface UserPromptResolvedResult { + accepted: boolean; + applied: boolean; + promptId?: string; + message?: string; +} +export const userPromptResolvedUpdate = + defineUpdate('userPromptResolved'); + +// ============================================================================ +// Workflow State (Durable - survives crashes) +// ============================================================================ + +interface WorkflowState { + phase: GoalRunPhase; + steps: Step[]; + stepResults: StepResult[]; + currentStepIndex: number; + isPaused: boolean; + isCancelled: boolean; + cancelReason: string | null; + isAwaitingApproval: boolean; + approvalStepId: string | null; + approvalResult: 'APPROVED' | 'REJECTED' | null; + rejectionReason: string | null; + isWaitingUserInput: boolean; + waitingUserInputForStep: number | null; + waitingUserInputReason: 'GOAL_INTAKE' | 'STEP_INPUT' | null; + isWaitingProvider: boolean; + waitingProviderForStep: number | null; + waitingProviderReason: string | null; + providerWaitCount: number; + lastUserPromptId: string | null; + pendingUserPrompts: Array<{ promptId: string; answers: Record }>; + accumulatedKnowledge: string[]; + failureHistory: Array<{ stepNumber: number; error: string }>; + replanCount: number; + startedAt: string; + lastUpdatedAt: string; + steeringInstructions: SteerPayload[]; +} + +// ============================================================================ +// Main Workflow +// ============================================================================ + +export async function goalRunWorkflow(input: GoalRunInput): Promise { + const { workflowId, runId } = workflowInfo(); + const constraints = input.constraints ?? { + maxSteps: 50, + maxRetries: 3, + maxReplans: 3, + timeoutMs: 3600000, + requireApprovalForHighRisk: true, + }; + + // Initialize workflow state + const state: WorkflowState = { + phase: 'INITIALIZING' as GoalRunPhase, + steps: [], + stepResults: [], + currentStepIndex: 0, + isPaused: false, + isCancelled: false, + cancelReason: null, + isAwaitingApproval: false, + approvalStepId: null, + approvalResult: null, + rejectionReason: null, + isWaitingUserInput: false, + waitingUserInputForStep: null, + waitingUserInputReason: null, + isWaitingProvider: false, + waitingProviderForStep: null, + waitingProviderReason: null, + providerWaitCount: 0, + lastUserPromptId: null, + pendingUserPrompts: [], + accumulatedKnowledge: input.context?.inheritedKnowledge ?? [], + failureHistory: [], + replanCount: 0, + startedAt: new Date().toISOString(), + lastUpdatedAt: new Date().toISOString(), + steeringInstructions: [], + }; + + // ============================================================================ + // Signal Handlers + // ============================================================================ + + setHandler(approveStepSignal, (payload: ApproveStepPayload) => { + if (state.isAwaitingApproval && state.approvalStepId === payload.stepId) { + state.approvalResult = 'APPROVED'; + state.isAwaitingApproval = false; + state.lastUpdatedAt = new Date().toISOString(); + } + }); + + setHandler(rejectStepSignal, (payload: RejectStepPayload) => { + if (state.isAwaitingApproval && state.approvalStepId === payload.stepId) { + state.approvalResult = 'REJECTED'; + state.rejectionReason = payload.reason; + state.isAwaitingApproval = false; + state.lastUpdatedAt = new Date().toISOString(); + } + }); + + setHandler(pauseGoalSignal, () => { + state.isPaused = true; + state.lastUpdatedAt = new Date().toISOString(); + }); + + setHandler(resumeGoalSignal, () => { + state.isPaused = false; + state.lastUpdatedAt = new Date().toISOString(); + }); + + setHandler(cancelGoalSignal, (payload: CancelGoalPayload) => { + state.isCancelled = true; + state.cancelReason = payload.reason; + state.phase = 'CANCELLED' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + }); + + setHandler(steerSignal, (payload: SteerPayload) => { + state.steeringInstructions.push(payload); + if (payload.addToContext) { + state.accumulatedKnowledge.push(`[STEERING] ${payload.instruction}`); + } + state.lastUpdatedAt = new Date().toISOString(); + }); + + setHandler(userPromptResolvedSignal, (payload: UserPromptResolvedPayload) => { + if (payload.promptId && payload.promptId === state.lastUserPromptId) { + return; + } + + state.lastUserPromptId = payload.promptId; + state.pendingUserPrompts.push({ promptId: payload.promptId, answers: payload.answers }); + state.lastUpdatedAt = new Date().toISOString(); + }); + + // ============================================================================ + // Query Handlers + // ============================================================================ + + setHandler(getProgressQuery, (): GoalProgress => ({ + goalRunId: input.goalRunId, + phase: state.phase, + currentStep: state.currentStepIndex, + totalSteps: state.steps.length, + completedSteps: state.stepResults.filter((r) => r.status === 'COMPLETED').length, + failedSteps: state.stepResults.filter((r) => r.status === 'FAILED').length, + percentComplete: state.steps.length > 0 + ? Math.round((state.stepResults.filter((r) => r.status === 'COMPLETED').length / state.steps.length) * 100) + : 0, + startedAt: state.startedAt, + lastUpdatedAt: state.lastUpdatedAt, + isPaused: state.isPaused, + isAwaitingApproval: state.isAwaitingApproval, + isWaitingUserInput: state.isWaitingUserInput || state.phase === ('WAITING_USER_INPUT' as GoalRunPhase), + isWaitingProvider: state.isWaitingProvider || state.phase === ('WAITING_PROVIDER' as GoalRunPhase), + })); + + setHandler(getCurrentStepQuery, (): Step | null => { + if (state.currentStepIndex >= 0 && state.currentStepIndex < state.steps.length) { + return state.steps[state.currentStepIndex]; + } + return null; + }); + + setHandler(getCheckpointQuery, (): GoalCheckpoint => ({ + goalRunId: input.goalRunId, + version: state.replanCount + 1, + checkpointedAt: state.lastUpdatedAt, + phase: state.phase, + progressSummary: { + totalSteps: state.steps.length, + completedSteps: state.stepResults.filter((r) => r.status === 'COMPLETED').length, + failedSteps: state.stepResults.filter((r) => r.status === 'FAILED').length, + percentComplete: state.steps.length > 0 + ? Math.round((state.stepResults.filter((r) => r.status === 'COMPLETED').length / state.steps.length) * 100) + : 0, + }, + completedWork: state.stepResults + .filter((r) => r.status === 'COMPLETED') + .map((r) => ({ + stepNumber: r.stepNumber, + description: state.steps.find((s) => s.stepNumber === r.stepNumber)?.description ?? '', + outcome: r.actualOutcome ?? '', + completedAt: r.completedAt ?? state.lastUpdatedAt, + })), + currentContext: { + lastSuccessfulStep: state.stepResults.filter((r) => r.status === 'COMPLETED').pop()?.actualOutcome, + currentStep: state.steps[state.currentStepIndex]?.description, + failureReason: state.failureHistory[state.failureHistory.length - 1]?.error, + accumulatedKnowledge: state.accumulatedKnowledge, + }, + remainingSteps: state.steps.slice(state.currentStepIndex), + })); + + setHandler(getStepResultsQuery, (): StepResult[] => state.stepResults); + + // ============================================================================ + // Update Handlers - Phase 10.3: Synchronous Operations + // ============================================================================ + + /** + * Resume from user prompt resolution with synchronous acknowledgment. + * Note: this complements the userPromptResolvedSignal; prefer the Update for reliable orchestration. + */ + setHandler(userPromptResolvedUpdate, (payload: UserPromptResolvedPayload): UserPromptResolvedResult => { + if (!payload?.promptId) { + return { accepted: false, applied: false, message: 'promptId is required' }; + } + + if (payload.promptId === state.lastUserPromptId) { + return { accepted: true, applied: false, promptId: payload.promptId, message: 'duplicate promptId (noop)' }; + } + + state.lastUserPromptId = payload.promptId; + state.pendingUserPrompts.push({ promptId: payload.promptId, answers: payload.answers }); + state.lastUpdatedAt = new Date().toISOString(); + + return { accepted: true, applied: true, promptId: payload.promptId }; + }); + + /** + * Add knowledge to the workflow context. + * Synchronous - caller waits for confirmation. + */ + setHandler(addKnowledgeUpdate, (payload: AddKnowledgePayload): string[] => { + const source = payload.source ?? 'external'; + const newKnowledge = payload.knowledge.map((k) => `[${source}] ${k}`); + state.accumulatedKnowledge.push(...newKnowledge); + state.lastUpdatedAt = new Date().toISOString(); + return state.accumulatedKnowledge; + }); + + /** + * Modify steps during execution. + * Validates the request and returns synchronous confirmation. + */ + setHandler( + modifyStepsUpdate, + (payload: ModifyStepsPayload): ModifyStepsResult => { + // Cannot modify if already completed or cancelled + if (state.phase === 'COMPLETED' || state.phase === 'CANCELLED' || state.phase === 'FAILED') { + return { + accepted: false, + message: `Cannot modify steps in ${state.phase} state`, + }; + } + + let modified = false; + const messages: string[] = []; + + // Skip steps + if (payload.skipSteps && payload.skipSteps.length > 0) { + for (const stepNum of payload.skipSteps) { + const stepIndex = state.steps.findIndex((s) => s.stepNumber === stepNum); + if (stepIndex !== -1 && stepIndex > state.currentStepIndex) { + const result = state.stepResults[stepIndex]; + if (result && result.status === 'PENDING') { + result.status = 'SKIPPED'; + result.completedAt = new Date().toISOString(); + result.error = 'Skipped by user request'; + modified = true; + messages.push(`Skipped step ${stepNum}`); + } + } + } + } + + // Prioritize a step (move it to be next) + if (payload.prioritizeStep !== undefined) { + const targetIndex = state.steps.findIndex((s) => s.stepNumber === payload.prioritizeStep); + if (targetIndex > state.currentStepIndex + 1) { + // Move the step to be next + const [step] = state.steps.splice(targetIndex, 1); + const [result] = state.stepResults.splice(targetIndex, 1); + state.steps.splice(state.currentStepIndex + 1, 0, step); + state.stepResults.splice(state.currentStepIndex + 1, 0, result); + modified = true; + messages.push(`Prioritized step ${payload.prioritizeStep} to run next`); + } + } + + // Add new steps + if (payload.addSteps && payload.addSteps.length > 0) { + for (const newStep of payload.addSteps) { + const insertIndex = state.steps.findIndex((s) => s.stepNumber === newStep.insertAfter); + if (insertIndex !== -1) { + const newStepNumber = Math.max(...state.steps.map((s) => s.stepNumber)) + 1; + const step: Step = { + stepNumber: newStepNumber, + description: newStep.description, + isHighRisk: false, + dependencies: [], + }; + const result: StepResult = { + stepNumber: newStepNumber, + status: 'PENDING', + retryCount: 0, + artifacts: [], + }; + state.steps.splice(insertIndex + 1, 0, step); + state.stepResults.splice(insertIndex + 1, 0, result); + modified = true; + messages.push(`Added new step ${newStepNumber} after step ${newStep.insertAfter}`); + } + } + } + + state.lastUpdatedAt = new Date().toISOString(); + + return { + accepted: modified, + message: modified ? messages.join('; ') : 'No modifications applied', + newStepOrder: state.steps.map((s) => s.stepNumber), + }; + }, + { + // Validator: Check if modification is safe + validator: (payload: ModifyStepsPayload): void => { + if (payload.skipSteps?.some((n) => n < 1)) { + throw new Error('Invalid step number: must be >= 1'); + } + if (payload.prioritizeStep !== undefined && payload.prioritizeStep < 1) { + throw new Error('Invalid prioritize step number'); + } + }, + } + ); + + /** + * Force an immediate checkpoint. + * Returns full checkpoint data synchronously. + */ + setHandler(forceCheckpointUpdate, (): GoalCheckpoint => { + state.lastUpdatedAt = new Date().toISOString(); + return { + goalRunId: input.goalRunId, + version: state.replanCount + 1, + checkpointedAt: state.lastUpdatedAt, + phase: state.phase, + progressSummary: { + totalSteps: state.steps.length, + completedSteps: state.stepResults.filter((r) => r.status === 'COMPLETED').length, + failedSteps: state.stepResults.filter((r) => r.status === 'FAILED').length, + percentComplete: state.steps.length > 0 + ? Math.round((state.stepResults.filter((r) => r.status === 'COMPLETED').length / state.steps.length) * 100) + : 0, + }, + completedWork: state.stepResults + .filter((r) => r.status === 'COMPLETED') + .map((r) => ({ + stepNumber: r.stepNumber, + description: state.steps.find((s) => s.stepNumber === r.stepNumber)?.description ?? '', + outcome: r.actualOutcome ?? '', + completedAt: r.completedAt ?? state.lastUpdatedAt, + })), + currentContext: { + lastSuccessfulStep: state.stepResults.filter((r) => r.status === 'COMPLETED').pop()?.actualOutcome, + currentStep: state.steps[state.currentStepIndex]?.description, + failureReason: state.failureHistory[state.failureHistory.length - 1]?.error, + accumulatedKnowledge: state.accumulatedKnowledge, + }, + remainingSteps: state.steps.slice(state.currentStepIndex), + }; + }); + + /** + * Request workflow to pause at next safe point. + * Returns acknowledgment with expected pause location. + */ + setHandler(requestPauseUpdate, (): PauseRequestResult => { + state.isPaused = true; + state.lastUpdatedAt = new Date().toISOString(); + + let willPauseAt: string; + if (state.isAwaitingApproval) { + willPauseAt = 'Already paused - awaiting approval'; + } else if (state.phase === 'EXECUTING') { + willPauseAt = `After completing step ${state.currentStepIndex + 1}`; + } else if (state.phase === 'PLANNING') { + willPauseAt = 'After planning completes'; + } else { + willPauseAt = 'At next loop iteration'; + } + + return { + willPauseAt, + currentPhase: state.phase, + acknowledgment: `Pause requested at ${state.lastUpdatedAt}`, + }; + }); + + // ============================================================================ + // Capability Probe Mode (Runtime rollout guardrail) + // ============================================================================ + + // This mode exists to let the orchestrator verify that: + // 1) the worker can run `goalRunWorkflow` on the target task queue, and + // 2) the `userPromptResolved` Update handler is registered and callable. + // + // In CAPABILITY_PROBE mode we intentionally avoid activities (no DB writes, no Kafka emits, no planning). + if (input.mode === 'CAPABILITY_PROBE') { + state.phase = 'WAITING_USER_INPUT' as GoalRunPhase; + state.isWaitingUserInput = true; + state.waitingUserInputReason = 'GOAL_INTAKE'; + state.lastUpdatedAt = new Date().toISOString(); + + await condition(() => state.pendingUserPrompts.length > 0 || state.isCancelled); + + const completedAt = new Date().toISOString(); + const durationMs = Math.max(0, new Date(completedAt).getTime() - new Date(state.startedAt).getTime()); + + return { + goalRunId: input.goalRunId, + status: state.isCancelled ? 'CANCELLED' : 'COMPLETED', + completedAt, + summary: state.isCancelled ? 'CAPABILITY_PROBE_CANCELLED' : 'CAPABILITY_PROBE_OK', + stepsCompleted: 0, + totalDurationMs: durationMs, + finalOutcome: state.isCancelled ? state.cancelReason ?? undefined : undefined, + artifacts: [], + knowledgeGained: [], + }; + } + + // ============================================================================ + // Emit Start Event + // ============================================================================ + + try { + await kafkaActivities.emitGoalEvent({ + eventType: 'GOAL_STARTED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + payload: { + goalDescription: input.goalDescription, + workflowId, + runId, + }, + }); + } catch (e) { + // Non-critical - continue workflow even if event fails + } + + // ============================================================================ + // Main PEVR Loop + // ============================================================================ + + try { + while (!state.isCancelled) { + // Check for pause + if (state.isPaused) { + await kafkaActivities.emitGoalEvent({ + eventType: 'GOAL_PAUSED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + payload: {}, + }).catch(() => {}); + + await condition(() => !state.isPaused || state.isCancelled); + + if (!state.isCancelled) { + await kafkaActivities.emitGoalEvent({ + eventType: 'GOAL_RESUMED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + payload: {}, + }).catch(() => {}); + } + continue; + } + + // Durable WAIT for user input (Stark Fix Atom 6) + if (state.phase === ('WAITING_USER_INPUT' as GoalRunPhase)) { + state.isWaitingUserInput = true; + state.lastUpdatedAt = new Date().toISOString(); + + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate('WAITING_USER_INPUT')); + } + + await condition(() => state.pendingUserPrompts.length > 0 || state.isCancelled || state.isPaused); + + if (state.isCancelled || state.isPaused) { + continue; + } + + const resolved = state.pendingUserPrompts.shift(); + if (resolved) { + state.accumulatedKnowledge.push(`[USER_INPUT] ${JSON.stringify(resolved.answers)}`); + } + + const resumePhaseAttribute = + state.waitingUserInputReason === 'GOAL_INTAKE' ? 'INITIALIZING' : 'EXECUTING'; + const resumePhase = resumePhaseAttribute as GoalRunPhase; + + state.isWaitingUserInput = false; + state.waitingUserInputForStep = null; + state.waitingUserInputReason = null; + state.phase = resumePhase; + state.lastUpdatedAt = new Date().toISOString(); + + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate(resumePhaseAttribute)); + } + + continue; + } + + // Durable WAIT for provider/model capacity (Stark Fix: no retry storms) + if (state.phase === ('WAITING_PROVIDER' as GoalRunPhase)) { + state.isWaitingProvider = true; + state.lastUpdatedAt = new Date().toISOString(); + + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate('WAITING_PROVIDER')); + } + + const attempt = Math.max(0, state.providerWaitCount); + const delayMs = Math.min(10 * 60_000, 30_000 * Math.pow(2, Math.min(attempt, 5))); + + // Wait for either pause/cancel or backoff expiry. + await condition(() => state.isCancelled || state.isPaused, `${delayMs}ms`); + + if (state.isCancelled || state.isPaused) { + continue; + } + + state.providerWaitCount = Math.min(state.providerWaitCount + 1, 10); + state.isWaitingProvider = false; + state.waitingProviderForStep = null; + state.waitingProviderReason = null; + state.phase = 'EXECUTING' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate('EXECUTING')); + } + + continue; + } + + // Check cancellation + if (state.isCancelled) { + break; + } + + // ======================================================================== + // PLAN Phase + // ======================================================================== + + if (state.phase === 'INITIALIZING' || state.phase === 'REPLANNING') { + state.phase = 'PLANNING' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + + // Phase 10.2 + 10.5: Version-safe search attribute update + // Using patched() ensures old workflows complete without this code path + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate('PLANNING')); + } + + const planResult = await planningActivities.planGoal({ + goalRunId: input.goalRunId, + tenantId: input.tenantId, + goalDescription: input.goalDescription, + previousFailures: state.failureHistory, + accumulatedKnowledge: state.accumulatedKnowledge, + constraints: { maxSteps: constraints.maxSteps }, + }); + + if (planResult.kind === 'GOAL_INTAKE_REQUIRED') { + state.steps = []; + state.stepResults = []; + state.currentStepIndex = 0; + state.isWaitingUserInput = true; + state.waitingUserInputForStep = null; + state.waitingUserInputReason = 'GOAL_INTAKE'; + state.phase = 'WAITING_USER_INPUT' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate('WAITING_USER_INPUT')); + } + + continue; + } + + state.steps = planResult.steps; + state.stepResults = planResult.steps.map((step) => ({ + stepNumber: step.stepNumber, + status: 'PENDING' as const, + retryCount: 0, + artifacts: [], + })); + state.currentStepIndex = 0; + state.phase = 'EXECUTING' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + + // Phase 10.2 + 10.5: Version-safe search attribute update + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + const hasHighRiskSteps = state.steps.some((step) => step.isHighRisk); + upsertSearchAttributes(createPlanSearchAttributes(state.steps.length, hasHighRiskSteps)); + } + } + + // ======================================================================== + // EXECUTE Phase + // ======================================================================== + + if (state.phase === 'EXECUTING' && state.currentStepIndex < state.steps.length) { + const currentStep = state.steps[state.currentStepIndex]; + const currentResult = state.stepResults[state.currentStepIndex]; + + // Update step status + currentResult.status = 'IN_PROGRESS'; + currentResult.startedAt = new Date().toISOString(); + state.lastUpdatedAt = new Date().toISOString(); + + // Emit step started event + await kafkaActivities.emitStepEvent({ + eventType: 'STEP_STARTED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: currentStep.stepNumber, + payload: { description: currentStep.description }, + }).catch(() => {}); + + // Check if high-risk step requires approval + if (currentStep.isHighRisk && constraints.requireApprovalForHighRisk) { + state.isAwaitingApproval = true; + state.approvalStepId = `step-${currentStep.stepNumber}`; + state.approvalResult = null; + state.rejectionReason = null; + + // Phase 10.2 + 10.5: Version-safe search attribute update + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createApprovalStateUpdate(true)); + } + + await kafkaActivities.emitStepEvent({ + eventType: 'STEP_APPROVAL_REQUESTED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: currentStep.stepNumber, + payload: { description: currentStep.description, isHighRisk: true }, + }).catch(() => {}); + + // Wait for approval (with timeout) + const approved = await condition( + () => state.approvalResult !== null || state.isCancelled, + '24h' // 24 hour approval timeout + ); + + // Phase 10.2 + 10.5: Version-safe search attribute update + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createApprovalStateUpdate(false)); + } + + if (state.isCancelled) { + break; + } + + if (state.approvalResult === 'REJECTED') { + await kafkaActivities.emitStepEvent({ + eventType: 'STEP_REJECTED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: currentStep.stepNumber, + payload: { reason: state.rejectionReason }, + }).catch(() => {}); + + // Skip this step and continue + currentResult.status = 'SKIPPED'; + currentResult.completedAt = new Date().toISOString(); + currentResult.error = `Rejected: ${state.rejectionReason}`; + state.currentStepIndex++; + continue; + } + + await kafkaActivities.emitStepEvent({ + eventType: 'STEP_APPROVED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: currentStep.stepNumber, + payload: {}, + }).catch(() => {}); + } + + // Execute the step + try { + const executeResult = await executionActivities.executeStep({ + goalRunId: input.goalRunId, + tenantId: input.tenantId, + step: currentStep, + workspaceId: input.workspaceId, + context: { + previousStepOutcome: state.stepResults[state.currentStepIndex - 1]?.actualOutcome, + accumulatedKnowledge: state.accumulatedKnowledge, + }, + }); + + if (executeResult.success) { + currentResult.status = 'COMPLETED'; + currentResult.actualOutcome = executeResult.outcome; + currentResult.artifacts = executeResult.artifacts; + currentResult.completedAt = new Date().toISOString(); + + // Add knowledge gained + state.accumulatedKnowledge.push(...executeResult.knowledgeGained); + state.providerWaitCount = 0; + state.isWaitingProvider = false; + state.waitingProviderForStep = null; + state.waitingProviderReason = null; + + await kafkaActivities.emitStepEvent({ + eventType: 'STEP_COMPLETED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: currentStep.stepNumber, + payload: { outcome: executeResult.outcome }, + }).catch(() => {}); + + // ================================================================ + // VERIFY Phase (inline for efficiency) + // ================================================================ + + state.phase = 'VERIFYING' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + + const verifyResult = await executionActivities.verifyStep({ + goalRunId: input.goalRunId, + tenantId: input.tenantId, + step: currentStep, + executionResult: executeResult, + }); + + if (verifyResult.verified) { + // Move to next step + state.currentStepIndex++; + state.phase = 'EXECUTING' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + } else if (verifyResult.suggestReplan) { + // Need to replan + if (state.replanCount >= constraints.maxReplans) { + throw ApplicationFailure.create({ + type: 'MAX_REPLANS_EXCEEDED', + message: `Exceeded maximum replans (${constraints.maxReplans})`, + }); + } + + state.replanCount++; + state.failureHistory.push({ + stepNumber: currentStep.stepNumber, + error: verifyResult.replanReason ?? 'Verification failed', + }); + state.phase = 'REPLANNING' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + } else { + // Retry the step + currentResult.retryCount++; + if (currentResult.retryCount >= constraints.maxRetries) { + throw ApplicationFailure.create({ + type: 'MAX_RETRIES_EXCEEDED', + message: `Step ${currentStep.stepNumber} exceeded maximum retries`, + }); + } + // Sleep with exponential backoff before retry + await sleep(`${Math.min(30000, 1000 * Math.pow(2, currentResult.retryCount))}ms`); + } + } else { + if (executeResult.waitingForProvider) { + currentResult.status = 'PENDING'; + currentResult.error = 'WAITING_PROVIDER'; + + state.isWaitingProvider = true; + state.waitingProviderForStep = currentStep.stepNumber; + state.waitingProviderReason = executeResult.error ?? executeResult.outcome ?? 'WAITING_PROVIDER'; + state.phase = 'WAITING_PROVIDER' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + + await kafkaActivities.emitAuditEvent({ + eventType: 'goal.waiting_provider', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + action: 'WAITING_PROVIDER', + details: { + stepNumber: currentStep.stepNumber, + reason: state.waitingProviderReason, + providerWaitCount: state.providerWaitCount, + }, + }).catch(() => {}); + + continue; + } + + if (executeResult.waitingForUserInput || executeResult.needsApproval) { + // Stark Fix (Atom 6): Treat "needs help" as a durable WAIT state, not a failure/retry loop. + currentResult.status = 'PENDING'; + currentResult.error = 'WAITING_USER_INPUT'; + state.isWaitingUserInput = true; + state.waitingUserInputForStep = currentStep.stepNumber; + state.waitingUserInputReason = 'STEP_INPUT'; + state.phase = 'WAITING_USER_INPUT' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + continue; + } + + // Step failed + currentResult.status = 'FAILED'; + currentResult.error = executeResult.error; + currentResult.completedAt = new Date().toISOString(); + + await kafkaActivities.emitStepEvent({ + eventType: 'STEP_FAILED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: currentStep.stepNumber, + payload: { error: executeResult.error }, + }).catch(() => {}); + + // Classify failure and decide action + if (currentResult.retryCount < constraints.maxRetries) { + currentResult.retryCount++; + currentResult.status = 'PENDING'; + await sleep(`${Math.min(30000, 1000 * Math.pow(2, currentResult.retryCount))}ms`); + } else { + // Try replanning + if (state.replanCount < constraints.maxReplans) { + state.replanCount++; + state.failureHistory.push({ + stepNumber: currentStep.stepNumber, + error: executeResult.error ?? 'Step execution failed', + }); + state.phase = 'REPLANNING' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + } else { + throw ApplicationFailure.create({ + type: 'STEP_PERMANENTLY_FAILED', + message: `Step ${currentStep.stepNumber} failed permanently after ${constraints.maxRetries} retries and ${constraints.maxReplans} replans`, + }); + } + } + } + } catch (error) { + if (isCancellation(error)) { + throw error; + } + + currentResult.status = 'FAILED'; + currentResult.error = error instanceof Error ? error.message : String(error); + currentResult.completedAt = new Date().toISOString(); + + await kafkaActivities.emitStepEvent({ + eventType: 'STEP_FAILED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + stepNumber: currentStep.stepNumber, + payload: { error: currentResult.error }, + }).catch(() => {}); + + throw error; + } + } + + // Check if all steps completed + if (state.currentStepIndex >= state.steps.length) { + state.phase = 'COMPLETED' as GoalRunPhase; + state.lastUpdatedAt = new Date().toISOString(); + break; + } + } + + // ============================================================================ + // Build Result + // ============================================================================ + + const completedSteps = state.stepResults.filter((r) => r.status === 'COMPLETED').length; + const allArtifacts = state.stepResults.flatMap((r) => + r.artifacts.map((path) => ({ type: 'file', path, description: undefined })) + ); + + if (state.isCancelled) { + // Phase 10.2 + 10.5: Version-safe search attribute update + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate('CANCELLED')); + } + + await kafkaActivities.emitGoalEvent({ + eventType: 'GOAL_CANCELLED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + payload: { reason: state.cancelReason }, + }).catch(() => {}); + + return { + goalRunId: input.goalRunId, + status: 'CANCELLED', + completedAt: new Date().toISOString(), + summary: `Goal cancelled: ${state.cancelReason}`, + stepsCompleted: completedSteps, + totalDurationMs: Date.now() - new Date(state.startedAt).getTime(), + artifacts: allArtifacts, + knowledgeGained: state.accumulatedKnowledge, + }; + } + + // Phase 10.2 + 10.5: Version-safe search attribute update + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createPhaseUpdate('COMPLETED')); + } + + await kafkaActivities.emitGoalEvent({ + eventType: 'GOAL_COMPLETED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + payload: { + stepsCompleted: completedSteps, + totalSteps: state.steps.length, + }, + }).catch(() => {}); + + return { + goalRunId: input.goalRunId, + status: 'COMPLETED', + completedAt: new Date().toISOString(), + summary: `Successfully completed ${completedSteps}/${state.steps.length} steps`, + stepsCompleted: completedSteps, + totalDurationMs: Date.now() - new Date(state.startedAt).getTime(), + finalOutcome: state.stepResults[state.stepResults.length - 1]?.actualOutcome, + artifacts: allArtifacts, + knowledgeGained: state.accumulatedKnowledge, + }; + + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + const errorType = error instanceof ApplicationFailure ? error.type ?? 'UNKNOWN' : 'UNKNOWN'; + + // Phase 10.2 + 10.5: Version-safe search attribute update + if (patched(WORKFLOW_PATCHES.V1_1_SEARCH_ATTRIBUTES)) { + upsertSearchAttributes(createErrorSearchAttributes(errorType)); + } + + await kafkaActivities.emitGoalEvent({ + eventType: 'GOAL_FAILED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + payload: { + errorType, + errorMessage, + failedStep: state.currentStepIndex, + }, + }).catch(() => {}); + + return { + goalRunId: input.goalRunId, + status: 'FAILED', + completedAt: new Date().toISOString(), + summary: `Goal failed: ${errorMessage}`, + stepsCompleted: state.stepResults.filter((r) => r.status === 'COMPLETED').length, + totalDurationMs: Date.now() - new Date(state.startedAt).getTime(), + errorDetails: { + errorType, + errorMessage, + failedStep: state.currentStepIndex, + recoverable: errorType !== 'FATAL_ERROR', + }, + artifacts: [], + knowledgeGained: state.accumulatedKnowledge, + }; + } +} diff --git a/packages/bytebot-temporal-worker/src/workflows/goal-run.workflow.update-contract.spec.ts b/packages/bytebot-temporal-worker/src/workflows/goal-run.workflow.update-contract.spec.ts new file mode 100644 index 000000000..6208c81bc --- /dev/null +++ b/packages/bytebot-temporal-worker/src/workflows/goal-run.workflow.update-contract.spec.ts @@ -0,0 +1,309 @@ +import { TestWorkflowEnvironment } from '@temporalio/testing'; +import { Worker } from '@temporalio/worker'; +import { temporal } from '@temporalio/proto'; + +import type { PlanGoalInput, PlanGoalOutput } from '../types/goal-run.types'; + +jest.setTimeout(60_000); + +describe('GoalRunWorkflow Update contract', () => { + let env: TestWorkflowEnvironment; + + beforeAll(async () => { + env = await TestWorkflowEnvironment.createTimeSkipping(); + + // GoalRunWorkflow upserts custom search attributes; register them so the test server accepts updates. + try { + await env.connection.operatorService.addSearchAttributes({ + namespace: env.namespace ?? 'default', + searchAttributes: { + ByteBotTenantId: temporal.api.enums.v1.IndexedValueType.INDEXED_VALUE_TYPE_KEYWORD, + ByteBotGoalRunId: temporal.api.enums.v1.IndexedValueType.INDEXED_VALUE_TYPE_KEYWORD, + ByteBotPhase: temporal.api.enums.v1.IndexedValueType.INDEXED_VALUE_TYPE_KEYWORD, + ByteBotStepCount: temporal.api.enums.v1.IndexedValueType.INDEXED_VALUE_TYPE_INT, + ByteBotHasHighRiskSteps: temporal.api.enums.v1.IndexedValueType.INDEXED_VALUE_TYPE_BOOL, + ByteBotIsAwaitingApproval: temporal.api.enums.v1.IndexedValueType.INDEXED_VALUE_TYPE_BOOL, + ByteBotErrorType: temporal.api.enums.v1.IndexedValueType.INDEXED_VALUE_TYPE_KEYWORD, + }, + }); + } catch (error: any) { + const message = String(error?.message ?? error); + if (!message.includes('ALREADY_EXISTS') && !message.toLowerCase().includes('already exists')) { + throw error; + } + } + }); + + afterAll(async () => { + await env.teardown(); + }); + + it('registers and accepts the userPromptResolved Update (no "Update handler missing" regression)', async () => { + const taskQueue = `tq-${Math.random().toString(16).slice(2)}`; + + let planCalls = 0; + + const activities = { + planGoal: async (_input: PlanGoalInput): Promise => { + planCalls += 1; + if (planCalls === 1) { + return { + kind: 'GOAL_INTAKE_REQUIRED', + promptId: 'pr-1', + goalSpecId: 'gs-1', + reason: 'GOAL_SPEC_INCOMPLETE', + }; + } + return { + kind: 'PLAN', + steps: [ + { + stepNumber: 1, + description: 'Execute a simple step after goal intake', + expectedOutcome: 'Step completes successfully', + isHighRisk: false, + dependencies: [], + estimatedDurationMs: 1000, + }, + ], + planSummary: 'Single step plan', + confidence: 0.9, + }; + }, + executeStep: async () => ({ + success: true, + outcome: 'ok', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + }), + verifyStep: async () => ({ + verified: true, + verificationDetails: 'ok', + suggestReplan: false, + }), + classifyFailure: async () => ({ + category: 'TRANSIENT' as const, + retryable: true, + suggestedAction: 'RETRY' as const, + }), + emitGoalEvent: async () => {}, + emitStepEvent: async () => {}, + emitAuditEvent: async () => {}, + }; + + const worker = await Worker.create({ + connection: env.nativeConnection, + taskQueue, + workflowsPath: require.resolve('./goal-run.workflow'), + activities, + }); + + await worker.runUntil(async () => { + const workflowId = `goal-run-${Math.random().toString(16).slice(2)}`; + + const handle = await env.client.workflow.start('goalRunWorkflow', { + taskQueue, + workflowId, + args: [ + { + goalRunId: 'gr-1', + tenantId: 't-1', + userId: 'u-1', + goalDescription: 'Test goal intake before planning', + }, + ], + }); + + // Let the workflow run its first planning attempt and enter WAITING_USER_INPUT. + await env.sleep('1s'); + + const progress = await handle.query('getProgress'); + expect(progress.phase).toBe('WAITING_USER_INPUT'); + + const updateResult = await (handle as any).executeUpdate('userPromptResolved', { + args: [{ promptId: 'pr-1', answers: { notes: 'details' } }], + updateId: 'user_prompt.resume:pr-1', + }); + + expect(updateResult).toEqual(expect.objectContaining({ accepted: true, applied: true })); + + const result = await handle.result(); + expect(result.status).toBe('COMPLETED'); + expect(planCalls).toBeGreaterThanOrEqual(2); + }); + }); + + it('enters WAITING_PROVIDER and resumes execution after backoff (no retry storm)', async () => { + const taskQueue = `tq-${Math.random().toString(16).slice(2)}`; + + let executeCalls = 0; + + const activities = { + planGoal: async (): Promise => ({ + kind: 'PLAN', + steps: [ + { + stepNumber: 1, + description: 'Perform a desktop action that depends on provider capacity', + expectedOutcome: 'Step completes successfully', + isHighRisk: false, + dependencies: [], + estimatedDurationMs: 1000, + }, + ], + planSummary: 'Single step plan', + confidence: 0.9, + }), + executeStep: async () => { + executeCalls += 1; + if (executeCalls === 1) { + return { + success: false, + outcome: 'Waiting for provider/model capacity', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: true, + error: 'LiteLLM proxy is unreachable', + }; + } + return { + success: true, + outcome: 'ok', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + waitingForUserInput: false, + waitingForProvider: false, + }; + }, + verifyStep: async () => ({ + verified: true, + verificationDetails: 'ok', + suggestReplan: false, + }), + classifyFailure: async () => ({ + category: 'TRANSIENT' as const, + retryable: true, + suggestedAction: 'RETRY' as const, + }), + emitGoalEvent: async () => {}, + emitStepEvent: async () => {}, + emitAuditEvent: async () => {}, + }; + + const worker = await Worker.create({ + connection: env.nativeConnection, + taskQueue, + workflowsPath: require.resolve('./goal-run.workflow'), + activities, + }); + + await worker.runUntil(async () => { + const workflowId = `provider-wait-${Math.random().toString(16).slice(2)}`; + + const handle = await env.client.workflow.start('goalRunWorkflow', { + taskQueue, + workflowId, + args: [ + { + goalRunId: 'gr-provider-wait-1', + tenantId: 't-1', + userId: 'u-1', + goalDescription: 'Test provider wait state', + }, + ], + }); + + await env.sleep('1s'); + + const progress = await handle.query('getProgress'); + expect(progress.phase).toBe('WAITING_PROVIDER'); + expect(progress.isWaitingProvider).toBe(true); + + // Base backoff is 30s; time-skipping env will fast-forward timers. + await env.sleep('31s'); + + const result = await handle.result(); + expect(result.status).toBe('COMPLETED'); + expect(executeCalls).toBeGreaterThanOrEqual(2); + }); + }); + + it('supports CAPABILITY_PROBE mode without running activities (runtime probe safety)', async () => { + const taskQueue = `tq-${Math.random().toString(16).slice(2)}`; + + const activities = { + planGoal: jest.fn(async () => { + throw new Error('planGoal must not be called in CAPABILITY_PROBE mode'); + }), + executeStep: jest.fn(async () => { + throw new Error('executeStep must not be called in CAPABILITY_PROBE mode'); + }), + verifyStep: jest.fn(async () => { + throw new Error('verifyStep must not be called in CAPABILITY_PROBE mode'); + }), + classifyFailure: jest.fn(async () => ({ + category: 'TRANSIENT' as const, + retryable: true, + suggestedAction: 'RETRY' as const, + })), + emitGoalEvent: jest.fn(async () => { + throw new Error('emitGoalEvent must not be called in CAPABILITY_PROBE mode'); + }), + emitStepEvent: jest.fn(async () => { + throw new Error('emitStepEvent must not be called in CAPABILITY_PROBE mode'); + }), + emitAuditEvent: jest.fn(async () => { + throw new Error('emitAuditEvent must not be called in CAPABILITY_PROBE mode'); + }), + }; + + const worker = await Worker.create({ + connection: env.nativeConnection, + taskQueue, + workflowsPath: require.resolve('./goal-run.workflow'), + activities, + }); + + await worker.runUntil(async () => { + const workflowId = `probe-${Math.random().toString(16).slice(2)}`; + const handle = await env.client.workflow.start('goalRunWorkflow', { + taskQueue, + workflowId, + workflowExecutionTimeout: '1m', + args: [ + { + goalRunId: workflowId, + tenantId: 'system', + userId: 'system', + goalDescription: 'Temporal capability probe', + mode: 'CAPABILITY_PROBE', + }, + ], + }); + + await env.sleep('1s'); + + const progress = await handle.query('getProgress'); + expect(progress.phase).toBe('WAITING_USER_INPUT'); + + const updateResult = await (handle as any).executeUpdate('userPromptResolved', { + args: [{ promptId: workflowId, answers: { ok: true } }], + updateId: `temporal_capability_probe:${workflowId}`, + }); + expect(updateResult).toEqual(expect.objectContaining({ accepted: true, applied: true })); + + const result = await handle.result(); + expect(result.status).toBe('COMPLETED'); + expect(result.summary).toBe('CAPABILITY_PROBE_OK'); + + expect(activities.planGoal).not.toHaveBeenCalled(); + expect(activities.emitGoalEvent).not.toHaveBeenCalled(); + }); + }); +}); diff --git a/packages/bytebot-temporal-worker/src/workflows/index.ts b/packages/bytebot-temporal-worker/src/workflows/index.ts new file mode 100644 index 000000000..d3baa9920 --- /dev/null +++ b/packages/bytebot-temporal-worker/src/workflows/index.ts @@ -0,0 +1,7 @@ +/** + * Workflow Exports + * + * Export all workflows for the Temporal worker. + */ + +export * from './goal-run.workflow'; diff --git a/packages/bytebot-temporal-worker/test/chaos/README.md b/packages/bytebot-temporal-worker/test/chaos/README.md new file mode 100644 index 000000000..aeed80f2c --- /dev/null +++ b/packages/bytebot-temporal-worker/test/chaos/README.md @@ -0,0 +1,102 @@ +# Chaos Testing for ByteBot Temporal Worker + +This directory contains Chaos Mesh manifests for testing system resilience. + +## Prerequisites + +1. **Install Chaos Mesh**: + ```bash + helm repo add chaos-mesh https://charts.chaos-mesh.org + kubectl create ns chaos-mesh + helm install chaos-mesh chaos-mesh/chaos-mesh -n chaos-mesh --version 2.6.0 + ``` + +2. **Verify Installation**: + ```bash + kubectl get pods -n chaos-mesh + ``` + +## Test Categories + +### Pod Failure Tests (`pod-failure.yaml`) +- **pod-kill**: Sudden pod termination +- **pod-failure**: Pod enters failed state +- **container-kill**: OOMKill simulation + +### Network Chaos (`network-chaos.yaml`) +- **delay**: 200ms network latency +- **loss**: 25% packet loss +- **partition**: Network isolation +- **bandwidth**: 1mbps rate limit +- **dns**: DNS resolution failures + +### Stress Tests (`stress-chaos.yaml`) +- **cpu-stress**: 80% CPU load +- **memory-stress**: 256MB memory consumption +- **combined-stress**: CPU + memory pressure + +### Workflow (`chaos-workflow.yaml`) +- Complete resilience test scenario +- Scheduled weekly testing + +## Running Tests + +### Individual Tests +```bash +# Apply pod failure chaos +kubectl apply -f test/chaos/pod-failure.yaml + +# Monitor chaos experiments +kubectl get podchaos -n bytebot + +# Check workflow status during chaos +temporal workflow list --query "WorkflowType='goalRunWorkflow'" + +# Remove chaos +kubectl delete -f test/chaos/pod-failure.yaml +``` + +### Full Workflow +```bash +# Run complete resilience test +kubectl apply -f test/chaos/chaos-workflow.yaml + +# Monitor workflow progress +kubectl get workflow temporal-resilience-test -n bytebot -w + +# Check events +kubectl describe workflow temporal-resilience-test -n bytebot +``` + +## Monitoring During Chaos + +```bash +# Watch worker pods +kubectl get pods -n bytebot -l app=bytebot-temporal-worker -w + +# Check Temporal metrics +kubectl port-forward svc/temporal-frontend 7233:7233 -n temporal +temporal workflow list + +# Check Kafka lag +kubectl exec -it core-cluster-kafka-0 -n kafka -- \ + kafka-consumer-groups.sh --bootstrap-server localhost:9092 \ + --describe --group bytebot-consumers +``` + +## Expected Behaviors + +| Chaos Type | Expected Behavior | +|------------|-------------------| +| Pod Kill | Workflow continues on other workers, no data loss | +| Network Delay | Increased latency, activities may timeout and retry | +| Network Partition | Activities fail, Temporal retries when connectivity restored | +| CPU Stress | Slower execution, may trigger activity timeouts | +| Memory Stress | OOMKill possible, workflow resumes on restart | + +## Success Criteria + +1. **No data loss**: All in-flight workflows complete or resume correctly +2. **Automatic recovery**: System recovers without manual intervention +3. **Event ordering**: Kafka events maintain correct sequence +4. **Graceful degradation**: Error rates increase but system remains available diff --git a/packages/bytebot-temporal-worker/test/chaos/chaos-workflow.yaml b/packages/bytebot-temporal-worker/test/chaos/chaos-workflow.yaml new file mode 100644 index 000000000..fe595851e --- /dev/null +++ b/packages/bytebot-temporal-worker/test/chaos/chaos-workflow.yaml @@ -0,0 +1,154 @@ +# Chaos Mesh Workflow +# Orchestrates a complete chaos testing scenario +# +# This workflow tests the entire system resilience by: +# 1. Starting with network chaos +# 2. Adding pod failures +# 3. Injecting stress +# 4. Testing recovery +# +# Usage: +# kubectl apply -f test/chaos/chaos-workflow.yaml +# kubectl get workflow temporal-resilience-test -n bytebot +--- +apiVersion: chaos-mesh.org/v1alpha1 +kind: Workflow +metadata: + name: temporal-resilience-test + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: workflow +spec: + entry: resilience-test-entry + templates: + - name: resilience-test-entry + templateType: Serial + children: + - baseline-check + - network-degradation + - recovery-check-1 + - pod-failure-test + - recovery-check-2 + - stress-test + - final-check + + # Step 1: Baseline check (no chaos) + - name: baseline-check + templateType: Suspend + suspend: + duration: "30s" + + # Step 2: Network degradation + - name: network-degradation + templateType: NetworkChaos + networkChaos: + action: delay + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + delay: + latency: "100ms" + jitter: "50ms" + direction: both + duration: "60s" + + # Step 3: Recovery check after network chaos + - name: recovery-check-1 + templateType: Suspend + suspend: + duration: "30s" + + # Step 4: Pod failure test + - name: pod-failure-test + templateType: PodChaos + podChaos: + action: pod-failure + mode: one + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + duration: "60s" + + # Step 5: Recovery check after pod failure + - name: recovery-check-2 + templateType: Suspend + suspend: + duration: "45s" + + # Step 6: Stress test + - name: stress-test + templateType: StressChaos + stressChaos: + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + stressors: + cpu: + workers: 1 + load: 60 + duration: "60s" + + # Step 7: Final health check + - name: final-check + templateType: Suspend + suspend: + duration: "30s" +--- +# Schedule for regular chaos testing (optional) +apiVersion: chaos-mesh.org/v1alpha1 +kind: Schedule +metadata: + name: weekly-resilience-test + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: schedule +spec: + schedule: "0 2 * * 0" # Every Sunday at 2 AM + type: Workflow + historyLimit: 5 + concurrencyPolicy: Forbid + workflow: + entry: resilience-test-entry + templates: + - name: resilience-test-entry + templateType: Serial + children: + - quick-pod-test + - quick-network-test + + - name: quick-pod-test + templateType: PodChaos + podChaos: + action: pod-kill + mode: one + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + duration: "30s" + + - name: quick-network-test + templateType: NetworkChaos + networkChaos: + action: delay + mode: one + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + delay: + latency: "50ms" + direction: both + duration: "30s" diff --git a/packages/bytebot-temporal-worker/test/chaos/network-chaos.yaml b/packages/bytebot-temporal-worker/test/chaos/network-chaos.yaml new file mode 100644 index 000000000..dee4ae84b --- /dev/null +++ b/packages/bytebot-temporal-worker/test/chaos/network-chaos.yaml @@ -0,0 +1,160 @@ +# Chaos Mesh Network Chaos Tests +# Tests Temporal worker resilience to network issues +# +# Tests include: +# - Network latency injection +# - Packet loss simulation +# - Network partition between clusters +# - DNS failures +--- +# Network Latency - Simulate slow network +apiVersion: chaos-mesh.org/v1alpha1 +kind: NetworkChaos +metadata: + name: temporal-worker-network-delay + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: network-chaos +spec: + action: delay + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + delay: + latency: "200ms" + correlation: "50" + jitter: "50ms" + target: + selector: + namespaces: + - temporal + labelSelectors: + app.kubernetes.io/component: frontend + mode: all + direction: both + duration: "120s" +--- +# Packet Loss - Simulate unreliable network +apiVersion: chaos-mesh.org/v1alpha1 +kind: NetworkChaos +metadata: + name: temporal-worker-packet-loss + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: network-chaos +spec: + action: loss + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + loss: + loss: "25" + correlation: "50" + target: + selector: + namespaces: + - temporal + labelSelectors: + app.kubernetes.io/component: frontend + mode: all + direction: both + duration: "60s" +--- +# Network Partition - Simulate ClusterMesh partition +apiVersion: chaos-mesh.org/v1alpha1 +kind: NetworkChaos +metadata: + name: clustermesh-partition + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: network-chaos +spec: + action: partition + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + target: + selector: + namespaces: + - temporal + mode: all + direction: both + duration: "30s" +--- +# Bandwidth Limit - Simulate constrained network +apiVersion: chaos-mesh.org/v1alpha1 +kind: NetworkChaos +metadata: + name: temporal-worker-bandwidth-limit + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: network-chaos +spec: + action: bandwidth + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + bandwidth: + rate: "1mbps" + limit: 100 + buffer: 10000 + direction: both + duration: "120s" +--- +# DNS Chaos - Simulate DNS failures +apiVersion: chaos-mesh.org/v1alpha1 +kind: DNSChaos +metadata: + name: temporal-dns-chaos + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: dns-chaos +spec: + action: error + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + patterns: + - "temporal-frontend.temporal.svc.cluster.local" + duration: "30s" +--- +# Kafka DNS Chaos - Test Kafka connectivity resilience +apiVersion: chaos-mesh.org/v1alpha1 +kind: DNSChaos +metadata: + name: kafka-dns-chaos + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: dns-chaos +spec: + action: random + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + patterns: + - "core-cluster-kafka-bootstrap.kafka.svc.cluster.local" + duration: "60s" diff --git a/packages/bytebot-temporal-worker/test/chaos/pod-failure.yaml b/packages/bytebot-temporal-worker/test/chaos/pod-failure.yaml new file mode 100644 index 000000000..fe47dde2e --- /dev/null +++ b/packages/bytebot-temporal-worker/test/chaos/pod-failure.yaml @@ -0,0 +1,111 @@ +# Chaos Mesh Pod Failure Tests +# Tests Temporal worker resilience to pod failures +# +# Prerequisites: +# - Chaos Mesh installed: helm install chaos-mesh chaos-mesh/chaos-mesh -n chaos-mesh +# - RBAC configured for chaos testing namespace +# +# Usage: +# kubectl apply -f test/chaos/pod-failure.yaml +# # Monitor workflow behavior during chaos +# kubectl delete -f test/chaos/pod-failure.yaml +--- +# Pod Kill Chaos - Simulates sudden pod termination +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: temporal-worker-pod-kill + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: pod-failure +spec: + action: pod-kill + mode: one # Kill one random pod + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + scheduler: + cron: "@every 2m" # Kill a pod every 2 minutes + duration: "30s" +--- +# Pod Failure Chaos - Simulates pod entering failed state +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: temporal-worker-pod-failure + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: pod-failure +spec: + action: pod-failure + mode: fixed-percent + value: "50" # Fail 50% of pods + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + duration: "60s" +--- +# Container Kill - Simulates OOMKill or crash +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: temporal-worker-container-kill + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: pod-failure +spec: + action: container-kill + mode: one + containerNames: + - temporal-worker + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + duration: "30s" +--- +# Temporal Server Pod Kill - Test worker behavior when Temporal is unavailable +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: temporal-server-pod-kill + namespace: temporal + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: temporal-failure +spec: + action: pod-kill + mode: one + selector: + namespaces: + - temporal + labelSelectors: + app.kubernetes.io/component: frontend + duration: "60s" +--- +# Kafka Pod Kill - Test event emission resilience +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: kafka-broker-pod-kill + namespace: kafka + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: kafka-failure +spec: + action: pod-kill + mode: one + selector: + namespaces: + - kafka + labelSelectors: + strimzi.io/kind: Kafka + duration: "60s" diff --git a/packages/bytebot-temporal-worker/test/chaos/stress-chaos.yaml b/packages/bytebot-temporal-worker/test/chaos/stress-chaos.yaml new file mode 100644 index 000000000..6f82ff068 --- /dev/null +++ b/packages/bytebot-temporal-worker/test/chaos/stress-chaos.yaml @@ -0,0 +1,98 @@ +# Chaos Mesh Stress Chaos Tests +# Tests Temporal worker resilience under resource pressure +# +# Tests include: +# - CPU stress +# - Memory stress +# - I/O stress +--- +# CPU Stress - Simulate high CPU usage +apiVersion: chaos-mesh.org/v1alpha1 +kind: StressChaos +metadata: + name: temporal-worker-cpu-stress + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: stress-chaos +spec: + mode: one + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + stressors: + cpu: + workers: 2 + load: 80 # 80% CPU load + duration: "120s" +--- +# Memory Stress - Simulate memory pressure +apiVersion: chaos-mesh.org/v1alpha1 +kind: StressChaos +metadata: + name: temporal-worker-memory-stress + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: stress-chaos +spec: + mode: one + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + stressors: + memory: + workers: 2 + size: "256MB" + duration: "120s" +--- +# Combined Stress - CPU + Memory +apiVersion: chaos-mesh.org/v1alpha1 +kind: StressChaos +metadata: + name: temporal-worker-combined-stress + namespace: bytebot + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: stress-chaos +spec: + mode: all + selector: + namespaces: + - bytebot + labelSelectors: + app: bytebot-temporal-worker + stressors: + cpu: + workers: 1 + load: 50 + memory: + workers: 1 + size: "128MB" + duration: "180s" +--- +# Temporal Server Stress - Test degraded Temporal performance +apiVersion: chaos-mesh.org/v1alpha1 +kind: StressChaos +metadata: + name: temporal-server-stress + namespace: temporal + labels: + app.kubernetes.io/part-of: bytebot + chaos-test: stress-chaos +spec: + mode: one + selector: + namespaces: + - temporal + labelSelectors: + app.kubernetes.io/component: frontend + stressors: + cpu: + workers: 2 + load: 70 + duration: "120s" diff --git a/packages/bytebot-temporal-worker/test/jest-e2e.json b/packages/bytebot-temporal-worker/test/jest-e2e.json new file mode 100644 index 000000000..773b89949 --- /dev/null +++ b/packages/bytebot-temporal-worker/test/jest-e2e.json @@ -0,0 +1,26 @@ +{ + "moduleFileExtensions": ["js", "json", "ts"], + "rootDir": "..", + "testEnvironment": "node", + "testRegex": ".e2e-spec.ts$", + "transform": { + "^.+\\.(t|j)s$": "ts-jest" + }, + "moduleNameMapper": { + "^@/(.*)$": "/src/$1" + }, + "collectCoverageFrom": [ + "src/**/*.ts", + "!src/**/*.d.ts", + "!src/main.ts", + "!src/worker.ts" + ], + "coverageDirectory": "./coverage-e2e", + "testTimeout": 120000, + "setupFilesAfterEnv": ["/test/setup.ts"], + "globals": { + "ts-jest": { + "tsconfig": "/tsconfig.json" + } + } +} diff --git a/packages/bytebot-temporal-worker/test/load/workflow-load-test.ts b/packages/bytebot-temporal-worker/test/load/workflow-load-test.ts new file mode 100644 index 000000000..353c5d7c8 --- /dev/null +++ b/packages/bytebot-temporal-worker/test/load/workflow-load-test.ts @@ -0,0 +1,481 @@ +/** + * Workflow Load Testing Script + * + * Tests concurrent workflow execution and measures performance metrics. + * Uses Temporal's workflow client to simulate production load. + * + * Usage: + * npx ts-node test/load/workflow-load-test.ts [options] + * + * Options: + * --workflows=N Number of concurrent workflows (default: 10) + * --duration=N Test duration in seconds (default: 60) + * --ramp-up=N Ramp-up period in seconds (default: 10) + * --temporal-address Temporal server address (default: localhost:7233) + */ + +import { Connection, Client } from '@temporalio/client'; +import { v4 as uuidv4 } from 'uuid'; + +// ============================================================================ +// Configuration +// ============================================================================ + +interface LoadTestConfig { + temporalAddress: string; + namespace: string; + taskQueue: string; + workflowCount: number; + durationSeconds: number; + rampUpSeconds: number; + tenantId: string; +} + +function parseArgs(): LoadTestConfig { + const args = process.argv.slice(2); + const config: LoadTestConfig = { + temporalAddress: process.env.TEMPORAL_ADDRESS || 'localhost:7233', + namespace: process.env.TEMPORAL_NAMESPACE || 'default', + taskQueue: process.env.TEMPORAL_TASK_QUEUE || 'goal-runs', + workflowCount: 10, + durationSeconds: 60, + rampUpSeconds: 10, + tenantId: 'load-test-tenant', + }; + + for (const arg of args) { + if (arg.startsWith('--workflows=')) { + config.workflowCount = parseInt(arg.split('=')[1], 10); + } else if (arg.startsWith('--duration=')) { + config.durationSeconds = parseInt(arg.split('=')[1], 10); + } else if (arg.startsWith('--ramp-up=')) { + config.rampUpSeconds = parseInt(arg.split('=')[1], 10); + } else if (arg.startsWith('--temporal-address=')) { + config.temporalAddress = arg.split('=')[1]; + } + } + + return config; +} + +// ============================================================================ +// Metrics Collection +// ============================================================================ + +interface WorkflowMetrics { + workflowId: string; + startTime: number; + endTime?: number; + status: 'pending' | 'running' | 'completed' | 'failed'; + duration?: number; + error?: string; +} + +interface LoadTestResults { + startTime: Date; + endTime: Date; + totalWorkflows: number; + completedWorkflows: number; + failedWorkflows: number; + avgDurationMs: number; + minDurationMs: number; + maxDurationMs: number; + p50DurationMs: number; + p95DurationMs: number; + p99DurationMs: number; + throughputPerSecond: number; + errorRate: number; +} + +class MetricsCollector { + private metrics: Map = new Map(); + + start(workflowId: string): void { + this.metrics.set(workflowId, { + workflowId, + startTime: Date.now(), + status: 'running', + }); + } + + complete(workflowId: string): void { + const metric = this.metrics.get(workflowId); + if (metric) { + metric.endTime = Date.now(); + metric.duration = metric.endTime - metric.startTime; + metric.status = 'completed'; + } + } + + fail(workflowId: string, error: string): void { + const metric = this.metrics.get(workflowId); + if (metric) { + metric.endTime = Date.now(); + metric.duration = metric.endTime - metric.startTime; + metric.status = 'failed'; + metric.error = error; + } + } + + getResults(startTime: Date, endTime: Date): LoadTestResults { + const durations: number[] = []; + let completed = 0; + let failed = 0; + + for (const metric of this.metrics.values()) { + if (metric.status === 'completed' && metric.duration) { + completed++; + durations.push(metric.duration); + } else if (metric.status === 'failed') { + failed++; + } + } + + durations.sort((a, b) => a - b); + + const percentile = (p: number) => { + if (durations.length === 0) return 0; + const idx = Math.ceil((p / 100) * durations.length) - 1; + return durations[Math.max(0, idx)]; + }; + + const testDurationSeconds = (endTime.getTime() - startTime.getTime()) / 1000; + + return { + startTime, + endTime, + totalWorkflows: this.metrics.size, + completedWorkflows: completed, + failedWorkflows: failed, + avgDurationMs: durations.length > 0 + ? durations.reduce((a, b) => a + b, 0) / durations.length + : 0, + minDurationMs: durations.length > 0 ? durations[0] : 0, + maxDurationMs: durations.length > 0 ? durations[durations.length - 1] : 0, + p50DurationMs: percentile(50), + p95DurationMs: percentile(95), + p99DurationMs: percentile(99), + throughputPerSecond: completed / testDurationSeconds, + errorRate: this.metrics.size > 0 ? (failed / this.metrics.size) * 100 : 0, + }; + } +} + +// ============================================================================ +// Load Test Runner +// ============================================================================ + +async function runLoadTest(config: LoadTestConfig): Promise { + console.log('='.repeat(60)); + console.log('ByteBot Temporal Workflow Load Test'); + console.log('='.repeat(60)); + console.log(`Configuration:`); + console.log(` Temporal Address: ${config.temporalAddress}`); + console.log(` Namespace: ${config.namespace}`); + console.log(` Task Queue: ${config.taskQueue}`); + console.log(` Concurrent Workflows: ${config.workflowCount}`); + console.log(` Duration: ${config.durationSeconds}s`); + console.log(` Ramp-up: ${config.rampUpSeconds}s`); + console.log('='.repeat(60)); + + // Connect to Temporal + const connection = await Connection.connect({ + address: config.temporalAddress, + }); + + const client = new Client({ + connection, + namespace: config.namespace, + }); + + const collector = new MetricsCollector(); + const startTime = new Date(); + const endTime = new Date(startTime.getTime() + config.durationSeconds * 1000); + + // Calculate workflow start intervals for ramp-up + const intervalMs = (config.rampUpSeconds * 1000) / config.workflowCount; + + console.log('\nStarting workflows with ramp-up...\n'); + + // Start workflows with ramp-up + const workflowPromises: Promise[] = []; + + for (let i = 0; i < config.workflowCount; i++) { + const delay = i * intervalMs; + + const promise = new Promise((resolve) => { + setTimeout(async () => { + const workflowId = `load-test-${uuidv4()}`; + + try { + collector.start(workflowId); + + const handle = await client.workflow.start('goalRunWorkflow', { + args: [{ + goalRunId: workflowId, + tenantId: config.tenantId, + userId: 'load-test-user', + goalDescription: `Load test goal ${i + 1} of ${config.workflowCount}`, + workspaceId: 'load-test-workspace', + constraints: { + maxSteps: 5, + maxRetries: 2, + maxReplans: 1, + timeoutMs: 300000, + requireApprovalForHighRisk: false, + }, + }], + taskQueue: config.taskQueue, + workflowId, + }); + + // Wait for completion + await handle.result(); + collector.complete(workflowId); + + process.stdout.write('.'); + } catch (error) { + collector.fail(workflowId, error instanceof Error ? error.message : 'Unknown error'); + process.stdout.write('x'); + } + + resolve(); + }, delay); + }); + + workflowPromises.push(promise); + } + + // Wait for all workflows to complete + await Promise.all(workflowPromises); + + const actualEndTime = new Date(); + console.log('\n\nAll workflows completed.'); + + // Generate results + const results = collector.getResults(startTime, actualEndTime); + + // Print results + console.log('\n' + '='.repeat(60)); + console.log('Load Test Results'); + console.log('='.repeat(60)); + console.log(`Total Workflows: ${results.totalWorkflows}`); + console.log(`Completed: ${results.completedWorkflows}`); + console.log(`Failed: ${results.failedWorkflows}`); + console.log(`Error Rate: ${results.errorRate.toFixed(2)}%`); + console.log(''); + console.log('Latency Metrics:'); + console.log(` Average: ${results.avgDurationMs.toFixed(2)}ms`); + console.log(` Min: ${results.minDurationMs}ms`); + console.log(` Max: ${results.maxDurationMs}ms`); + console.log(` P50: ${results.p50DurationMs}ms`); + console.log(` P95: ${results.p95DurationMs}ms`); + console.log(` P99: ${results.p99DurationMs}ms`); + console.log(''); + console.log(`Throughput: ${results.throughputPerSecond.toFixed(2)} workflows/second`); + console.log('='.repeat(60)); + + await connection.close(); + + return results; +} + +// ============================================================================ +// Stress Test (Burst Load) +// ============================================================================ + +async function runStressTest(config: LoadTestConfig): Promise { + console.log('\n' + '='.repeat(60)); + console.log('Stress Test: Burst Load'); + console.log('='.repeat(60)); + + const connection = await Connection.connect({ + address: config.temporalAddress, + }); + + const client = new Client({ + connection, + namespace: config.namespace, + }); + + // Start all workflows simultaneously + const workflowIds: string[] = []; + const startPromises: Promise[] = []; + + console.log(`Starting ${config.workflowCount} workflows simultaneously...`); + + for (let i = 0; i < config.workflowCount; i++) { + const workflowId = `stress-test-${uuidv4()}`; + workflowIds.push(workflowId); + + startPromises.push( + client.workflow.start('goalRunWorkflow', { + args: [{ + goalRunId: workflowId, + tenantId: config.tenantId, + userId: 'stress-test-user', + goalDescription: `Stress test goal ${i + 1}`, + workspaceId: 'stress-test-workspace', + constraints: { + maxSteps: 3, + maxRetries: 1, + maxReplans: 1, + timeoutMs: 120000, + requireApprovalForHighRisk: false, + }, + }], + taskQueue: config.taskQueue, + workflowId, + }) + ); + } + + const startTime = Date.now(); + const handles = await Promise.all(startPromises); + const launchDuration = Date.now() - startTime; + + console.log(`All ${config.workflowCount} workflows started in ${launchDuration}ms`); + console.log('Waiting for completion...'); + + // Wait for all to complete + let completed = 0; + let failed = 0; + + for (const handle of handles) { + try { + await handle.result(); + completed++; + } catch { + failed++; + } + } + + const totalDuration = Date.now() - startTime; + + console.log('\nStress Test Results:'); + console.log(` Launch Time: ${launchDuration}ms`); + console.log(` Total Time: ${totalDuration}ms`); + console.log(` Completed: ${completed}`); + console.log(` Failed: ${failed}`); + console.log(` Launch Rate: ${(config.workflowCount / (launchDuration / 1000)).toFixed(2)} workflows/second`); + + await connection.close(); +} + +// ============================================================================ +// Soak Test (Extended Duration) +// ============================================================================ + +async function runSoakTest(config: LoadTestConfig): Promise { + console.log('\n' + '='.repeat(60)); + console.log('Soak Test: Extended Duration'); + console.log('='.repeat(60)); + + const connection = await Connection.connect({ + address: config.temporalAddress, + }); + + const client = new Client({ + connection, + namespace: config.namespace, + }); + + // Run workflows continuously for the duration + const endTime = Date.now() + config.durationSeconds * 1000; + let totalStarted = 0; + let totalCompleted = 0; + let totalFailed = 0; + + const activeWorkflows: Set> = new Set(); + const maxConcurrent = config.workflowCount; + + console.log(`Running with ${maxConcurrent} concurrent workflows for ${config.durationSeconds}s...`); + + const startWorkflow = async (): Promise => { + const workflowId = `soak-test-${uuidv4()}`; + totalStarted++; + + try { + const handle = await client.workflow.start('goalRunWorkflow', { + args: [{ + goalRunId: workflowId, + tenantId: config.tenantId, + userId: 'soak-test-user', + goalDescription: `Soak test goal ${totalStarted}`, + workspaceId: 'soak-test-workspace', + constraints: { + maxSteps: 3, + maxRetries: 1, + maxReplans: 1, + timeoutMs: 120000, + requireApprovalForHighRisk: false, + }, + }], + taskQueue: config.taskQueue, + workflowId, + }); + + await handle.result(); + totalCompleted++; + process.stdout.write('.'); + } catch { + totalFailed++; + process.stdout.write('x'); + } + }; + + // Keep starting new workflows until end time + while (Date.now() < endTime) { + // Fill up to max concurrent + while (activeWorkflows.size < maxConcurrent && Date.now() < endTime) { + const promise = startWorkflow(); + activeWorkflows.add(promise); + promise.finally(() => activeWorkflows.delete(promise)); + } + + // Wait a bit before checking again + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + // Wait for remaining workflows + await Promise.all(activeWorkflows); + + console.log('\n\nSoak Test Results:'); + console.log(` Total Started: ${totalStarted}`); + console.log(` Completed: ${totalCompleted}`); + console.log(` Failed: ${totalFailed}`); + console.log(` Throughput: ${(totalCompleted / config.durationSeconds).toFixed(2)} workflows/second`); + + await connection.close(); +} + +// ============================================================================ +// Main +// ============================================================================ + +async function main(): Promise { + const config = parseArgs(); + + try { + // Run standard load test + await runLoadTest(config); + + // Optionally run stress test + if (process.argv.includes('--stress')) { + await runStressTest(config); + } + + // Optionally run soak test + if (process.argv.includes('--soak')) { + await runSoakTest(config); + } + + console.log('\nLoad testing completed successfully!'); + process.exit(0); + } catch (error) { + console.error('Load test failed:', error); + process.exit(1); + } +} + +main(); diff --git a/packages/bytebot-temporal-worker/test/setup.ts b/packages/bytebot-temporal-worker/test/setup.ts new file mode 100644 index 000000000..72a0a474e --- /dev/null +++ b/packages/bytebot-temporal-worker/test/setup.ts @@ -0,0 +1,31 @@ +/** + * Jest E2E Test Setup + * + * This file runs before all E2E tests to configure the test environment. + */ + +// Increase timeout for workflow tests +jest.setTimeout(120000); + +// Suppress console output during tests (optional) +if (process.env.SUPPRESS_LOGS === 'true') { + global.console = { + ...console, + log: jest.fn(), + debug: jest.fn(), + info: jest.fn(), + warn: jest.fn(), + }; +} + +// Set test environment variables +process.env.NODE_ENV = 'test'; +process.env.TEMPORAL_ADDRESS = process.env.TEMPORAL_ADDRESS || 'localhost:7233'; +process.env.TEMPORAL_NAMESPACE = process.env.TEMPORAL_NAMESPACE || 'test'; +process.env.TEMPORAL_TASK_QUEUE = 'test-goal-runs'; + +// Mock external service URLs +process.env.ORCHESTRATOR_URL = 'http://localhost:3001'; +process.env.TASK_CONTROLLER_URL = 'http://localhost:3002'; +process.env.LLM_PROXY_URL = 'http://localhost:3003'; +process.env.KAFKA_BROKERS = 'localhost:9092'; diff --git a/packages/bytebot-temporal-worker/test/workflows/goal-run.workflow.e2e-spec.ts b/packages/bytebot-temporal-worker/test/workflows/goal-run.workflow.e2e-spec.ts new file mode 100644 index 000000000..a3b41a6a6 --- /dev/null +++ b/packages/bytebot-temporal-worker/test/workflows/goal-run.workflow.e2e-spec.ts @@ -0,0 +1,528 @@ +/** + * GoalRunWorkflow E2E Tests + * + * Tests the complete workflow lifecycle using Temporal's test environment. + * Uses @temporalio/testing for deterministic time control and activity mocking. + */ + +import { TestWorkflowEnvironment } from '@temporalio/testing'; +import { Worker, Runtime, DefaultLogger } from '@temporalio/worker'; +import { Client } from '@temporalio/client'; +import { v4 as uuid } from 'uuid'; + +import { goalRunWorkflow } from '../../src/workflows/goal-run.workflow'; +import type { GoalRunInput, GoalRunResult, Step } from '../../src/types/goal-run.types'; + +// Mock activities +const mockPlanGoal = jest.fn(); +const mockExecuteStep = jest.fn(); +const mockVerifyStep = jest.fn(); +const mockEmitGoalEvent = jest.fn(); +const mockEmitStepEvent = jest.fn(); + +describe('GoalRunWorkflow E2E Tests', () => { + let testEnv: TestWorkflowEnvironment; + let worker: Worker; + let client: Client; + + beforeAll(async () => { + // Reduce Temporal SDK logging noise in tests + Runtime.install({ + logger: new DefaultLogger('WARN'), + }); + + // Create test environment + testEnv = await TestWorkflowEnvironment.createLocal(); + client = testEnv.client; + }); + + afterAll(async () => { + await testEnv?.teardown(); + }); + + beforeEach(() => { + // Reset mocks before each test + jest.clearAllMocks(); + + // Default mock implementations + mockPlanGoal.mockResolvedValue({ + steps: [ + { stepNumber: 1, description: 'Test step 1', expectedOutcome: 'Step 1 complete', isHighRisk: false, dependencies: [] }, + { stepNumber: 2, description: 'Test step 2', expectedOutcome: 'Step 2 complete', isHighRisk: false, dependencies: [1] }, + ], + planSummary: 'Test plan with 2 steps', + confidence: 0.9, + }); + + mockExecuteStep.mockResolvedValue({ + success: true, + outcome: 'Step executed successfully', + artifacts: [], + knowledgeGained: ['Learned something new'], + needsApproval: false, + }); + + mockVerifyStep.mockResolvedValue({ + verified: true, + verificationDetails: 'Step verified successfully', + suggestReplan: false, + }); + + mockEmitGoalEvent.mockResolvedValue(undefined); + mockEmitStepEvent.mockResolvedValue(undefined); + }); + + async function createWorker(): Promise { + return Worker.create({ + connection: testEnv.nativeConnection, + taskQueue: 'test-goal-runs', + workflowsPath: require.resolve('../../src/workflows/goal-run.workflow'), + activities: { + planGoal: mockPlanGoal, + refinePlan: jest.fn(), + executeStep: mockExecuteStep, + verifyStep: mockVerifyStep, + classifyFailure: jest.fn(), + emitGoalEvent: mockEmitGoalEvent, + emitStepEvent: mockEmitStepEvent, + }, + }); + } + + function createTestInput(overrides?: Partial): GoalRunInput { + return { + goalRunId: uuid(), + tenantId: 'test-tenant', + userId: 'test-user', + goalDescription: 'Test goal description', + workspaceId: 'test-workspace', + constraints: { + maxSteps: 10, + maxRetries: 3, + maxReplans: 2, + timeoutMs: 300000, + requireApprovalForHighRisk: true, + }, + ...overrides, + }; + } + + describe('Basic Workflow Execution', () => { + it('should complete a simple 2-step workflow successfully', async () => { + const worker = await createWorker(); + const input = createTestInput(); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-${input.goalRunId}`, + }); + + return handle.result(); + }); + + expect(result.status).toBe('COMPLETED'); + expect(result.stepsCompleted).toBe(2); + expect(mockPlanGoal).toHaveBeenCalledTimes(1); + expect(mockExecuteStep).toHaveBeenCalledTimes(2); + expect(mockVerifyStep).toHaveBeenCalledTimes(2); + expect(mockEmitGoalEvent).toHaveBeenCalled(); + }); + + it('should emit correct Kafka events during workflow execution', async () => { + const worker = await createWorker(); + const input = createTestInput(); + + await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-${input.goalRunId}`, + }); + + return handle.result(); + }); + + // Verify GOAL_STARTED event + expect(mockEmitGoalEvent).toHaveBeenCalledWith( + expect.objectContaining({ + eventType: 'GOAL_STARTED', + goalRunId: input.goalRunId, + tenantId: input.tenantId, + }) + ); + + // Verify GOAL_COMPLETED event + expect(mockEmitGoalEvent).toHaveBeenCalledWith( + expect.objectContaining({ + eventType: 'GOAL_COMPLETED', + goalRunId: input.goalRunId, + }) + ); + + // Verify STEP events + expect(mockEmitStepEvent).toHaveBeenCalledWith( + expect.objectContaining({ + eventType: 'STEP_STARTED', + stepNumber: 1, + }) + ); + }); + }); + + describe('Error Handling and Retries', () => { + it('should retry failed steps up to maxRetries', async () => { + mockExecuteStep + .mockResolvedValueOnce({ + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + error: 'Transient error', + }) + .mockResolvedValueOnce({ + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + error: 'Transient error again', + }) + .mockResolvedValue({ + success: true, + outcome: 'Finally succeeded', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + }); + + const worker = await createWorker(); + const input = createTestInput({ constraints: { maxSteps: 10, maxRetries: 3, maxReplans: 2, timeoutMs: 300000, requireApprovalForHighRisk: true } }); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-retry-${input.goalRunId}`, + }); + + return handle.result(); + }); + + expect(result.status).toBe('COMPLETED'); + expect(mockExecuteStep.mock.calls.length).toBeGreaterThanOrEqual(3); + }); + + it('should trigger replanning when verification fails and suggests replan', async () => { + mockVerifyStep + .mockResolvedValueOnce({ + verified: false, + verificationDetails: 'Step did not achieve expected outcome', + suggestReplan: true, + replanReason: 'Approach was incorrect', + }) + .mockResolvedValue({ + verified: true, + verificationDetails: 'Step verified successfully', + suggestReplan: false, + }); + + const worker = await createWorker(); + const input = createTestInput(); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-replan-${input.goalRunId}`, + }); + + return handle.result(); + }); + + // Should have called planGoal at least twice (initial + replan) + expect(mockPlanGoal.mock.calls.length).toBeGreaterThanOrEqual(2); + }); + + it('should fail after exceeding maxReplans', async () => { + mockVerifyStep.mockResolvedValue({ + verified: false, + verificationDetails: 'Verification failed', + suggestReplan: true, + replanReason: 'Cannot complete step', + }); + + mockExecuteStep.mockResolvedValue({ + success: false, + outcome: '', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + error: 'Persistent failure', + }); + + const worker = await createWorker(); + const input = createTestInput({ + constraints: { maxSteps: 10, maxRetries: 1, maxReplans: 1, timeoutMs: 300000, requireApprovalForHighRisk: true }, + }); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-maxreplan-${input.goalRunId}`, + }); + + return handle.result(); + }); + + expect(result.status).toBe('FAILED'); + expect(result.errorDetails?.errorType).toContain('REPLAN'); + }); + }); + + describe('Signal Handling', () => { + it('should handle pause and resume signals', async () => { + // Make execution slow enough to send signals + mockExecuteStep.mockImplementation(async () => { + await new Promise((resolve) => setTimeout(resolve, 100)); + return { + success: true, + outcome: 'Step executed', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + }; + }); + + const worker = await createWorker(); + const input = createTestInput(); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-pause-${input.goalRunId}`, + }); + + // Send pause signal + await handle.signal('pauseGoal'); + + // Query progress to verify paused state + const progress = await handle.query('getProgress') as { isPaused: boolean }; + expect(progress.isPaused).toBe(true); + + // Resume + await handle.signal('resumeGoal'); + + return handle.result(); + }); + + expect(result.status).toBe('COMPLETED'); + }); + + it('should handle cancel signal and terminate workflow', async () => { + mockExecuteStep.mockImplementation(async () => { + await new Promise((resolve) => setTimeout(resolve, 500)); + return { + success: true, + outcome: 'Step executed', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + }; + }); + + const worker = await createWorker(); + const input = createTestInput(); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-cancel-${input.goalRunId}`, + }); + + // Wait a bit then cancel + await new Promise((resolve) => setTimeout(resolve, 50)); + await handle.signal('cancelGoal', { reason: 'User requested cancellation' }); + + return handle.result(); + }); + + expect(result.status).toBe('CANCELLED'); + expect(result.summary).toContain('cancelled'); + }); + }); + + describe('Query Handling', () => { + it('should return correct progress via getProgress query', async () => { + let progressDuringExecution: { goalRunId: string; totalSteps: number } | null = null; + + mockExecuteStep.mockImplementation(async () => { + return { + success: true, + outcome: 'Step executed', + artifacts: [], + knowledgeGained: [], + needsApproval: false, + }; + }); + + const worker = await createWorker(); + const input = createTestInput(); + + await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-query-${input.goalRunId}`, + }); + + // Query progress + progressDuringExecution = await handle.query('getProgress') as { goalRunId: string; totalSteps: number }; + + return handle.result(); + }); + + expect(progressDuringExecution!.goalRunId).toBe(input.goalRunId); + expect(progressDuringExecution!.totalSteps).toBeGreaterThan(0); + }); + + it('should return checkpoint data via getCheckpoint query', async () => { + const worker = await createWorker(); + const input = createTestInput(); + + await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-checkpoint-${input.goalRunId}`, + }); + + const checkpoint = await handle.query('getCheckpoint') as { goalRunId: string; progressSummary: string }; + expect(checkpoint.goalRunId).toBe(input.goalRunId); + expect(checkpoint.progressSummary).toBeDefined(); + + return handle.result(); + }); + }); + }); + + describe('High-Risk Step Approval', () => { + it('should wait for approval on high-risk steps', async () => { + mockPlanGoal.mockResolvedValue({ + steps: [ + { stepNumber: 1, description: 'High risk step', expectedOutcome: 'Done', isHighRisk: true, dependencies: [] }, + ], + planSummary: 'Plan with high-risk step', + confidence: 0.9, + }); + + const worker = await createWorker(); + const input = createTestInput(); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-approval-${input.goalRunId}`, + }); + + // Wait for approval request + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Check workflow is awaiting approval + const progress = await handle.query('getProgress') as { isAwaitingApproval: boolean }; + expect(progress.isAwaitingApproval).toBe(true); + + // Approve the step + await handle.signal('approveStep', { stepId: 'step-1', approver: 'test-user' }); + + return handle.result(); + }); + + expect(result.status).toBe('COMPLETED'); + expect(mockEmitStepEvent).toHaveBeenCalledWith( + expect.objectContaining({ + eventType: 'STEP_APPROVAL_REQUESTED', + }) + ); + }); + + it('should skip step when rejected', async () => { + mockPlanGoal.mockResolvedValue({ + steps: [ + { stepNumber: 1, description: 'High risk step', expectedOutcome: 'Done', isHighRisk: true, dependencies: [] }, + { stepNumber: 2, description: 'Normal step', expectedOutcome: 'Done', isHighRisk: false, dependencies: [] }, + ], + planSummary: 'Plan with high-risk step', + confidence: 0.9, + }); + + const worker = await createWorker(); + const input = createTestInput(); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-reject-${input.goalRunId}`, + }); + + // Wait for approval request + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Reject the step + await handle.signal('rejectStep', { stepId: 'step-1', reason: 'Too risky' }); + + return handle.result(); + }); + + expect(result.status).toBe('COMPLETED'); + expect(mockEmitStepEvent).toHaveBeenCalledWith( + expect.objectContaining({ + eventType: 'STEP_REJECTED', + }) + ); + }); + }); + + describe('Knowledge Accumulation', () => { + it('should accumulate knowledge across steps', async () => { + mockExecuteStep + .mockResolvedValueOnce({ + success: true, + outcome: 'Step 1 done', + artifacts: [], + knowledgeGained: ['Fact 1: Server is on port 8080'], + needsApproval: false, + }) + .mockResolvedValueOnce({ + success: true, + outcome: 'Step 2 done', + artifacts: [], + knowledgeGained: ['Fact 2: Database connection successful'], + needsApproval: false, + }); + + const worker = await createWorker(); + const input = createTestInput(); + + const result = await worker.runUntil(async () => { + const handle = await client.workflow.start(goalRunWorkflow, { + args: [input], + taskQueue: 'test-goal-runs', + workflowId: `test-knowledge-${input.goalRunId}`, + }); + + return handle.result(); + }); + + expect(result.knowledgeGained).toContain('Fact 1: Server is on port 8080'); + expect(result.knowledgeGained).toContain('Fact 2: Database connection successful'); + }); + }); +}); diff --git a/packages/bytebot-temporal-worker/tsconfig.json b/packages/bytebot-temporal-worker/tsconfig.json new file mode 100644 index 000000000..e36747d87 --- /dev/null +++ b/packages/bytebot-temporal-worker/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "module": "commonjs", + "declaration": true, + "removeComments": true, + "emitDecoratorMetadata": true, + "experimentalDecorators": true, + "allowSyntheticDefaultImports": true, + "target": "ES2022", + "sourceMap": true, + "outDir": "./dist", + "baseUrl": "./", + "incremental": true, + "skipLibCheck": true, + "strictNullChecks": true, + "noImplicitAny": true, + "strictBindCallApply": true, + "forceConsistentCasingInFileNames": true, + "noFallthroughCasesInSwitch": true, + "esModuleInterop": true, + "resolveJsonModule": true, + "paths": { + "@/*": ["src/*"], + "@workflows/*": ["src/workflows/*"], + "@activities/*": ["src/activities/*"], + "@types/*": ["src/types/*"], + "@config/*": ["src/config/*"] + } + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist", "test"] +} diff --git a/packages/bytebot-workflow-orchestrator/.dockerignore b/packages/bytebot-workflow-orchestrator/.dockerignore new file mode 100644 index 000000000..ebee9770f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/.dockerignore @@ -0,0 +1,10 @@ +node_modules +dist +coverage +.git +.github +.env +.env.* +npm-debug.log +yarn-error.log +*.log diff --git a/packages/bytebot-workflow-orchestrator/Dockerfile b/packages/bytebot-workflow-orchestrator/Dockerfile new file mode 100644 index 000000000..9f1411e03 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/Dockerfile @@ -0,0 +1,59 @@ +# ByteBot Workflow Orchestrator +# Multi-stage build for production deployment + +# Build stage +FROM node:20-alpine AS builder + +WORKDIR /app + +# Install dependencies +COPY package*.json ./ +RUN npm install + +# Copy source +COPY . . + +# Generate Prisma client +RUN npx prisma generate + +# Build TypeScript +RUN npm run build + +# Production stage +FROM node:20-alpine AS production + +WORKDIR /app + +# Create non-root user +RUN addgroup -g 1001 -S nodejs && \ + adduser -S nestjs -u 1001 + +# Copy package files +COPY package*.json ./ + +# Install production dependencies only +RUN npm install --only=production + +# Copy Prisma schema and generated client +COPY --from=builder /app/prisma ./prisma +COPY --from=builder /app/node_modules/.prisma ./node_modules/.prisma +COPY --from=builder /app/node_modules/@prisma ./node_modules/@prisma + +# Copy built application +COPY --from=builder /app/dist ./dist + +# Set ownership +RUN chown -R nestjs:nodejs /app + +# Switch to non-root user +USER nestjs + +# Expose port +EXPOSE 8080 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ + CMD wget --no-verbose --tries=1 --spider http://localhost:8080/api/v1/health/live || exit 1 + +# Start application +CMD ["node", "dist/main"] diff --git a/packages/bytebot-workflow-orchestrator/README.md b/packages/bytebot-workflow-orchestrator/README.md new file mode 100644 index 000000000..cbd3fcfd3 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/README.md @@ -0,0 +1,187 @@ +# ByteBot Workflow Orchestrator + +Multi-step workflow orchestration service with persistent workspaces, approval flows, and audit logging. + +## Overview + +The Workflow Orchestrator is a NestJS application that manages complex, multi-step workflow executions with: + +- **Persistent Workspaces**: Workflows execute in persistent desktop environments that maintain state across steps +- **Node-based Execution**: Workflows consist of nodes (TASK, DECISION, PARALLEL, WAIT) with dependency tracking +- **High-Risk Approval Flow** (M5): Human-in-the-loop approval for sensitive operations +- **Idempotency Guarantees** (M5): Exactly-once execution semantics for high-risk actions +- **Webhook Notifications** (Post-M5): Real-time notifications for approval events +- **Audit Logging** (Post-M5): Immutable compliance audit trail for all approval actions +- **Prometheus Metrics** (Post-M5): Comprehensive observability for approval flows + +## Architecture + +``` +┌─────────────────┐ ┌─────────────────────────┐ ┌──────────────────┐ +│ bytebot-api │────▶│ workflow-orchestrator │────▶│ task-controller │ +└─────────────────┘ └─────────────────────────┘ └──────────────────┘ + │ │ + ▼ ▼ + ┌─────────────────────┐ ┌──────────────────┐ + │ PostgreSQL │ │ Desktop Pods │ + └─────────────────────┘ └──────────────────┘ +``` + +## API Endpoints + +### Workflows +- `POST /api/v1/workflows` - Create a new workflow +- `GET /api/v1/workflows/:id` - Get workflow status +- `POST /api/v1/workflows/:id/start` - Start workflow execution +- `POST /api/v1/workflows/:id/cancel` - Cancel workflow + +### Workspaces +- `GET /api/v1/workspaces/:id/status` - Get workspace status +- `POST /api/v1/workspaces/:id/lock` - Acquire workspace lock +- `DELETE /api/v1/workspaces/:id/lock` - Release workspace lock + +### Approvals (M5) +- `POST /api/v1/approvals/request` - Request approval for high-risk action +- `GET /api/v1/approvals` - List pending approvals +- `GET /api/v1/approvals/:id` - Get approval details +- `POST /api/v1/approvals/:id/approve` - Approve request +- `POST /api/v1/approvals/:id/reject` - Reject request +- `GET /api/v1/approvals/stats` - Get approval statistics + +### Webhooks (Post-M5) +- `GET /api/v1/webhooks` - List webhook configurations +- `POST /api/v1/webhooks` - Create webhook +- `PUT /api/v1/webhooks/:id` - Update webhook +- `DELETE /api/v1/webhooks/:id` - Delete webhook +- `POST /api/v1/webhooks/:id/test` - Test webhook +- `POST /api/v1/webhooks/:id/rotate-secret` - Rotate webhook secret + +### Audit (Post-M5) +- `GET /api/v1/audit` - Query audit logs +- `GET /api/v1/audit/approvals/:id` - Get audit trail for approval +- `GET /api/v1/audit/export` - Export audit logs +- `GET /api/v1/audit/stats` - Get audit statistics + +### Health & Metrics +- `GET /api/v1/health/live` - Liveness probe +- `GET /api/v1/health/ready` - Readiness probe +- `GET /api/v1/metrics` - Prometheus metrics + +## Database Schema + +The service uses Prisma ORM with PostgreSQL. Key entities: + +- `Workspace` - Persistent workspace environments +- `WorkflowRun` - Workflow execution instances +- `WorkflowNode` - Workflow step definitions with runtime state +- `WorkflowNodeRun` - Individual node execution attempts +- `ApprovalRequest` - High-risk action approval requests +- `IdempotencyRecord` - Exactly-once execution tracking +- `WebhookConfig` - Webhook endpoint configurations +- `WebhookDelivery` - Webhook delivery records +- `AuditLog` - Immutable compliance audit trail + +## Deployment + +### Prerequisites + +1. PostgreSQL database with a `bytebotdb` database +2. Kubernetes cluster with the `bytebot` namespace +3. Helm 3.x + +### Database Migration + +Before first deployment, run Prisma migrations: + +```bash +# Connect to the database and run migrations +kubectl run -it --rm prisma-migrate \ + --image=jbutler1980/bytebot-workflow-orchestrator:1.0.0 \ + --namespace=bytebot \ + --env="DATABASE_URL=postgresql://..." \ + -- npx prisma migrate deploy +``` + +### Helm Deployment + +```bash +# Deploy to Kubernetes +helm upgrade --install bytebot-workflow-orchestrator \ + ./kubernetes/helm/charts/bytebot-workflow-orchestrator \ + -f ./kubernetes/helm/charts/bytebot-workflow-orchestrator/values-production.yaml \ + --namespace bytebot \ + --kube-context=agent +``` + +### Configuration + +Key environment variables: + +| Variable | Description | Default | +|----------|-------------|---------| +| `DATABASE_URL` | PostgreSQL connection string | Required | +| `TASK_CONTROLLER_URL` | Task controller service URL | `http://bytebot-task-controller:8080` | +| `SCHEDULER_ENABLED` | Enable workflow scheduler | `true` | +| `SCHEDULER_BATCH_SIZE` | Nodes to process per batch | `10` | +| `HIGH_RISK_APPROVAL_EXPIRY_MINUTES` | Approval request timeout | `60` | +| `IDEMPOTENCY_TTL_HOURS` | Idempotency record retention | `24` | +| `WEBHOOK_TIMEOUT_MS` | Webhook delivery timeout | `30000` | +| `AUDIT_LOG_RETENTION_DAYS` | Audit log retention period | `90` | + +## Docker + +### Build + +```bash +docker build -t jbutler1980/bytebot-workflow-orchestrator:1.0.0 . +``` + +### Push + +```bash +docker push jbutler1980/bytebot-workflow-orchestrator:1.0.0 +``` + +## Development + +### Local Setup + +```bash +# Install dependencies +npm install + +# Generate Prisma client +npx prisma generate + +# Run in development mode +npm run start:dev +``` + +### Testing + +```bash +# Unit tests +npm run test + +# E2E tests +npm run test:e2e + +# Coverage +npm run test:cov +``` + +## Metrics + +Prometheus metrics available at `/api/v1/metrics`: + +- `approvals_total` - Total approval requests by status +- `approvals_pending` - Current pending approvals +- `approval_latency_seconds` - Approval decision latency +- `webhooks_total` - Total webhook deliveries +- `webhook_delivery_seconds` - Webhook delivery latency +- `audit_logs_total` - Total audit log entries +- `idempotency_checks_total` - Idempotency check counts + +## Version History + +- **v1.0.0** (2025-12-13): Initial release with M5 approval flows and Post-M5 enhancements diff --git a/packages/bytebot-workflow-orchestrator/nest-cli.json b/packages/bytebot-workflow-orchestrator/nest-cli.json new file mode 100644 index 000000000..256648114 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/nest-cli.json @@ -0,0 +1,5 @@ +{ + "$schema": "https://json.schemastore.org/nest-cli", + "collection": "@nestjs/schematics", + "sourceRoot": "src" +} diff --git a/packages/bytebot-workflow-orchestrator/package-lock.json b/packages/bytebot-workflow-orchestrator/package-lock.json new file mode 100644 index 000000000..9a21f0dd7 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/package-lock.json @@ -0,0 +1,12126 @@ +{ + "name": "bytebot-workflow-orchestrator", + "version": "5.17.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "bytebot-workflow-orchestrator", + "version": "5.17.0", + "license": "UNLICENSED", + "dependencies": { + "@kubernetes/client-node": "^0.22.3", + "@nestjs/common": "^11.0.1", + "@nestjs/config": "^4.0.2", + "@nestjs/core": "^11.0.1", + "@nestjs/event-emitter": "^3.0.0", + "@nestjs/jwt": "^11.0.2", + "@nestjs/platform-express": "^11.1.5", + "@nestjs/platform-socket.io": "^11.0.0", + "@nestjs/schedule": "^6.0.0", + "@nestjs/swagger": "^11.0.0", + "@nestjs/terminus": "^11.0.0", + "@nestjs/throttler": "^6.3.0", + "@nestjs/websockets": "^11.0.0", + "@opentelemetry/auto-instrumentations-node": "^0.54.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.57.0", + "@opentelemetry/resources": "^1.30.0", + "@opentelemetry/sdk-node": "^0.57.0", + "@opentelemetry/sdk-trace-base": "^1.30.0", + "@opentelemetry/semantic-conventions": "^1.28.0", + "@paralleldrive/cuid2": "^2.2.2", + "@prisma/client": "^6.16.1", + "@temporalio/client": "^1.11.7", + "@willsoto/nestjs-prometheus": "^6.0.1", + "ajv": "8.17.1", + "ajv-formats": "3.0.1", + "axios": "^1.7.9", + "class-transformer": "^0.5.1", + "class-validator": "^0.14.2", + "cockatiel": "^3.2.1", + "minimatch": "^10.0.1", + "prom-client": "^15.1.3", + "reflect-metadata": "^0.2.2", + "rxjs": "^7.8.1", + "socket.io": "^4.7.4", + "swagger-ui-express": "^5.0.1", + "zod": "^3.24.1" + }, + "devDependencies": { + "@nestjs/cli": "^11.0.0", + "@nestjs/schematics": "^11.0.0", + "@nestjs/testing": "^11.0.1", + "@types/express": "^5.0.0", + "@types/jest": "^29.5.14", + "@types/node": "^22.10.7", + "eslint": "^9.18.0", + "jest": "^29.7.0", + "prettier": "^3.4.2", + "prisma": "^6.16.1", + "rimraf": "^6.0.1", + "source-map-support": "^0.5.21", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.3" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/@angular-devkit/core": { + "version": "19.2.19", + "resolved": "https://registry.npmjs.org/@angular-devkit/core/-/core-19.2.19.tgz", + "integrity": "sha512-JbLL+4IMLMBgjLZlnPG4lYDfz4zGrJ/s6Aoon321NJKuw1Kb1k5KpFu9dUY0BqLIe8xPQ2UJBpI+xXdK5MXMHQ==", + "dev": true, + "dependencies": { + "ajv": "8.17.1", + "ajv-formats": "3.0.1", + "jsonc-parser": "3.3.1", + "picomatch": "4.0.2", + "rxjs": "7.8.1", + "source-map": "0.7.4" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + }, + "peerDependencies": { + "chokidar": "^4.0.0" + }, + "peerDependenciesMeta": { + "chokidar": { + "optional": true + } + } + }, + "node_modules/@angular-devkit/core/node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/@angular-devkit/schematics": { + "version": "19.2.19", + "resolved": "https://registry.npmjs.org/@angular-devkit/schematics/-/schematics-19.2.19.tgz", + "integrity": "sha512-J4Jarr0SohdrHcb40gTL4wGPCQ952IMWF1G/MSAQfBAPvA9ZKApYhpxcY7PmehVePve+ujpus1dGsJ7dPxz8Kg==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.19", + "jsonc-parser": "3.3.1", + "magic-string": "0.30.17", + "ora": "5.4.1", + "rxjs": "7.8.1" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + } + }, + "node_modules/@angular-devkit/schematics-cli": { + "version": "19.2.19", + "resolved": "https://registry.npmjs.org/@angular-devkit/schematics-cli/-/schematics-cli-19.2.19.tgz", + "integrity": "sha512-7q9UY6HK6sccL9F3cqGRUwKhM7b/XfD2YcVaZ2WD7VMaRlRm85v6mRjSrfKIAwxcQU0UK27kMc79NIIqaHjzxA==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.19", + "@angular-devkit/schematics": "19.2.19", + "@inquirer/prompts": "7.3.2", + "ansi-colors": "4.1.3", + "symbol-observable": "4.0.0", + "yargs-parser": "21.1.1" + }, + "bin": { + "schematics": "bin/schematics.js" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + } + }, + "node_modules/@angular-devkit/schematics-cli/node_modules/@inquirer/prompts": { + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/@inquirer/prompts/-/prompts-7.3.2.tgz", + "integrity": "sha512-G1ytyOoHh5BphmEBxSwALin3n1KGNYB6yImbICcRQdzXfOGbuJ9Jske/Of5Sebk339NSGGNfUshnzK8YWkTPsQ==", + "dev": true, + "dependencies": { + "@inquirer/checkbox": "^4.1.2", + "@inquirer/confirm": "^5.1.6", + "@inquirer/editor": "^4.2.7", + "@inquirer/expand": "^4.0.9", + "@inquirer/input": "^4.1.6", + "@inquirer/number": "^3.0.9", + "@inquirer/password": "^4.0.9", + "@inquirer/rawlist": "^4.0.9", + "@inquirer/search": "^3.0.9", + "@inquirer/select": "^4.0.9" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@angular-devkit/schematics/node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz", + "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz", + "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz", + "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz", + "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + "dev": true, + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.4" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@borewit/text-codec": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.1.1.tgz", + "integrity": "sha512-5L/uBxmjaCIX5h8Z+uu+kA9BQLkc/Wl06UGR5ajNRxu+/XjonB5i8JpgFMrPj3LXTCPA0pv8yxUvbUi+QthGGA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "dev": true, + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.3.tgz", + "integrity": "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", + "integrity": "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + "integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@grpc/grpc-js": { + "version": "1.14.3", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.14.3.tgz", + "integrity": "sha512-Iq8QQQ/7X3Sac15oB6p0FmUg/klxQvXLeileoqrTRGJYLV+/9tubbr9ipz0GKHjmXVsgFPo/+W+2cA8eNcR+XA==", + "dependencies": { + "@grpc/proto-loader": "^0.8.0", + "@js-sdsl/ordered-map": "^4.4.2" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "node_modules/@grpc/proto-loader": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.8.0.tgz", + "integrity": "sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==", + "dependencies": { + "lodash.camelcase": "^4.3.0", + "long": "^5.0.0", + "protobufjs": "^7.5.3", + "yargs": "^17.7.2" + }, + "bin": { + "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@inquirer/ansi": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@inquirer/ansi/-/ansi-1.0.2.tgz", + "integrity": "sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ==", + "dev": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/checkbox": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/@inquirer/checkbox/-/checkbox-4.3.2.tgz", + "integrity": "sha512-VXukHf0RR1doGe6Sm4F0Em7SWYLTHSsbGfJdS9Ja2bX5/D5uwVOEjr07cncLROdBvmnvCATYEWlHqYmXv2IlQA==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/core": "^10.3.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/confirm": { + "version": "5.1.21", + "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.21.tgz", + "integrity": "sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/core": { + "version": "10.3.2", + "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.3.2.tgz", + "integrity": "sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "cli-width": "^4.1.0", + "mute-stream": "^2.0.0", + "signal-exit": "^4.1.0", + "wrap-ansi": "^6.2.0", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/editor": { + "version": "4.2.23", + "resolved": "https://registry.npmjs.org/@inquirer/editor/-/editor-4.2.23.tgz", + "integrity": "sha512-aLSROkEwirotxZ1pBaP8tugXRFCxW94gwrQLxXfrZsKkfjOYC1aRvAZuhpJOb5cu4IBTJdsCigUlf2iCOu4ZDQ==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/external-editor": "^1.0.3", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/expand": { + "version": "4.0.23", + "resolved": "https://registry.npmjs.org/@inquirer/expand/-/expand-4.0.23.tgz", + "integrity": "sha512-nRzdOyFYnpeYTTR2qFwEVmIWypzdAx/sIkCMeTNTcflFOovfqUk+HcFhQQVBftAh9gmGrpFj6QcGEqrDMDOiew==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/external-editor": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@inquirer/external-editor/-/external-editor-1.0.3.tgz", + "integrity": "sha512-RWbSrDiYmO4LbejWY7ttpxczuwQyZLBUyygsA9Nsv95hpzUWwnNTVQmAq3xuh7vNwCp07UTmE5i11XAEExx4RA==", + "dev": true, + "dependencies": { + "chardet": "^2.1.1", + "iconv-lite": "^0.7.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/figures": { + "version": "1.0.15", + "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.15.tgz", + "integrity": "sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==", + "dev": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/input": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/@inquirer/input/-/input-4.3.1.tgz", + "integrity": "sha512-kN0pAM4yPrLjJ1XJBjDxyfDduXOuQHrBB8aLDMueuwUGn+vNpF7Gq7TvyVxx8u4SHlFFj4trmj+a2cbpG4Jn1g==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/number": { + "version": "3.0.23", + "resolved": "https://registry.npmjs.org/@inquirer/number/-/number-3.0.23.tgz", + "integrity": "sha512-5Smv0OK7K0KUzUfYUXDXQc9jrf8OHo4ktlEayFlelCjwMXz0299Y8OrI+lj7i4gCBY15UObk76q0QtxjzFcFcg==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/password": { + "version": "4.0.23", + "resolved": "https://registry.npmjs.org/@inquirer/password/-/password-4.0.23.tgz", + "integrity": "sha512-zREJHjhT5vJBMZX/IUbyI9zVtVfOLiTO66MrF/3GFZYZ7T4YILW5MSkEYHceSii/KtRk+4i3RE7E1CUXA2jHcA==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/prompts": { + "version": "7.10.1", + "resolved": "https://registry.npmjs.org/@inquirer/prompts/-/prompts-7.10.1.tgz", + "integrity": "sha512-Dx/y9bCQcXLI5ooQ5KyvA4FTgeo2jYj/7plWfV5Ak5wDPKQZgudKez2ixyfz7tKXzcJciTxqLeK7R9HItwiByg==", + "dev": true, + "dependencies": { + "@inquirer/checkbox": "^4.3.2", + "@inquirer/confirm": "^5.1.21", + "@inquirer/editor": "^4.2.23", + "@inquirer/expand": "^4.0.23", + "@inquirer/input": "^4.3.1", + "@inquirer/number": "^3.0.23", + "@inquirer/password": "^4.0.23", + "@inquirer/rawlist": "^4.1.11", + "@inquirer/search": "^3.2.2", + "@inquirer/select": "^4.4.2" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/rawlist": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/@inquirer/rawlist/-/rawlist-4.1.11.tgz", + "integrity": "sha512-+LLQB8XGr3I5LZN/GuAHo+GpDJegQwuPARLChlMICNdwW7OwV2izlCSCxN6cqpL0sMXmbKbFcItJgdQq5EBXTw==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/search": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/@inquirer/search/-/search-3.2.2.tgz", + "integrity": "sha512-p2bvRfENXCZdWF/U2BXvnSI9h+tuA8iNqtUKb9UWbmLYCRQxd8WkvwWvYn+3NgYaNwdUkHytJMGG4MMLucI1kA==", + "dev": true, + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/select": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@inquirer/select/-/select-4.4.2.tgz", + "integrity": "sha512-l4xMuJo55MAe+N7Qr4rX90vypFwCajSakx59qe/tMaC1aEHWLyw68wF4o0A4SLAY4E0nd+Vt+EyskeDIqu1M6w==", + "dev": true, + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/core": "^10.3.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/type": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.10.tgz", + "integrity": "sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA==", + "dev": true, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", + "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", + "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.2", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz", + "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/reporters/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@jest/reporters/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.11.tgz", + "integrity": "sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@js-sdsl/ordered-map": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", + "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "node_modules/@jsep-plugin/assignment": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@jsep-plugin/assignment/-/assignment-1.3.0.tgz", + "integrity": "sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ==", + "engines": { + "node": ">= 10.16.0" + }, + "peerDependencies": { + "jsep": "^0.4.0||^1.0.0" + } + }, + "node_modules/@jsep-plugin/regex": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@jsep-plugin/regex/-/regex-1.0.4.tgz", + "integrity": "sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg==", + "engines": { + "node": ">= 10.16.0" + }, + "peerDependencies": { + "jsep": "^0.4.0||^1.0.0" + } + }, + "node_modules/@kubernetes/client-node": { + "version": "0.22.3", + "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.22.3.tgz", + "integrity": "sha512-dG8uah3+HDJLpJEESshLRZlAZ4PgDeV9mZXT0u1g7oy4KMRzdZ7n5g0JEIlL6QhK51/2ztcIqURAnjfjJt6Z+g==", + "dependencies": { + "byline": "^5.0.0", + "isomorphic-ws": "^5.0.0", + "js-yaml": "^4.1.0", + "jsonpath-plus": "^10.2.0", + "request": "^2.88.0", + "rfc4648": "^1.3.0", + "stream-buffers": "^3.0.2", + "tar": "^7.0.0", + "tslib": "^2.4.1", + "ws": "^8.18.0" + }, + "optionalDependencies": { + "openid-client": "^6.1.3" + } + }, + "node_modules/@lukeed/csprng": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lukeed/csprng/-/csprng-1.1.0.tgz", + "integrity": "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@microsoft/tsdoc": { + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/@microsoft/tsdoc/-/tsdoc-0.16.0.tgz", + "integrity": "sha512-xgAyonlVVS+q7Vc7qLW0UrJU7rSFcETRWsqdXZtjzRU8dF+6CkozTK4V4y1LwOX7j8r/vHphjDeMeGI4tNGeGA==" + }, + "node_modules/@nestjs/cli": { + "version": "11.0.14", + "resolved": "https://registry.npmjs.org/@nestjs/cli/-/cli-11.0.14.tgz", + "integrity": "sha512-YwP03zb5VETTwelXU+AIzMVbEZKk/uxJL+z9pw0mdG9ogAtqZ6/mpmIM4nEq/NU8D0a7CBRLcMYUmWW/55pfqw==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.19", + "@angular-devkit/schematics": "19.2.19", + "@angular-devkit/schematics-cli": "19.2.19", + "@inquirer/prompts": "7.10.1", + "@nestjs/schematics": "^11.0.1", + "ansis": "4.2.0", + "chokidar": "4.0.3", + "cli-table3": "0.6.5", + "commander": "4.1.1", + "fork-ts-checker-webpack-plugin": "9.1.0", + "glob": "13.0.0", + "node-emoji": "1.11.0", + "ora": "5.4.1", + "tsconfig-paths": "4.2.0", + "tsconfig-paths-webpack-plugin": "4.2.0", + "typescript": "5.9.3", + "webpack": "5.103.0", + "webpack-node-externals": "3.0.0" + }, + "bin": { + "nest": "bin/nest.js" + }, + "engines": { + "node": ">= 20.11" + }, + "peerDependencies": { + "@swc/cli": "^0.1.62 || ^0.3.0 || ^0.4.0 || ^0.5.0 || ^0.6.0 || ^0.7.0", + "@swc/core": "^1.3.62" + }, + "peerDependenciesMeta": { + "@swc/cli": { + "optional": true + }, + "@swc/core": { + "optional": true + } + } + }, + "node_modules/@nestjs/common": { + "version": "11.1.9", + "resolved": "https://registry.npmjs.org/@nestjs/common/-/common-11.1.9.tgz", + "integrity": "sha512-zDntUTReRbAThIfSp3dQZ9kKqI+LjgLp5YZN5c1bgNRDuoeLySAoZg46Bg1a+uV8TMgIRziHocglKGNzr6l+bQ==", + "dependencies": { + "file-type": "21.1.0", + "iterare": "1.2.1", + "load-esm": "1.0.3", + "tslib": "2.8.1", + "uid": "2.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "class-transformer": ">=0.4.1", + "class-validator": ">=0.13.2", + "reflect-metadata": "^0.1.12 || ^0.2.0", + "rxjs": "^7.1.0" + }, + "peerDependenciesMeta": { + "class-transformer": { + "optional": true + }, + "class-validator": { + "optional": true + } + } + }, + "node_modules/@nestjs/config": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@nestjs/config/-/config-4.0.2.tgz", + "integrity": "sha512-McMW6EXtpc8+CwTUwFdg6h7dYcBUpH5iUILCclAsa+MbCEvC9ZKu4dCHRlJqALuhjLw97pbQu62l4+wRwGeZqA==", + "dependencies": { + "dotenv": "16.4.7", + "dotenv-expand": "12.0.1", + "lodash": "4.17.21" + }, + "peerDependencies": { + "@nestjs/common": "^10.0.0 || ^11.0.0", + "rxjs": "^7.1.0" + } + }, + "node_modules/@nestjs/core": { + "version": "11.1.9", + "resolved": "https://registry.npmjs.org/@nestjs/core/-/core-11.1.9.tgz", + "integrity": "sha512-a00B0BM4X+9z+t3UxJqIZlemIwCQdYoPKrMcM+ky4z3pkqqG1eTWexjs+YXpGObnLnjtMPVKWlcZHp3adDYvUw==", + "hasInstallScript": true, + "dependencies": { + "@nuxt/opencollective": "0.4.1", + "fast-safe-stringify": "2.1.1", + "iterare": "1.2.1", + "path-to-regexp": "8.3.0", + "tslib": "2.8.1", + "uid": "2.0.2" + }, + "engines": { + "node": ">= 20" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/microservices": "^11.0.0", + "@nestjs/platform-express": "^11.0.0", + "@nestjs/websockets": "^11.0.0", + "reflect-metadata": "^0.1.12 || ^0.2.0", + "rxjs": "^7.1.0" + }, + "peerDependenciesMeta": { + "@nestjs/microservices": { + "optional": true + }, + "@nestjs/platform-express": { + "optional": true + }, + "@nestjs/websockets": { + "optional": true + } + } + }, + "node_modules/@nestjs/event-emitter": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@nestjs/event-emitter/-/event-emitter-3.0.1.tgz", + "integrity": "sha512-0Ln/x+7xkU6AJFOcQI9tIhUMXVF7D5itiaQGOyJbXtlAfAIt8gzDdJm+Im7cFzKoWkiW5nCXCPh6GSvdQd/3Dw==", + "dependencies": { + "eventemitter2": "6.4.9" + }, + "peerDependencies": { + "@nestjs/common": "^10.0.0 || ^11.0.0", + "@nestjs/core": "^10.0.0 || ^11.0.0" + } + }, + "node_modules/@nestjs/jwt": { + "version": "11.0.2", + "resolved": "https://registry.npmjs.org/@nestjs/jwt/-/jwt-11.0.2.tgz", + "integrity": "sha512-rK8aE/3/Ma45gAWfCksAXUNbOoSOUudU0Kn3rT39htPF7wsYXtKfjALKeKKJbFrIWbLjsbqfXX5bIJNvgBugGA==", + "dependencies": { + "@types/jsonwebtoken": "9.0.10", + "jsonwebtoken": "9.0.3" + }, + "peerDependencies": { + "@nestjs/common": "^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0" + } + }, + "node_modules/@nestjs/mapped-types": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@nestjs/mapped-types/-/mapped-types-2.1.0.tgz", + "integrity": "sha512-W+n+rM69XsFdwORF11UqJahn4J3xi4g/ZEOlJNL6KoW5ygWSmBB2p0S2BZ4FQeS/NDH72e6xIcu35SfJnE8bXw==", + "peerDependencies": { + "@nestjs/common": "^10.0.0 || ^11.0.0", + "class-transformer": "^0.4.0 || ^0.5.0", + "class-validator": "^0.13.0 || ^0.14.0", + "reflect-metadata": "^0.1.12 || ^0.2.0" + }, + "peerDependenciesMeta": { + "class-transformer": { + "optional": true + }, + "class-validator": { + "optional": true + } + } + }, + "node_modules/@nestjs/platform-express": { + "version": "11.1.9", + "resolved": "https://registry.npmjs.org/@nestjs/platform-express/-/platform-express-11.1.9.tgz", + "integrity": "sha512-GVd3+0lO0mJq2m1kl9hDDnVrX3Nd4oH3oDfklz0pZEVEVS0KVSp63ufHq2Lu9cyPdSBuelJr9iPm2QQ1yX+Kmw==", + "dependencies": { + "cors": "2.8.5", + "express": "5.1.0", + "multer": "2.0.2", + "path-to-regexp": "8.3.0", + "tslib": "2.8.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/core": "^11.0.0" + } + }, + "node_modules/@nestjs/platform-express/node_modules/express": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/express/-/express-5.1.0.tgz", + "integrity": "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==", + "dependencies": { + "accepts": "^2.0.0", + "body-parser": "^2.2.0", + "content-disposition": "^1.0.0", + "content-type": "^1.0.5", + "cookie": "^0.7.1", + "cookie-signature": "^1.2.1", + "debug": "^4.4.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "finalhandler": "^2.1.0", + "fresh": "^2.0.0", + "http-errors": "^2.0.0", + "merge-descriptors": "^2.0.0", + "mime-types": "^3.0.0", + "on-finished": "^2.4.1", + "once": "^1.4.0", + "parseurl": "^1.3.3", + "proxy-addr": "^2.0.7", + "qs": "^6.14.0", + "range-parser": "^1.2.1", + "router": "^2.2.0", + "send": "^1.1.0", + "serve-static": "^2.2.0", + "statuses": "^2.0.1", + "type-is": "^2.0.1", + "vary": "^1.1.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/@nestjs/platform-socket.io": { + "version": "11.1.9", + "resolved": "https://registry.npmjs.org/@nestjs/platform-socket.io/-/platform-socket.io-11.1.9.tgz", + "integrity": "sha512-OaAW+voXo5BXbFKd9Ot3SL05tEucRMhZRdw5wdWZf/RpIl9hB6G6OHr8DDxNbUGvuQWzNnZHCDHx3EQJzjcIyA==", + "dependencies": { + "socket.io": "4.8.1", + "tslib": "2.8.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/websockets": "^11.0.0", + "rxjs": "^7.1.0" + } + }, + "node_modules/@nestjs/schedule": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@nestjs/schedule/-/schedule-6.1.0.tgz", + "integrity": "sha512-W25Ydc933Gzb1/oo7+bWzzDiOissE+h/dhIAPugA39b9MuIzBbLybuXpc1AjoQLczO3v0ldmxaffVl87W0uqoQ==", + "dependencies": { + "cron": "4.3.5" + }, + "peerDependencies": { + "@nestjs/common": "^10.0.0 || ^11.0.0", + "@nestjs/core": "^10.0.0 || ^11.0.0" + } + }, + "node_modules/@nestjs/schematics": { + "version": "11.0.9", + "resolved": "https://registry.npmjs.org/@nestjs/schematics/-/schematics-11.0.9.tgz", + "integrity": "sha512-0NfPbPlEaGwIT8/TCThxLzrlz3yzDNkfRNpbL7FiplKq3w4qXpJg0JYwqgMEJnLQZm3L/L/5XjoyfJHUO3qX9g==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.17", + "@angular-devkit/schematics": "19.2.17", + "comment-json": "4.4.1", + "jsonc-parser": "3.3.1", + "pluralize": "8.0.0" + }, + "peerDependencies": { + "typescript": ">=4.8.2" + } + }, + "node_modules/@nestjs/schematics/node_modules/@angular-devkit/core": { + "version": "19.2.17", + "resolved": "https://registry.npmjs.org/@angular-devkit/core/-/core-19.2.17.tgz", + "integrity": "sha512-Ah008x2RJkd0F+NLKqIpA34/vUGwjlprRCkvddjDopAWRzYn6xCkz1Tqwuhn0nR1Dy47wTLKYD999TYl5ONOAQ==", + "dev": true, + "dependencies": { + "ajv": "8.17.1", + "ajv-formats": "3.0.1", + "jsonc-parser": "3.3.1", + "picomatch": "4.0.2", + "rxjs": "7.8.1", + "source-map": "0.7.4" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + }, + "peerDependencies": { + "chokidar": "^4.0.0" + }, + "peerDependenciesMeta": { + "chokidar": { + "optional": true + } + } + }, + "node_modules/@nestjs/schematics/node_modules/@angular-devkit/schematics": { + "version": "19.2.17", + "resolved": "https://registry.npmjs.org/@angular-devkit/schematics/-/schematics-19.2.17.tgz", + "integrity": "sha512-ADfbaBsrG8mBF6Mfs+crKA/2ykB8AJI50Cv9tKmZfwcUcyAdmTr+vVvhsBCfvUAEokigSsgqgpYxfkJVxhJYeg==", + "dev": true, + "dependencies": { + "@angular-devkit/core": "19.2.17", + "jsonc-parser": "3.3.1", + "magic-string": "0.30.17", + "ora": "5.4.1", + "rxjs": "7.8.1" + }, + "engines": { + "node": "^18.19.1 || ^20.11.1 || >=22.0.0", + "npm": "^6.11.0 || ^7.5.6 || >=8.0.0", + "yarn": ">= 1.13.0" + } + }, + "node_modules/@nestjs/schematics/node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/@nestjs/swagger": { + "version": "11.2.3", + "resolved": "https://registry.npmjs.org/@nestjs/swagger/-/swagger-11.2.3.tgz", + "integrity": "sha512-a0xFfjeqk69uHIUpP8u0ryn4cKuHdra2Ug96L858i0N200Hxho+n3j+TlQXyOF4EstLSGjTfxI1Xb2E1lUxeNg==", + "dependencies": { + "@microsoft/tsdoc": "0.16.0", + "@nestjs/mapped-types": "2.1.0", + "js-yaml": "4.1.1", + "lodash": "4.17.21", + "path-to-regexp": "8.3.0", + "swagger-ui-dist": "5.30.2" + }, + "peerDependencies": { + "@fastify/static": "^8.0.0", + "@nestjs/common": "^11.0.1", + "@nestjs/core": "^11.0.1", + "class-transformer": "*", + "class-validator": "*", + "reflect-metadata": "^0.1.12 || ^0.2.0" + }, + "peerDependenciesMeta": { + "@fastify/static": { + "optional": true + }, + "class-transformer": { + "optional": true + }, + "class-validator": { + "optional": true + } + } + }, + "node_modules/@nestjs/terminus": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/@nestjs/terminus/-/terminus-11.0.0.tgz", + "integrity": "sha512-c55LOo9YGovmQHtFUMa/vDaxGZ2cglMTZejqgHREaApt/GArTfgYYGwhRXPLq8ZwiQQlLuYB+79e9iA8mlDSLA==", + "dependencies": { + "boxen": "5.1.2", + "check-disk-space": "3.4.0" + }, + "peerDependencies": { + "@grpc/grpc-js": "*", + "@grpc/proto-loader": "*", + "@mikro-orm/core": "*", + "@mikro-orm/nestjs": "*", + "@nestjs/axios": "^2.0.0 || ^3.0.0 || ^4.0.0", + "@nestjs/common": "^10.0.0 || ^11.0.0", + "@nestjs/core": "^10.0.0 || ^11.0.0", + "@nestjs/microservices": "^10.0.0 || ^11.0.0", + "@nestjs/mongoose": "^11.0.0", + "@nestjs/sequelize": "^10.0.0 || ^11.0.0", + "@nestjs/typeorm": "^10.0.0 || ^11.0.0", + "@prisma/client": "*", + "mongoose": "*", + "reflect-metadata": "0.1.x || 0.2.x", + "rxjs": "7.x", + "sequelize": "*", + "typeorm": "*" + }, + "peerDependenciesMeta": { + "@grpc/grpc-js": { + "optional": true + }, + "@grpc/proto-loader": { + "optional": true + }, + "@mikro-orm/core": { + "optional": true + }, + "@mikro-orm/nestjs": { + "optional": true + }, + "@nestjs/axios": { + "optional": true + }, + "@nestjs/microservices": { + "optional": true + }, + "@nestjs/mongoose": { + "optional": true + }, + "@nestjs/sequelize": { + "optional": true + }, + "@nestjs/typeorm": { + "optional": true + }, + "@prisma/client": { + "optional": true + }, + "mongoose": { + "optional": true + }, + "sequelize": { + "optional": true + }, + "typeorm": { + "optional": true + } + } + }, + "node_modules/@nestjs/testing": { + "version": "11.1.9", + "resolved": "https://registry.npmjs.org/@nestjs/testing/-/testing-11.1.9.tgz", + "integrity": "sha512-UFxerBDdb0RUNxQNj25pvkvNE7/vxKhXYWBt3QuwBFnYISzRIzhVlyIqLfoV5YI3zV0m0Nn4QAn1KM0zzwfEng==", + "dev": true, + "dependencies": { + "tslib": "2.8.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/nest" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/core": "^11.0.0", + "@nestjs/microservices": "^11.0.0", + "@nestjs/platform-express": "^11.0.0" + }, + "peerDependenciesMeta": { + "@nestjs/microservices": { + "optional": true + }, + "@nestjs/platform-express": { + "optional": true + } + } + }, + "node_modules/@nestjs/throttler": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/@nestjs/throttler/-/throttler-6.5.0.tgz", + "integrity": "sha512-9j0ZRfH0QE1qyrj9JjIRDz5gQLPqq9yVC2nHsrosDVAfI5HHw08/aUAWx9DZLSdQf4HDkmhTTEGLrRFHENvchQ==", + "peerDependencies": { + "@nestjs/common": "^7.0.0 || ^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0", + "@nestjs/core": "^7.0.0 || ^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0", + "reflect-metadata": "^0.1.13 || ^0.2.0" + } + }, + "node_modules/@nestjs/websockets": { + "version": "11.1.9", + "resolved": "https://registry.npmjs.org/@nestjs/websockets/-/websockets-11.1.9.tgz", + "integrity": "sha512-kkkdeTVcc3X7ZzvVqUVpOAJoh49kTRUjWNUXo5jmG+27OvZoHfs/vuSiqxidrrbIgydSqN15HUsf1wZwQUrxCQ==", + "dependencies": { + "iterare": "1.2.1", + "object-hash": "3.0.0", + "tslib": "2.8.1" + }, + "peerDependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/core": "^11.0.0", + "@nestjs/platform-socket.io": "^11.0.0", + "reflect-metadata": "^0.1.12 || ^0.2.0", + "rxjs": "^7.1.0" + }, + "peerDependenciesMeta": { + "@nestjs/platform-socket.io": { + "optional": true + } + } + }, + "node_modules/@noble/hashes": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", + "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@nuxt/opencollective": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@nuxt/opencollective/-/opencollective-0.4.1.tgz", + "integrity": "sha512-GXD3wy50qYbxCJ652bDrDzgMr3NFEkIS374+IgFQKkCvk9yiYcLvX2XDYr7UyQxf4wK0e+yqDYRubZ0DtOxnmQ==", + "dependencies": { + "consola": "^3.2.3" + }, + "bin": { + "opencollective": "bin/opencollective.js" + }, + "engines": { + "node": "^14.18.0 || >=16.10.0", + "npm": ">=5.10.0" + } + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/api-logs": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.56.0.tgz", + "integrity": "sha512-Wr39+94UNNG3Ei9nv3pHd4AJ63gq5nSemMRpCd8fPwDL9rN3vK26lzxfH27mw16XzOSO+TpyQwBAMaLxaPWG0g==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node": { + "version": "0.54.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/auto-instrumentations-node/-/auto-instrumentations-node-0.54.0.tgz", + "integrity": "sha512-MJYh3hUN7FupIXGy/cOiMoTIM3lTELXFiu9dFXD6YK9AE/Uez2YfgRnHyotD9h/qJeL7uDcI5DHAGkbb/2EdOQ==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/instrumentation-amqplib": "^0.45.0", + "@opentelemetry/instrumentation-aws-lambda": "^0.49.0", + "@opentelemetry/instrumentation-aws-sdk": "^0.48.0", + "@opentelemetry/instrumentation-bunyan": "^0.44.0", + "@opentelemetry/instrumentation-cassandra-driver": "^0.44.0", + "@opentelemetry/instrumentation-connect": "^0.42.0", + "@opentelemetry/instrumentation-cucumber": "^0.12.0", + "@opentelemetry/instrumentation-dataloader": "^0.15.0", + "@opentelemetry/instrumentation-dns": "^0.42.0", + "@opentelemetry/instrumentation-express": "^0.46.0", + "@opentelemetry/instrumentation-fastify": "^0.43.0", + "@opentelemetry/instrumentation-fs": "^0.18.0", + "@opentelemetry/instrumentation-generic-pool": "^0.42.0", + "@opentelemetry/instrumentation-graphql": "^0.46.0", + "@opentelemetry/instrumentation-grpc": "^0.56.0", + "@opentelemetry/instrumentation-hapi": "^0.44.0", + "@opentelemetry/instrumentation-http": "^0.56.0", + "@opentelemetry/instrumentation-ioredis": "^0.46.0", + "@opentelemetry/instrumentation-kafkajs": "^0.6.0", + "@opentelemetry/instrumentation-knex": "^0.43.0", + "@opentelemetry/instrumentation-koa": "^0.46.0", + "@opentelemetry/instrumentation-lru-memoizer": "^0.43.0", + "@opentelemetry/instrumentation-memcached": "^0.42.0", + "@opentelemetry/instrumentation-mongodb": "^0.50.0", + "@opentelemetry/instrumentation-mongoose": "^0.45.0", + "@opentelemetry/instrumentation-mysql": "^0.44.0", + "@opentelemetry/instrumentation-mysql2": "^0.44.0", + "@opentelemetry/instrumentation-nestjs-core": "^0.43.0", + "@opentelemetry/instrumentation-net": "^0.42.0", + "@opentelemetry/instrumentation-pg": "^0.49.0", + "@opentelemetry/instrumentation-pino": "^0.45.0", + "@opentelemetry/instrumentation-redis": "^0.45.0", + "@opentelemetry/instrumentation-redis-4": "^0.45.0", + "@opentelemetry/instrumentation-restify": "^0.44.0", + "@opentelemetry/instrumentation-router": "^0.43.0", + "@opentelemetry/instrumentation-socket.io": "^0.45.0", + "@opentelemetry/instrumentation-tedious": "^0.17.0", + "@opentelemetry/instrumentation-undici": "^0.9.0", + "@opentelemetry/instrumentation-winston": "^0.43.0", + "@opentelemetry/resource-detector-alibaba-cloud": "^0.29.6", + "@opentelemetry/resource-detector-aws": "^1.9.0", + "@opentelemetry/resource-detector-azure": "^0.4.0", + "@opentelemetry/resource-detector-container": "^0.5.2", + "@opentelemetry/resource-detector-gcp": "^0.31.0", + "@opentelemetry/resources": "^1.24.0", + "@opentelemetry/sdk-node": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.4.1" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/context-async-hooks": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.29.0.tgz", + "integrity": "sha512-TKT91jcFXgHyIDF1lgJF3BHGIakn6x0Xp7Tq3zoS3TMPzT9IlP0xEavWP8C1zGjU9UmZP2VR1tJhW9Az1A3w8Q==", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/core": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.29.0.tgz", + "integrity": "sha512-gmT7vAreXl0DTHD2rVZcw3+l2g84+5XiHIqdBUxXbExymPCvSsGOpiwMmn8nkiJur28STV31wnhIDrzWDPzjfA==", + "dependencies": { + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/exporter-logs-otlp-grpc": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-grpc/-/exporter-logs-otlp-grpc-0.56.0.tgz", + "integrity": "sha512-/ef8wcphVKZ0uI7A1oqQI/gEMiBUlkeBkM9AGx6AviQFIbgPVSdNK3+bHBkyq5qMkyWgkeQCSJ0uhc5vJpf0dw==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-grpc-exporter-base": "0.56.0", + "@opentelemetry/otlp-transformer": "0.56.0", + "@opentelemetry/sdk-logs": "0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/exporter-logs-otlp-http": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-http/-/exporter-logs-otlp-http-0.56.0.tgz", + "integrity": "sha512-gN/itg2B30pa+yAqiuIHBCf3E77sSBlyWVzb+U/MDLzEMOwfnexlMvOWULnIO1l2xR2MNLEuPCQAOrL92JHEJg==", + "dependencies": { + "@opentelemetry/api-logs": "0.56.0", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-exporter-base": "0.56.0", + "@opentelemetry/otlp-transformer": "0.56.0", + "@opentelemetry/sdk-logs": "0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/exporter-logs-otlp-proto": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-proto/-/exporter-logs-otlp-proto-0.56.0.tgz", + "integrity": "sha512-MaO+eGrdksd8MpEbDDLbWegHc3w6ualZV6CENxNOm3wqob0iOx78/YL2NVIKyP/0ktTUIs7xIppUYqfY3ogFLQ==", + "dependencies": { + "@opentelemetry/api-logs": "0.56.0", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-exporter-base": "0.56.0", + "@opentelemetry/otlp-transformer": "0.56.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/sdk-logs": "0.56.0", + "@opentelemetry/sdk-trace-base": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/exporter-trace-otlp-grpc": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-grpc/-/exporter-trace-otlp-grpc-0.56.0.tgz", + "integrity": "sha512-9hRHue78CV2XShAt30HadBK8XEtOBiQmnkYquR1RQyf2RYIdJvhiypEZ+Jh3NGW8Qi14icTII/1oPTQlhuyQdQ==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-grpc-exporter-base": "0.56.0", + "@opentelemetry/otlp-transformer": "0.56.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/sdk-trace-base": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/exporter-trace-otlp-http": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-http/-/exporter-trace-otlp-http-0.56.0.tgz", + "integrity": "sha512-vqVuJvcwameA0r0cNrRzrZqPLB0otS+95g0XkZdiKOXUo81wYdY6r4kyrwz4nSChqTBEFm0lqi/H2OWGboOa6g==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-exporter-base": "0.56.0", + "@opentelemetry/otlp-transformer": "0.56.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/sdk-trace-base": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/exporter-trace-otlp-proto": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-proto/-/exporter-trace-otlp-proto-0.56.0.tgz", + "integrity": "sha512-UYVtz8Kp1QZpZFg83ZrnwRIxF2wavNyi1XaIKuQNFjlYuGCh8JH4+GOuHUU4G8cIzOkWdjNR559vv0Q+MCz+1w==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-exporter-base": "0.56.0", + "@opentelemetry/otlp-transformer": "0.56.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/sdk-trace-base": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/exporter-zipkin": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-zipkin/-/exporter-zipkin-1.29.0.tgz", + "integrity": "sha512-9wNUxbl/sju2AvA3UhL2kLF1nfhJ4dVJgvktc3hx80Bg/fWHvF6ik4R3woZ/5gYFqZ97dcuik0dWPQEzLPNBtg==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/sdk-trace-base": "1.29.0", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/otlp-exporter-base": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.56.0.tgz", + "integrity": "sha512-eURvv0fcmBE+KE1McUeRo+u0n18ZnUeSc7lDlW/dzlqFYasEbsztTK4v0Qf8C4vEY+aMTjPKUxBG0NX2Te3Pmw==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-transformer": "0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/otlp-grpc-exporter-base": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-grpc-exporter-base/-/otlp-grpc-exporter-base-0.56.0.tgz", + "integrity": "sha512-QqM4si8Ew8CW5xVk4mYbfusJzMXyk6tkYA5SI0w/5NBxmiZZaYPwQQ2cu58XUH2IMPAsi71yLJVJQaWBBCta0A==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/otlp-exporter-base": "0.56.0", + "@opentelemetry/otlp-transformer": "0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/otlp-transformer": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.56.0.tgz", + "integrity": "sha512-kVkH/W2W7EpgWWpyU5VnnjIdSD7Y7FljQYObAQSKdRcejiwMj2glypZtUdfq1LTJcv4ht0jyTrw1D3CCxssNtQ==", + "dependencies": { + "@opentelemetry/api-logs": "0.56.0", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/sdk-logs": "0.56.0", + "@opentelemetry/sdk-metrics": "1.29.0", + "@opentelemetry/sdk-trace-base": "1.29.0", + "protobufjs": "^7.3.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/propagator-b3": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.29.0.tgz", + "integrity": "sha512-ktsNDlqhu+/IPGEJRMj81upg2JupUp+SwW3n1ZVZTnrDiYUiMUW41vhaziA7Q6UDhbZvZ58skDpQhe2ZgNIPvg==", + "dependencies": { + "@opentelemetry/core": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/propagator-jaeger": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.29.0.tgz", + "integrity": "sha512-EXIEYmFgybnFMijVgqx1mq/diWwSQcd0JWVksytAVQEnAiaDvP45WuncEVQkFIAC0gVxa2+Xr8wL5pF5jCVKbg==", + "dependencies": { + "@opentelemetry/core": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/resources": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.29.0.tgz", + "integrity": "sha512-s7mLXuHZE7RQr1wwweGcaRp3Q4UJJ0wazeGlc/N5/XSe6UyXfsh1UQGMADYeg7YwD+cEdMtU1yJAUXdnFzYzyQ==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/sdk-logs": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.56.0.tgz", + "integrity": "sha512-OS0WPBJF++R/cSl+terUjQH5PebloidB1Jbbecgg2rnCmQbTST9xsRes23bLfDQVRvmegmHqDh884h0aRdJyLw==", + "dependencies": { + "@opentelemetry/api-logs": "0.56.0", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/resources": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.4.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/sdk-metrics": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-1.29.0.tgz", + "integrity": "sha512-MkVtuzDjXZaUJSuJlHn6BSXjcQlMvHcsDV7LjY4P6AJeffMa4+kIGDjzsCf6DkAh6Vqlwag5EWEam3KZOX5Drw==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/resources": "1.29.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/sdk-node": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-node/-/sdk-node-0.56.0.tgz", + "integrity": "sha512-FOY7tWboBBxqftLNHPJFmDXo9fRoPd2PlzfEvSd6058BJM9gY4pWCg8lbVlu03aBrQjcfCTAhXk/tz1Yqd/m6g==", + "dependencies": { + "@opentelemetry/api-logs": "0.56.0", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/exporter-logs-otlp-grpc": "0.56.0", + "@opentelemetry/exporter-logs-otlp-http": "0.56.0", + "@opentelemetry/exporter-logs-otlp-proto": "0.56.0", + "@opentelemetry/exporter-trace-otlp-grpc": "0.56.0", + "@opentelemetry/exporter-trace-otlp-http": "0.56.0", + "@opentelemetry/exporter-trace-otlp-proto": "0.56.0", + "@opentelemetry/exporter-zipkin": "1.29.0", + "@opentelemetry/instrumentation": "0.56.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/sdk-logs": "0.56.0", + "@opentelemetry/sdk-metrics": "1.29.0", + "@opentelemetry/sdk-trace-base": "1.29.0", + "@opentelemetry/sdk-trace-node": "1.29.0", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/sdk-trace-base": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.29.0.tgz", + "integrity": "sha512-hEOpAYLKXF3wGJpXOtWsxEtqBgde0SCv+w+jvr3/UusR4ll3QrENEGnSl1WDCyRrpqOQ5NCNOvZch9UFVa7MnQ==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/resources": "1.29.0", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/sdk-trace-node": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.29.0.tgz", + "integrity": "sha512-ZpGYt+VnMu6O0SRKzhuIivr7qJm3GpWnTCMuJspu4kt3QWIpIenwixo5Vvjuu3R4h2Onl/8dtqAiPIs92xd5ww==", + "dependencies": { + "@opentelemetry/context-async-hooks": "1.29.0", + "@opentelemetry/core": "1.29.0", + "@opentelemetry/propagator-b3": "1.29.0", + "@opentelemetry/propagator-jaeger": "1.29.0", + "@opentelemetry/sdk-trace-base": "1.29.0", + "semver": "^7.5.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/auto-instrumentations-node/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/context-async-hooks": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.30.1.tgz", + "integrity": "sha512-s5vvxXPVdjqS3kTLKMeBMvop9hbWkwzBpu+mUO2M7sZtlkyDJGwFe33wRKnbaYDo8ExRVBIIdwIGrqpxHuKttA==", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.30.1.tgz", + "integrity": "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-grpc": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-grpc/-/exporter-logs-otlp-grpc-0.57.2.tgz", + "integrity": "sha512-eovEy10n3umjKJl2Ey6TLzikPE+W4cUQ4gCwgGP1RqzTGtgDra0WjIqdy29ohiUKfvmbiL3MndZww58xfIvyFw==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-grpc-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/sdk-logs": "0.57.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-http": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-http/-/exporter-logs-otlp-http-0.57.2.tgz", + "integrity": "sha512-0rygmvLcehBRp56NQVLSleJ5ITTduq/QfU7obOkyWgPpFHulwpw2LYTqNIz5TczKZuy5YY+5D3SDnXZL1tXImg==", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/sdk-logs": "0.57.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-http/node_modules/@opentelemetry/api-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.57.2.tgz", + "integrity": "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-proto": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-proto/-/exporter-logs-otlp-proto-0.57.2.tgz", + "integrity": "sha512-ta0ithCin0F8lu9eOf4lEz9YAScecezCHkMMyDkvd9S7AnZNX5ikUmC5EQOQADU+oCcgo/qkQIaKcZvQ0TYKDw==", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-logs": "0.57.2", + "@opentelemetry/sdk-trace-base": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-proto/node_modules/@opentelemetry/api-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.57.2.tgz", + "integrity": "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-grpc": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-grpc/-/exporter-metrics-otlp-grpc-0.57.2.tgz", + "integrity": "sha512-r70B8yKR41F0EC443b5CGB4rUaOMm99I5N75QQt6sHKxYDzSEc6gm48Diz1CI1biwa5tDPznpylTrywO/pT7qw==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/exporter-metrics-otlp-http": "0.57.2", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-grpc-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-metrics": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-http": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.57.2.tgz", + "integrity": "sha512-ttb9+4iKw04IMubjm3t0EZsYRNWr3kg44uUuzfo9CaccYlOh8cDooe4QObDUkvx9d5qQUrbEckhrWKfJnKhemA==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-metrics": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-proto": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-proto/-/exporter-metrics-otlp-proto-0.57.2.tgz", + "integrity": "sha512-HX068Q2eNs38uf7RIkNN9Hl4Ynl+3lP0++KELkXMCpsCbFO03+0XNNZ1SkwxPlP9jrhQahsMPMkzNXpq3fKsnw==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/exporter-metrics-otlp-http": "0.57.2", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-metrics": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-prometheus": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-prometheus/-/exporter-prometheus-0.57.2.tgz", + "integrity": "sha512-VqIqXnuxWMWE/1NatAGtB1PvsQipwxDcdG4RwA/umdBcW3/iOHp0uejvFHTRN2O78ZPged87ErJajyUBPUhlDQ==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-metrics": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-grpc": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-grpc/-/exporter-trace-otlp-grpc-0.57.2.tgz", + "integrity": "sha512-gHU1vA3JnHbNxEXg5iysqCWxN9j83d7/epTYBZflqQnTyCC4N7yZXn/dMM+bEmyhQPGjhCkNZLx4vZuChH1PYw==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-grpc-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-http": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-http/-/exporter-trace-otlp-http-0.57.2.tgz", + "integrity": "sha512-sB/gkSYFu+0w2dVQ0PWY9fAMl172PKMZ/JrHkkW8dmjCL0CYkmXeE+ssqIL/yBUTPOvpLIpenX5T9RwXRBW/3g==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-proto": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-proto/-/exporter-trace-otlp-proto-0.57.2.tgz", + "integrity": "sha512-awDdNRMIwDvUtoRYxRhja5QYH6+McBLtoz1q9BeEsskhZcrGmH/V1fWpGx8n+Rc+542e8pJA6y+aullbIzQmlw==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-zipkin": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-zipkin/-/exporter-zipkin-1.30.1.tgz", + "integrity": "sha512-6S2QIMJahIquvFaaxmcwpvQQRD/YFaMTNoIxrfPIPOeITN+a8lfEcPDxNxn8JDAaxkg+4EnXhz8upVDYenoQjA==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/exporter-zipkin/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/instrumentation": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.56.0.tgz", + "integrity": "sha512-2KkGBKE+FPXU1F0zKww+stnlUxUTlBvLCiWdP63Z9sqXYeNI/ziNzsxAp4LAdUcTQmXjw1IWgvm5CAb/BHy99w==", + "dependencies": { + "@opentelemetry/api-logs": "0.56.0", + "@types/shimmer": "^1.2.0", + "import-in-the-middle": "^1.8.1", + "require-in-the-middle": "^7.1.1", + "semver": "^7.5.2", + "shimmer": "^1.2.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-amqplib": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-amqplib/-/instrumentation-amqplib-0.45.0.tgz", + "integrity": "sha512-SlKLsOS65NGMIBG1Lh/hLrMDU9WzTUF25apnV6ZmWZB1bBmUwan7qrwwrTu1cL5LzJWCXOdZPuTaxP7pC9qxnQ==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-aws-lambda": { + "version": "0.49.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-aws-lambda/-/instrumentation-aws-lambda-0.49.0.tgz", + "integrity": "sha512-FIKQSzX/MSzfARqgm7lX9p/QUj7USyicioBYI5BFGuOOoLefxBlJINAcRs3EvCh1taEnJ7/LpbrhlcF7r4Yqvg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@types/aws-lambda": "8.10.143" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-aws-sdk": { + "version": "0.48.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-aws-sdk/-/instrumentation-aws-sdk-0.48.0.tgz", + "integrity": "sha512-Bl4geb9DS5Zxr5mOsDcDTLjwrfipQ4KDl1ZT5gmoOvVuZPp308reGdtnO1QmqbvMwcgMxD2aBdWUoYgtx1WgWw==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/propagation-utils": "^0.30.14", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-bunyan": { + "version": "0.44.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-bunyan/-/instrumentation-bunyan-0.44.0.tgz", + "integrity": "sha512-9JHcfUPejOx5ULuxrH5K5qOZ9GJSTisuMSZZFVkDigZJ42pMn26Zgmb1HhuiZXd/ZcFgOeLZcwQNpBmF1whftg==", + "dependencies": { + "@opentelemetry/api-logs": "^0.56.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@types/bunyan": "1.8.9" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-cassandra-driver": { + "version": "0.44.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-cassandra-driver/-/instrumentation-cassandra-driver-0.44.0.tgz", + "integrity": "sha512-HbhNoqAelB1T4QtgKJbOy7wB26R15HToLyMmYwNFICyDtfY7nhRmGRSzPt6akpwXpyCq43/P+L6XYTmqSWTK/Q==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-connect": { + "version": "0.42.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-connect/-/instrumentation-connect-0.42.0.tgz", + "integrity": "sha512-bOoYHBmbnq/jFaLHmXJ55VQ6jrH5fHDMAPjFM0d3JvR0dvIqW7anEoNC33QqYGFYUfVJ50S0d/eoyF61ALqQuA==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@types/connect": "3.4.36" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-connect/node_modules/@types/connect": { + "version": "3.4.36", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.36.tgz", + "integrity": "sha512-P63Zd/JUGq+PdrM1lv0Wv5SBYeA2+CORvbrXbngriYY0jzLUWfQMQQxOhjONEz/wlHOAxOdY7CY65rgQdTjq2w==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@opentelemetry/instrumentation-cucumber": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-cucumber/-/instrumentation-cucumber-0.12.0.tgz", + "integrity": "sha512-0sAhKYaxi5/SM+z8nbwmezNVlnJGkcZgMA7ClenVMIoH5xjow/b2gzJOWr3Ch7FPEXBcyoY/sIqfYWRwmRXWiw==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/instrumentation-dataloader": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-dataloader/-/instrumentation-dataloader-0.15.0.tgz", + "integrity": "sha512-5fP35A2jUPk4SerVcduEkpbRAIoqa2PaP5rWumn01T1uSbavXNccAr3Xvx1N6xFtZxXpLJq4FYqGFnMgDWgVng==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-dns": { + "version": "0.42.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-dns/-/instrumentation-dns-0.42.0.tgz", + "integrity": "sha512-HsKYWwMADJAcdY4UkNNbvcg9cm5Xhz5wxBPyT15z7wigatiEoCXPrbbbRDmCe+eKTc2tRxUPmg49u6MsIGcUmg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express": { + "version": "0.46.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-express/-/instrumentation-express-0.46.0.tgz", + "integrity": "sha512-BCEClDj/HPq/1xYRAlOr6z+OUnbp2eFp18DSrgyQz4IT9pkdYk8eWHnMi9oZSqlC6J5mQzkFmaW5RrKb1GLQhg==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-fastify": { + "version": "0.43.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-fastify/-/instrumentation-fastify-0.43.0.tgz", + "integrity": "sha512-Lmdsg7tYiV+K3/NKVAQfnnLNGmakUOFdB0PhoTh2aXuSyCmyNnnDvhn2MsArAPTZ68wnD5Llh5HtmiuTkf+DyQ==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-fs": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-fs/-/instrumentation-fs-0.18.0.tgz", + "integrity": "sha512-kC40y6CEMONm8/MWwoF5GHWIC7gOdF+g3sgsjfwJaUkgD6bdWV+FgG0XApqSbTQndICKzw3RonVk8i7s6mHqhA==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-generic-pool": { + "version": "0.42.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-generic-pool/-/instrumentation-generic-pool-0.42.0.tgz", + "integrity": "sha512-J4QxqiQ1imtB9ogzsOnHra0g3dmmLAx4JCeoK3o0rFes1OirljNHnO8Hsj4s1jAir8WmWvnEEQO1y8yk6j2tog==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-graphql": { + "version": "0.46.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-graphql/-/instrumentation-graphql-0.46.0.tgz", + "integrity": "sha512-tplk0YWINSECcK89PGM7IVtOYenXyoOuhOQlN0X0YrcDUfMS4tZMKkVc0vyhNWYYrexnUHwNry2YNBNugSpjlQ==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-grpc": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-grpc/-/instrumentation-grpc-0.56.0.tgz", + "integrity": "sha512-cmqCZqyKtyu4oLx3rQmPMeqAo69er7ULnbEBTFCW0++AAimIoAXJptrEvB5X9HYr0NP2TqF8As/vlV3IVmY5OQ==", + "dependencies": { + "@opentelemetry/instrumentation": "0.56.0", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-grpc/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/instrumentation-hapi": { + "version": "0.44.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-hapi/-/instrumentation-hapi-0.44.0.tgz", + "integrity": "sha512-4HdNIMNXWK1O6nsaQOrACo83QWEVoyNODTdVDbUqtqXiv2peDfD0RAPhSQlSGWLPw3S4d9UoOmrV7s2HYj6T2A==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http": { + "version": "0.56.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.56.0.tgz", + "integrity": "sha512-/bWHBUAq8VoATnH9iLk5w8CE9+gj+RgYSUphe7hry472n6fYl7+4PvuScoQMdmSUTprKq/gyr2kOWL6zrC7FkQ==", + "dependencies": { + "@opentelemetry/core": "1.29.0", + "@opentelemetry/instrumentation": "0.56.0", + "@opentelemetry/semantic-conventions": "1.28.0", + "forwarded-parse": "2.1.2", + "semver": "^7.5.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/core": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.29.0.tgz", + "integrity": "sha512-gmT7vAreXl0DTHD2rVZcw3+l2g84+5XiHIqdBUxXbExymPCvSsGOpiwMmn8nkiJur28STV31wnhIDrzWDPzjfA==", + "dependencies": { + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/instrumentation-ioredis": { + "version": "0.46.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-ioredis/-/instrumentation-ioredis-0.46.0.tgz", + "integrity": "sha512-sOdsq8oGi29V58p1AkefHvuB3l2ymP1IbxRIX3y4lZesQWKL8fLhBmy8xYjINSQ5gHzWul2yoz7pe7boxhZcqQ==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/redis-common": "^0.36.2", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-kafkajs": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-kafkajs/-/instrumentation-kafkajs-0.6.0.tgz", + "integrity": "sha512-MGQrzqEUAl0tacKJUFpuNHJesyTi51oUzSVizn7FdvJplkRIdS11FukyZBZJEscofSEdk7Ycmg+kNMLi5QHUFg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-knex": { + "version": "0.43.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-knex/-/instrumentation-knex-0.43.0.tgz", + "integrity": "sha512-mOp0TRQNFFSBj5am0WF67fRO7UZMUmsF3/7HSDja9g3H4pnj+4YNvWWyZn4+q0rGrPtywminAXe0rxtgaGYIqg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-koa": { + "version": "0.46.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-koa/-/instrumentation-koa-0.46.0.tgz", + "integrity": "sha512-RcWXMQdJQANnPUaXbHY5G0Fg6gmleZ/ZtZeSsekWPaZmQq12FGk0L1UwodIgs31OlYfviAZ4yTeytoSUkgo5vQ==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-lru-memoizer": { + "version": "0.43.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-lru-memoizer/-/instrumentation-lru-memoizer-0.43.0.tgz", + "integrity": "sha512-fZc+1eJUV+tFxaB3zkbupiA8SL3vhDUq89HbDNg1asweYrEb9OlHIB+Ot14ZiHUc1qCmmWmZHbPTwa56mVVwzg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-memcached": { + "version": "0.42.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-memcached/-/instrumentation-memcached-0.42.0.tgz", + "integrity": "sha512-6peg2nImB4JNpK+kW95b12B6tRSwRpc0KCm6Ol41uDYPli800J9vWi+DGoPsmTrgZpkEfCe9Z9Ob9Z6Fth2zwg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@types/memcached": "^2.2.6" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mongodb": { + "version": "0.50.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mongodb/-/instrumentation-mongodb-0.50.0.tgz", + "integrity": "sha512-DtwJMjYFXFT5auAvv8aGrBj1h3ciA/dXQom11rxL7B1+Oy3FopSpanvwYxJ+z0qmBrQ1/iMuWELitYqU4LnlkQ==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mongoose": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mongoose/-/instrumentation-mongoose-0.45.0.tgz", + "integrity": "sha512-zHgNh+A01C5baI2mb5dAGyMC7DWmUpOfwpV8axtC0Hd5Uzqv+oqKgKbVDIVhOaDkPxjgVJwYF9YQZl2pw2qxIA==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mysql": { + "version": "0.44.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mysql/-/instrumentation-mysql-0.44.0.tgz", + "integrity": "sha512-al7jbXvT/uT1KV8gdNDzaWd5/WXf+mrjrsF0/NtbnqLa0UUFGgQnoK3cyborgny7I+KxWhL8h7YPTf6Zq4nKsg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@types/mysql": "2.15.26" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-mysql2": { + "version": "0.44.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-mysql2/-/instrumentation-mysql2-0.44.0.tgz", + "integrity": "sha512-e9QY4AGsjGFwmfHd6kBa4yPaQZjAq2FuxMb0BbKlXCAjG+jwqw+sr9xWdJGR60jMsTq52hx3mAlE3dUJ9BipxQ==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@opentelemetry/sql-common": "^0.40.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-nestjs-core": { + "version": "0.43.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-nestjs-core/-/instrumentation-nestjs-core-0.43.0.tgz", + "integrity": "sha512-NEo4RU7HTjiaXk3curqXUvCb9alRiFWxQY//+hvDXwWLlADX2vB6QEmVCeEZrKO+6I/tBrI4vNdAnbCY9ldZVg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-net": { + "version": "0.42.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-net/-/instrumentation-net-0.42.0.tgz", + "integrity": "sha512-RCX1e4aHBxpTdm3xyQWDF6dbfclRY1xXAzZnEwuFj1IO+DAqnu8oO11NRBIfH6TNRBmeBKbpiaGbmzCV9ULwIA==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-pg": { + "version": "0.49.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-pg/-/instrumentation-pg-0.49.0.tgz", + "integrity": "sha512-3alvNNjPXVdAPdY1G7nGRVINbDxRK02+KAugDiEpzw0jFQfU8IzFkSWA4jyU4/GbMxKvHD+XIOEfSjpieSodKw==", + "dependencies": { + "@opentelemetry/core": "^1.26.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "1.27.0", + "@opentelemetry/sql-common": "^0.40.1", + "@types/pg": "8.6.1", + "@types/pg-pool": "2.0.6" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-pg/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.27.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.27.0.tgz", + "integrity": "sha512-sAay1RrB+ONOem0OZanAR1ZI/k7yDpnOQSQmTMuGImUQb2y8EbSaCJ94FQluM74xoU03vlb2d2U90hZluL6nQg==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/instrumentation-pino": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-pino/-/instrumentation-pino-0.45.0.tgz", + "integrity": "sha512-u7XwRdMDPzB6PHRo1EJNxTmjpHPnLpssYlr5t89aWFXP6fP3M2oRKjyX8EZHTSky/6GOMy860mzmded2VVFvfg==", + "dependencies": { + "@opentelemetry/api-logs": "^0.56.0", + "@opentelemetry/core": "^1.25.0", + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-redis": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-redis/-/instrumentation-redis-0.45.0.tgz", + "integrity": "sha512-IKooJ9pUwPhL5nGEMi9QXvO6pMhwgJe6BzmZ0BMoZweKasC0Y0GekKjPw86Lhx+X1xoJCOFJhoWE9c5SnBJVcw==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/redis-common": "^0.36.2", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-redis-4": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-redis-4/-/instrumentation-redis-4-0.45.0.tgz", + "integrity": "sha512-Sjgym1xn3mdxPRH5CNZtoz+bFd3E3NlGIu7FoYr4YrQouCc9PbnmoBcmSkEdDy5LYgzNildPgsjx9l0EKNjKTQ==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/redis-common": "^0.36.2", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-restify": { + "version": "0.44.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-restify/-/instrumentation-restify-0.44.0.tgz", + "integrity": "sha512-JUIs6NcSkH+AtUgaUknD+1M4GQA5vOPKqwJqdaJbaEQzHo+QTDn8GY1iiSKXktL68OwRddbyQv6tu2NyCGcKSw==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-router": { + "version": "0.43.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-router/-/instrumentation-router-0.43.0.tgz", + "integrity": "sha512-IkSBWfzlpwLZSJMj3rDG21bDYqbWvW3D/HEx5yCxjUUWVbcz9tRKXjxwG1LB6ZJfnXwwVIOgbz+7XW0HyAXr9Q==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-socket.io": { + "version": "0.45.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-socket.io/-/instrumentation-socket.io-0.45.0.tgz", + "integrity": "sha512-X/CUjHqX1mZHEqXjD4AgVA5VXW1JHIauj1LDEjUDky/3RCsUTysj031x0Sq+8yBwcPyHF6k9vZ8DNw+CfxscOQ==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-tedious": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-tedious/-/instrumentation-tedious-0.17.0.tgz", + "integrity": "sha512-yRBz2409an03uVd1Q2jWMt3SqwZqRFyKoWYYX3hBAtPDazJ4w5L+1VOij71TKwgZxZZNdDBXImTQjii+VeuzLg==", + "dependencies": { + "@opentelemetry/instrumentation": "^0.56.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@types/tedious": "^4.0.14" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-undici": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-undici/-/instrumentation-undici-0.9.0.tgz", + "integrity": "sha512-lxc3cpUZ28CqbrWcUHxGW/ObDpMOYbuxF/ZOzeFZq54P9uJ2Cpa8gcrC9F716mtuiMaekwk8D6n34vg/JtkkxQ==", + "dependencies": { + "@opentelemetry/core": "^1.8.0", + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.7.0" + } + }, + "node_modules/@opentelemetry/instrumentation-winston": { + "version": "0.43.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-winston/-/instrumentation-winston-0.43.0.tgz", + "integrity": "sha512-TVvRwqjmf4+CcjsdkXc+VHiIG0Qzzim5dx8cN5wXRt4+UYIjyZpnhi/WmSjC0fJdkKb6DNjTIw7ktmB/eRj/jQ==", + "dependencies": { + "@opentelemetry/api-logs": "^0.56.0", + "@opentelemetry/instrumentation": "^0.56.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/otlp-exporter-base": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.57.2.tgz", + "integrity": "sha512-XdxEzL23Urhidyebg5E6jZoaiW5ygP/mRjxLHixogbqwDy2Faduzb5N0o/Oi+XTIJu+iyxXdVORjXax+Qgfxag==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-transformer": "0.57.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/otlp-grpc-exporter-base": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-grpc-exporter-base/-/otlp-grpc-exporter-base-0.57.2.tgz", + "integrity": "sha512-USn173KTWy0saqqRB5yU9xUZ2xdgb1Rdu5IosJnm9aV4hMTuFFRTUsQxbgc24QxpCHeoKzzCSnS/JzdV0oM2iQ==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/otlp-exporter-base": "0.57.2", + "@opentelemetry/otlp-transformer": "0.57.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.57.2.tgz", + "integrity": "sha512-48IIRj49gbQVK52jYsw70+Jv+JbahT8BqT2Th7C4H7RCM9d0gZ5sgNPoMpWldmfjvIsSgiGJtjfk9MeZvjhoig==", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-logs": "0.57.2", + "@opentelemetry/sdk-metrics": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1", + "protobufjs": "^7.3.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/api-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.57.2.tgz", + "integrity": "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/propagation-utils": { + "version": "0.30.16", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagation-utils/-/propagation-utils-0.30.16.tgz", + "integrity": "sha512-ZVQ3Z/PQ+2GQlrBfbMMMT0U7MzvYZLCPP800+ooyaBqm4hMvuQHfP028gB9/db0mwkmyEAMad9houukUVxhwcw==", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/propagator-b3": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.30.1.tgz", + "integrity": "sha512-oATwWWDIJzybAZ4pO76ATN5N6FFbOA1otibAVlS8v90B4S1wClnhRUk7K+2CHAwN1JKYuj4jh/lpCEG5BAqFuQ==", + "dependencies": { + "@opentelemetry/core": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/propagator-jaeger": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.30.1.tgz", + "integrity": "sha512-Pj/BfnYEKIOImirH76M4hDaBSx6HyZ2CXUqk+Kj02m6BB80c/yo4BdWkn/1gDFfU+YPY+bPR2U0DKBfdxCKwmg==", + "dependencies": { + "@opentelemetry/core": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/redis-common": { + "version": "0.36.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/redis-common/-/redis-common-0.36.2.tgz", + "integrity": "sha512-faYX1N0gpLhej/6nyp6bgRjzAKXn5GOEMYY7YhciSfCoITAktLUtQ36d24QEWNA1/WA1y6qQunCe0OhHRkVl9g==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/resource-detector-alibaba-cloud": { + "version": "0.29.7", + "resolved": "https://registry.npmjs.org/@opentelemetry/resource-detector-alibaba-cloud/-/resource-detector-alibaba-cloud-0.29.7.tgz", + "integrity": "sha512-PExUl/R+reSQI6Y/eNtgAsk6RHk1ElYSzOa8/FHfdc/nLmx9sqMasBEpLMkETkzDP7t27ORuXe4F9vwkV2uwwg==", + "dependencies": { + "@opentelemetry/core": "^1.26.0", + "@opentelemetry/resources": "^1.10.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/resource-detector-aws": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resource-detector-aws/-/resource-detector-aws-1.12.0.tgz", + "integrity": "sha512-Cvi7ckOqiiuWlHBdA1IjS0ufr3sltex2Uws2RK6loVp4gzIJyOijsddAI6IZ5kiO8h/LgCWe8gxPmwkTKImd+Q==", + "dependencies": { + "@opentelemetry/core": "^1.0.0", + "@opentelemetry/resources": "^1.10.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/resource-detector-azure": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resource-detector-azure/-/resource-detector-azure-0.4.0.tgz", + "integrity": "sha512-Ix3DwsbUWyLbBCZ1yqT3hJxc5wFPaJ6dvsIgJA/nmjScwscRCWQqTWXywY4+Q+tytLPnuAKZWbBhxcNvNlcn5Q==", + "dependencies": { + "@opentelemetry/core": "^1.25.1", + "@opentelemetry/resources": "^1.10.1", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/resource-detector-container": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/@opentelemetry/resource-detector-container/-/resource-detector-container-0.5.3.tgz", + "integrity": "sha512-x5DxWu+ZALBuFpxwO2viv9ktH4Y3Gk9LaYKn2U8J+aeD412iy/OcGLPbQ76Px7pQ8qaJ5rnjcevBOHYT4aA+zQ==", + "dependencies": { + "@opentelemetry/core": "^1.26.0", + "@opentelemetry/resources": "^1.10.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/resource-detector-gcp": { + "version": "0.31.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resource-detector-gcp/-/resource-detector-gcp-0.31.0.tgz", + "integrity": "sha512-KNd2Ab3hc0PsBVtWMie11AbQ7i1KXNPYlgTsyGPCHBed6KARVfPekfjWbPEbTXwart4la98abxL0sJLsfgyJSA==", + "dependencies": { + "@opentelemetry/core": "^1.0.0", + "@opentelemetry/resources": "^1.10.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "gcp-metadata": "^6.0.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/resources": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.30.1.tgz", + "integrity": "sha512-5UxZqiAgLYGFjS4s9qm5mBVo433u+dSPUFWVWXmLAD4wB65oMCoXaJP1KJa9DIYYMeHu3z4BZcStG3LC593cWA==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/resources/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.57.2.tgz", + "integrity": "sha512-TXFHJ5c+BKggWbdEQ/inpgIzEmS2BGQowLE9UhsMd7YYlUfBQJ4uax0VF/B5NYigdM/75OoJGhAV3upEhK+3gg==", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.4.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/api-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.57.2.tgz", + "integrity": "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-metrics": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-1.30.1.tgz", + "integrity": "sha512-q9zcZ0Okl8jRgmy7eNW3Ku1XSgg3sDLa5evHZpCwjspw7E8Is4K/haRPDJrBcX3YSn/Y7gUvFnByNYEKQNbNog==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-node": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-node/-/sdk-node-0.57.2.tgz", + "integrity": "sha512-8BaeqZyN5sTuPBtAoY+UtKwXBdqyuRKmekN5bFzAO40CgbGzAxfTpiL3PBerT7rhZ7p2nBdq7FaMv/tBQgHE4A==", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/exporter-logs-otlp-grpc": "0.57.2", + "@opentelemetry/exporter-logs-otlp-http": "0.57.2", + "@opentelemetry/exporter-logs-otlp-proto": "0.57.2", + "@opentelemetry/exporter-metrics-otlp-grpc": "0.57.2", + "@opentelemetry/exporter-metrics-otlp-http": "0.57.2", + "@opentelemetry/exporter-metrics-otlp-proto": "0.57.2", + "@opentelemetry/exporter-prometheus": "0.57.2", + "@opentelemetry/exporter-trace-otlp-grpc": "0.57.2", + "@opentelemetry/exporter-trace-otlp-http": "0.57.2", + "@opentelemetry/exporter-trace-otlp-proto": "0.57.2", + "@opentelemetry/exporter-zipkin": "1.30.1", + "@opentelemetry/instrumentation": "0.57.2", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/sdk-logs": "0.57.2", + "@opentelemetry/sdk-metrics": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1", + "@opentelemetry/sdk-trace-node": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/api-logs": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.57.2.tgz", + "integrity": "sha512-uIX52NnTM0iBh84MShlpouI7UKqkZ7MrUszTmaypHBu4r7NofznSnQRfJ+uUeDtQDj6w8eFGg5KBLDAwAPz1+A==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/instrumentation": { + "version": "0.57.2", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.57.2.tgz", + "integrity": "sha512-BdBGhQBh8IjZ2oIIX6F2/Q3LKm/FDDKi6ccYKcBTeilh6SNdNKveDOLk73BkSJjQLJk6qe4Yh+hHw1UPhCDdrg==", + "dependencies": { + "@opentelemetry/api-logs": "0.57.2", + "@types/shimmer": "^1.2.0", + "import-in-the-middle": "^1.8.1", + "require-in-the-middle": "^7.1.1", + "semver": "^7.5.2", + "shimmer": "^1.2.1" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-trace-base": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.30.1.tgz", + "integrity": "sha512-jVPgBbH1gCy2Lb7X0AVQ8XAfgg0pJ4nvl8/IiQA6nxOsPvS+0zMJaFSs2ltXe0J6C8dqjcnpyqINDJmU30+uOg==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/resources": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-trace-base/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-trace-node": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.30.1.tgz", + "integrity": "sha512-cBjYOINt1JxXdpw1e5MlHmFRc5fgj4GW/86vsKFxJCJ8AL4PdVtYH41gWwl4qd4uQjqEL1oJVrXkSy5cnduAnQ==", + "dependencies": { + "@opentelemetry/context-async-hooks": "1.30.1", + "@opentelemetry/core": "1.30.1", + "@opentelemetry/propagator-b3": "1.30.1", + "@opentelemetry/propagator-jaeger": "1.30.1", + "@opentelemetry/sdk-trace-base": "1.30.1", + "semver": "^7.5.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/semantic-conventions": { + "version": "1.38.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.38.0.tgz", + "integrity": "sha512-kocjix+/sSggfJhwXqClZ3i9Y/MI0fp7b+g7kCRm6psy2dsf8uApTRclwG18h8Avm7C9+fnt+O36PspJ/OzoWg==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sql-common": { + "version": "0.40.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/sql-common/-/sql-common-0.40.1.tgz", + "integrity": "sha512-nSDlnHSqzC3pXn/wZEZVLuAuJ1MYMXPBwtv2qAbCa3847SaHItdE7SzUq/Jtb0KZmh1zfAbNi3AAMjztTT4Ugg==", + "dependencies": { + "@opentelemetry/core": "^1.1.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0" + } + }, + "node_modules/@paralleldrive/cuid2": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.3.1.tgz", + "integrity": "sha512-XO7cAxhnTZl0Yggq6jOgjiOHhbgcO4NqFqwSmQpjK3b6TEE6Uj/jfSk6wzYyemh3+I0sHirKSetjQwn5cZktFw==", + "dependencies": { + "@noble/hashes": "^1.1.5" + } + }, + "node_modules/@prisma/client": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/client/-/client-6.19.1.tgz", + "integrity": "sha512-4SXj4Oo6HyQkLUWT8Ke5R0PTAfVOKip5Roo+6+b2EDTkFg5be0FnBWiuRJc0BC0sRQIWGMLKW1XguhVfW/z3/A==", + "hasInstallScript": true, + "engines": { + "node": ">=18.18" + }, + "peerDependencies": { + "prisma": "*", + "typescript": ">=5.1.0" + }, + "peerDependenciesMeta": { + "prisma": { + "optional": true + }, + "typescript": { + "optional": true + } + } + }, + "node_modules/@prisma/config": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/config/-/config-6.19.1.tgz", + "integrity": "sha512-bUL/aYkGXLwxVGhJmQMtslLT7KPEfUqmRa919fKI4wQFX4bIFUKiY8Jmio/2waAjjPYrtuDHa7EsNCnJTXxiOw==", + "devOptional": true, + "dependencies": { + "c12": "3.1.0", + "deepmerge-ts": "7.1.5", + "effect": "3.18.4", + "empathic": "2.0.0" + } + }, + "node_modules/@prisma/debug": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/debug/-/debug-6.19.1.tgz", + "integrity": "sha512-h1JImhlAd/s5nhY/e9qkAzausWldbeT+e4nZF7A4zjDYBF4BZmKDt4y0jK7EZapqOm1kW7V0e9agV/iFDy3fWw==", + "devOptional": true + }, + "node_modules/@prisma/engines": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/engines/-/engines-6.19.1.tgz", + "integrity": "sha512-xy95dNJ7DiPf9IJ3oaVfX785nbFl7oNDzclUF+DIiJw6WdWCvPl0LPU0YqQLsrwv8N64uOQkH391ujo3wSo+Nw==", + "devOptional": true, + "hasInstallScript": true, + "dependencies": { + "@prisma/debug": "6.19.1", + "@prisma/engines-version": "7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7", + "@prisma/fetch-engine": "6.19.1", + "@prisma/get-platform": "6.19.1" + } + }, + "node_modules/@prisma/engines-version": { + "version": "7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7", + "resolved": "https://registry.npmjs.org/@prisma/engines-version/-/engines-version-7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7.tgz", + "integrity": "sha512-03bgb1VD5gvuumNf+7fVGBzfpJPjmqV423l/WxsWk2cNQ42JD0/SsFBPhN6z8iAvdHs07/7ei77SKu7aZfq8bA==", + "devOptional": true + }, + "node_modules/@prisma/fetch-engine": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/fetch-engine/-/fetch-engine-6.19.1.tgz", + "integrity": "sha512-mmgcotdaq4VtAHO6keov3db+hqlBzQS6X7tR7dFCbvXjLVTxBYdSJFRWz+dq7F9p6dvWyy1X0v8BlfRixyQK6g==", + "devOptional": true, + "dependencies": { + "@prisma/debug": "6.19.1", + "@prisma/engines-version": "7.1.1-3.c2990dca591cba766e3b7ef5d9e8a84796e47ab7", + "@prisma/get-platform": "6.19.1" + } + }, + "node_modules/@prisma/get-platform": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/@prisma/get-platform/-/get-platform-6.19.1.tgz", + "integrity": "sha512-zsg44QUiQAnFUyh6Fbt7c9HjMXHwFTqtrgcX7DAZmRgnkPyYT7Sh8Mn8D5PuuDYNtMOYcpLGg576MLfIORsBYw==", + "devOptional": true, + "dependencies": { + "@prisma/debug": "6.19.1" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + }, + "node_modules/@scarf/scarf": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@scarf/scarf/-/scarf-1.4.0.tgz", + "integrity": "sha512-xxeapPiUXdZAE3che6f3xogoJPeZgig6omHEy1rIY5WVsB3H2BHNnZH+gHG6x91SCWyQCzWGsuL2Hh3ClO5/qQ==", + "hasInstallScript": true + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz", + "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@socket.io/component-emitter": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", + "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==" + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "devOptional": true + }, + "node_modules/@temporalio/client": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/client/-/client-1.14.0.tgz", + "integrity": "sha512-kjzJ+7M2kHj32cTTSQT5WOjEIOxY0TNV5g6Sw9PzWmKWdtIZig+d7qUIA3VjDe/TieNozxjR2wNAX5sKzYFANA==", + "dependencies": { + "@grpc/grpc-js": "^1.12.4", + "@temporalio/common": "1.14.0", + "@temporalio/proto": "1.14.0", + "abort-controller": "^3.0.0", + "long": "^5.2.3", + "uuid": "^11.1.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/client/node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/@temporalio/common": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/common/-/common-1.14.0.tgz", + "integrity": "sha512-jVmurBdFHdqw/wIehzVJikS8MhavL630p88TJ64P5PH0nP8S5V8R5vhkmHZ7n0sMRO+A0QFyWYyvnccu6MQZvw==", + "dependencies": { + "@temporalio/proto": "1.14.0", + "long": "^5.2.3", + "ms": "3.0.0-canary.1", + "nexus-rpc": "^0.0.1", + "proto3-json-serializer": "^2.0.0" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@temporalio/common/node_modules/ms": { + "version": "3.0.0-canary.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-3.0.0-canary.1.tgz", + "integrity": "sha512-kh8ARjh8rMN7Du2igDRO9QJnqCb2xYTJxyQYK7vJJS4TvLLmsbyhiKpSW+t+y26gyOyMd0riphX0GeWKU3ky5g==", + "engines": { + "node": ">=12.13" + } + }, + "node_modules/@temporalio/proto": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@temporalio/proto/-/proto-1.14.0.tgz", + "integrity": "sha512-duYVjt3x6SkuFzJr+5NlklEgookPqW065qdcvogmdfVjrgiwz4W/07AN3+fL4ufmqt1//0SyF6nyqv9RNADYNA==", + "dependencies": { + "long": "^5.2.3", + "protobufjs": "^7.2.5" + }, + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/@tokenizer/inflate": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/@tokenizer/inflate/-/inflate-0.3.1.tgz", + "integrity": "sha512-4oeoZEBQdLdt5WmP/hx1KZ6D3/Oid/0cUb2nk4F0pTDAWy+KCH3/EnAkZF/bvckWo8I33EqBm01lIPgmgc8rCA==", + "dependencies": { + "debug": "^4.4.1", + "fflate": "^0.8.2", + "token-types": "^6.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/@tokenizer/token": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz", + "integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==" + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.12.tgz", + "integrity": "sha512-UCYBaeFvM11aU2y3YPZ//O5Rhj+xKyzy7mvcIoAjASbigy8mHMryP5cK7dgjlz2hWxh1g5pLw084E0a/wlUSFQ==", + "dev": true + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, + "node_modules/@types/aws-lambda": { + "version": "8.10.143", + "resolved": "https://registry.npmjs.org/@types/aws-lambda/-/aws-lambda-8.10.143.tgz", + "integrity": "sha512-u5vzlcR14ge/4pMTTMDQr3MF0wEe38B2F9o84uC4F43vN5DGTy63npRrB6jQhyt+C0lGv4ZfiRcRkqJoZuPnmg==" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.6", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz", + "integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==", + "dev": true, + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bunyan": { + "version": "1.8.9", + "resolved": "https://registry.npmjs.org/@types/bunyan/-/bunyan-1.8.9.tgz", + "integrity": "sha512-ZqS9JGpBxVOvsawzmVt30sP++gSQMTejCkIAQ3VdadOcRE8izTyW66hufvwLeH+YEGP6Js2AW7Gz+RMyvrEbmw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/cors": { + "version": "2.8.19", + "resolved": "https://registry.npmjs.org/@types/cors/-/cors-2.8.19.tgz", + "integrity": "sha512-mFNylyeyqN93lfe/9CSxOGREz8cpzAhH+E93xJ4xWQf62V8sQ/24reV2nyzUWM6H6Xji+GGHpkbLe7pVoUEskg==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/eslint": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", + "dev": true, + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dev": true, + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true + }, + "node_modules/@types/express": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.6.tgz", + "integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==", + "dev": true, + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^5.0.0", + "@types/serve-static": "^2" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.1.0.tgz", + "integrity": "sha512-jnHMsrd0Mwa9Cf4IdOzbz543y4XJepXrbia2T4b6+spXC2We3t1y6K44D3mR8XMFSXMCf3/l7rCgddfx7UNVBA==", + "dev": true, + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz", + "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/http-errors": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz", + "integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==", + "dev": true + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.14", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz", + "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==", + "dev": true, + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, + "node_modules/@types/jsonwebtoken": { + "version": "9.0.10", + "resolved": "https://registry.npmjs.org/@types/jsonwebtoken/-/jsonwebtoken-9.0.10.tgz", + "integrity": "sha512-asx5hIG9Qmf/1oStypjanR7iKTv0gXQ1Ov/jfrX6kS/EO0OFni8orbmGCn0672NHR3kXHwpAwR+B368ZGN/2rA==", + "dependencies": { + "@types/ms": "*", + "@types/node": "*" + } + }, + "node_modules/@types/luxon": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/@types/luxon/-/luxon-3.7.1.tgz", + "integrity": "sha512-H3iskjFIAn5SlJU7OuxUmTEpebK6TKB8rxZShDslBMZJ5u9S//KM1sbdAisiSrqwLQncVjnpi2OK2J51h+4lsg==" + }, + "node_modules/@types/memcached": { + "version": "2.2.10", + "resolved": "https://registry.npmjs.org/@types/memcached/-/memcached-2.2.10.tgz", + "integrity": "sha512-AM9smvZN55Gzs2wRrqeMHVP7KE8KWgCJO/XL5yCly2xF6EKa4YlbpK+cLSAH4NG/Ah64HrlegmGqW8kYws7Vxg==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==" + }, + "node_modules/@types/mysql": { + "version": "2.15.26", + "resolved": "https://registry.npmjs.org/@types/mysql/-/mysql-2.15.26.tgz", + "integrity": "sha512-DSLCOXhkvfS5WNNPbfn2KdICAmk8lLc+/PNvnPnF7gOdMZCxopXduqv0OQ13y/yA/zXTSikZZqVgybUxOEg6YQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/node": { + "version": "22.19.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.3.tgz", + "integrity": "sha512-1N9SBnWYOJTrNZCdh/yJE+t910Y128BoyY+zBLWhL3r0TYzlTmFdXrPwHL9DyFZmlEXNQQolTZh3KHV31QDhyA==", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/pg": { + "version": "8.6.1", + "resolved": "https://registry.npmjs.org/@types/pg/-/pg-8.6.1.tgz", + "integrity": "sha512-1Kc4oAGzAl7uqUStZCDvaLFqZrW9qWSjXOmBfdgyBP5La7Us6Mg4GBvRlSoaZMhQF/zSj1C8CtKMBkoiT8eL8w==", + "dependencies": { + "@types/node": "*", + "pg-protocol": "*", + "pg-types": "^2.2.0" + } + }, + "node_modules/@types/pg-pool": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/pg-pool/-/pg-pool-2.0.6.tgz", + "integrity": "sha512-TaAUE5rq2VQYxab5Ts7WZhKNmuN78Q6PiFonTDdpbx8a1H0M1vhy3rhiMjl+e2iHmogyMw7jZF4FrE6eJUy5HQ==", + "dependencies": { + "@types/pg": "*" + } + }, + "node_modules/@types/qs": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.14.0.tgz", + "integrity": "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==", + "dev": true + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", + "dev": true + }, + "node_modules/@types/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz", + "integrity": "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-2.2.0.tgz", + "integrity": "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ==", + "dev": true, + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*" + } + }, + "node_modules/@types/shimmer": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@types/shimmer/-/shimmer-1.2.0.tgz", + "integrity": "sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==" + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==", + "dev": true + }, + "node_modules/@types/tedious": { + "version": "4.0.14", + "resolved": "https://registry.npmjs.org/@types/tedious/-/tedious-4.0.14.tgz", + "integrity": "sha512-KHPsfX/FoVbUGbyYvk1q9MMQHLPeRZhRJZdO45Q4YjvFkv4hMNghCWTvy7rdKessBsmtz4euWCWAB6/tVpI1Iw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/validator": { + "version": "13.15.10", + "resolved": "https://registry.npmjs.org/@types/validator/-/validator-13.15.10.tgz", + "integrity": "sha512-T8L6i7wCuyoK8A/ZeLYt1+q0ty3Zb9+qbSSvrIVitzT3YjZqkTZ40IbRsPanlB4h1QB3JVL1SYCdR6ngtFYcuA==" + }, + "node_modules/@types/yargs": { + "version": "17.0.35", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.35.tgz", + "integrity": "sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "dev": true + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "dev": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "dev": true, + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", + "dev": true, + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", + "dev": true, + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", + "dev": true + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", + "dev": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@willsoto/nestjs-prometheus": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@willsoto/nestjs-prometheus/-/nestjs-prometheus-6.0.2.tgz", + "integrity": "sha512-ePyLZYdIrOOdlOWovzzMisIgviXqhPVzFpSMKNNhn6xajhRHeBsjAzSdpxZTc6pnjR9hw1lNAHyKnKl7lAPaVg==", + "peerDependencies": { + "@nestjs/common": "^7.0.0 || ^8.0.0 || ^9.0.0 || ^10.0.0 || ^11.0.0", + "prom-client": "^15.0.0" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "dev": true + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "dev": true + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/accepts": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz", + "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==", + "dependencies": { + "mime-types": "^3.0.0", + "negotiator": "^1.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", + "peerDependencies": { + "acorn": "^8" + } + }, + "node_modules/acorn-import-phases": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/acorn-import-phases/-/acorn-import-phases-1.0.4.tgz", + "integrity": "sha512-wKmbr/DDiIXzEOiWrTTUcDm24kQ2vGfZQvM2fwg2vXqR5uW6aapr7ObPtj1th32b9u90/Pf4AItvdTh42fBmVQ==", + "dev": true, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "acorn": "^8.14.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "engines": { + "node": ">= 14" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz", + "integrity": "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/ansis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/ansis/-/ansis-4.2.0.tgz", + "integrity": "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==", + "dev": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/anymatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/append-field": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/append-field/-/append-field-1.0.0.tgz", + "integrity": "sha512-klpgFSWLW1ZEs8svjfb7g4qWY0YS5imI82dTg+QahUvJ8YqAY0P10Uk8tTyh9ZGuYEZEMaeJYCF5BFuX552hsw==" + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/array-timsort": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/array-timsort/-/array-timsort-1.0.3.tgz", + "integrity": "sha512-/+3GRL7dDAGEfM6TseQk/U+mi18TU2Ms9I3UlLdUMhz2hbvGNTKdj9xniwXfUqgYhHxRx0+8UnKkvlNwVU+cWQ==", + "dev": true + }, + "node_modules/asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==", + "engines": { + "node": "*" + } + }, + "node_modules/aws4": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz", + "integrity": "sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==" + }, + "node_modules/axios": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", + "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz", + "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==", + "dev": true, + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5" + }, + "peerDependencies": { + "@babel/core": "^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/base64id": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/base64id/-/base64id-2.0.0.tgz", + "integrity": "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog==", + "engines": { + "node": "^4.5.0 || >= 5.9" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.7", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.7.tgz", + "integrity": "sha512-k9xFKplee6KIio3IDbwj+uaCLpqzOwakOgmqzPezM0sFJlFKcg30vk2wOiAJtkTSfx0SSQDSe8q+mWA/fSH5Zg==", + "dev": true, + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, + "node_modules/bignumber.js": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", + "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "engines": { + "node": "*" + } + }, + "node_modules/bintrees": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bintrees/-/bintrees-1.0.2.tgz", + "integrity": "sha512-VOMgTMwjAaUG580SXn3LacVgjurrbMme7ZZNYGSSV7mmtY6QQRh0Eg3pwIcntQ77DErK1L0NxkbetjcoXzVwKw==" + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/body-parser": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.1.tgz", + "integrity": "sha512-nfDwkulwiZYQIGwxdy0RUmowMhKcFVcYXUU7m4QlKYim1rUtg83xm2yjZ40QjDuc291AJjjeSc9b++AWHSgSHw==", + "dependencies": { + "bytes": "^3.1.2", + "content-type": "^1.0.5", + "debug": "^4.4.3", + "http-errors": "^2.0.0", + "iconv-lite": "^0.7.0", + "on-finished": "^2.4.1", + "qs": "^6.14.0", + "raw-body": "^3.0.1", + "type-is": "^2.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/boxen": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz", + "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==", + "dependencies": { + "ansi-align": "^3.0.0", + "camelcase": "^6.2.0", + "chalk": "^4.1.0", + "cli-boxes": "^2.2.1", + "string-width": "^4.2.2", + "type-fest": "^0.20.2", + "widest-line": "^3.1.0", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boxen/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==" + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/byline": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", + "integrity": "sha512-s6webAy+R4SR8XVuJWt2V2rGvhnrhxN+9S15GNuTK3wKPOXFF6RNc+8ug2XhH+2s4f+uudG4kUVYmYOQWL2g0Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/c12": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/c12/-/c12-3.1.0.tgz", + "integrity": "sha512-uWoS8OU1MEIsOv8p/5a82c3H31LsWVR5qiyXVfBNOzfffjUWtPnhAb4BYI2uG2HfGmZmFjCtui5XNWaps+iFuw==", + "devOptional": true, + "dependencies": { + "chokidar": "^4.0.3", + "confbox": "^0.2.2", + "defu": "^6.1.4", + "dotenv": "^16.6.1", + "exsolve": "^1.0.7", + "giget": "^2.0.0", + "jiti": "^2.4.2", + "ohash": "^2.0.11", + "pathe": "^2.0.3", + "perfect-debounce": "^1.0.0", + "pkg-types": "^2.2.0", + "rc9": "^2.1.2" + }, + "peerDependencies": { + "magicast": "^0.3.5" + }, + "peerDependenciesMeta": { + "magicast": { + "optional": true + } + } + }, + "node_modules/c12/node_modules/dotenv": { + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", + "devOptional": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001760", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001760.tgz", + "integrity": "sha512-7AAMPcueWELt1p3mi13HR/LHH0TJLT11cnwDJEs3xA4+CK/PLKeO9Kl1oru24htkyUKtkGCvAx4ohB0Ttry8Dw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/chardet": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-2.1.1.tgz", + "integrity": "sha512-PsezH1rqdV9VvyNhxxOW32/d75r01NY7TQCmOqomRo15ZSOKbpTFVsfjghxo6JloQUCGnH4k1LGu0R4yCLlWQQ==", + "dev": true + }, + "node_modules/check-disk-space": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/check-disk-space/-/check-disk-space-3.4.0.tgz", + "integrity": "sha512-drVkSqfwA+TvuEhFipiR1OC9boEGZL5RrWvVsOthdcvQNXyCCuKkEiTOTXZ7qxSf/GLwq4GvzfrQD/Wz325hgw==", + "engines": { + "node": ">=16" + } + }, + "node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "devOptional": true, + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "engines": { + "node": ">=18" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "dev": true, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/citty": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/citty/-/citty-0.1.6.tgz", + "integrity": "sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==", + "devOptional": true, + "dependencies": { + "consola": "^3.2.3" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==" + }, + "node_modules/class-transformer": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/class-transformer/-/class-transformer-0.5.1.tgz", + "integrity": "sha512-SQa1Ws6hUbfC98vKGxZH3KFY0Y1lm5Zm0SY8XX9zbK7FJCyVEac3ATW0RIpwzW+oOfmHE5PMPufDG9hCfoEOMw==" + }, + "node_modules/class-validator": { + "version": "0.14.3", + "resolved": "https://registry.npmjs.org/class-validator/-/class-validator-0.14.3.tgz", + "integrity": "sha512-rXXekcjofVN1LTOSw+u4u9WXVEUvNBVjORW154q/IdmYWy1nMbOU9aNtZB0t8m+FJQ9q91jlr2f9CwwUFdFMRA==", + "dependencies": { + "@types/validator": "^13.15.3", + "libphonenumber-js": "^1.11.1", + "validator": "^13.15.20" + } + }, + "node_modules/cli-boxes": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", + "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-width": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", + "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + "dev": true, + "engines": { + "node": ">= 12" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/cockatiel": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/cockatiel/-/cockatiel-3.2.1.tgz", + "integrity": "sha512-gfrHV6ZPkquExvMh9IOkKsBzNDk6sDuZ6DdBGUBkvFnTCqCxzpuq48RySgP0AnaqQkw2zynOFj9yly6T1Q2G5Q==", + "engines": { + "node": ">=16" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.3.tgz", + "integrity": "sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw==", + "dev": true + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/comment-json": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/comment-json/-/comment-json-4.4.1.tgz", + "integrity": "sha512-r1To31BQD5060QdkC+Iheai7gHwoSZobzunqkf2/kQ6xIAfJyrKNAFUwdKvkK7Qgu7pVTKQEa7ok7Ed3ycAJgg==", + "dev": true, + "dependencies": { + "array-timsort": "^1.0.3", + "core-util-is": "^1.0.3", + "esprima": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/concat-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz", + "integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==", + "engines": [ + "node >= 6.0" + ], + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.0.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/confbox": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.2.2.tgz", + "integrity": "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==", + "devOptional": true + }, + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/content-disposition": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.1.tgz", + "integrity": "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz", + "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==", + "engines": { + "node": ">=6.6.0" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "dev": true + }, + "node_modules/cors": { + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", + "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dev": true, + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true + }, + "node_modules/cron": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/cron/-/cron-4.3.5.tgz", + "integrity": "sha512-hKPP7fq1+OfyCqoePkKfVq7tNAdFwiQORr4lZUHwrf0tebC65fYEeWgOrXOL6prn1/fegGOdTfrM6e34PJfksg==", + "dependencies": { + "@types/luxon": "~3.7.0", + "luxon": "~3.7.0" + }, + "engines": { + "node": ">=18.x" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz", + "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==", + "dev": true, + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deepmerge-ts": { + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/deepmerge-ts/-/deepmerge-ts-7.1.5.tgz", + "integrity": "sha512-HOJkrhaYsweh+W+e74Yn7YStZOilkoPb6fycpwNLKzSPtruFs48nYis0zy5yJz1+ktUhHxoRDJ27RQAWLIJVJw==", + "devOptional": true, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defu": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.4.tgz", + "integrity": "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==", + "devOptional": true + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destr": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/destr/-/destr-2.0.5.tgz", + "integrity": "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==", + "devOptional": true + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dotenv-expand": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-12.0.1.tgz", + "integrity": "sha512-LaKRbou8gt0RNID/9RoI+J2rvXsBRPMV7p+ElHlPhcSARbCPDYcYG2s1TIzAfWv4YSgyY5taidWzzs31lNV3yQ==", + "dependencies": { + "dotenv": "^16.4.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "dependencies": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/effect": { + "version": "3.18.4", + "resolved": "https://registry.npmjs.org/effect/-/effect-3.18.4.tgz", + "integrity": "sha512-b1LXQJLe9D11wfnOKAk3PKxuqYshQ0Heez+y5pnkd3jLj1yx9QhM72zZ9uUrOQyNvrs2GZZd/3maL0ZV18YuDA==", + "devOptional": true, + "dependencies": { + "@standard-schema/spec": "^1.0.0", + "fast-check": "^3.23.1" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.267", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", + "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", + "dev": true + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/empathic": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/empathic/-/empathic-2.0.0.tgz", + "integrity": "sha512-i6UzDscO/XfAcNYD75CfICkmfLedpyPDdozrLMmQc5ORaQcdMoc21OnlEylMIqI7U8eniKrPMxxtj8k0vhmJhA==", + "devOptional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/engine.io": { + "version": "6.6.4", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-6.6.4.tgz", + "integrity": "sha512-ZCkIjSYNDyGn0R6ewHDtXgns/Zre/NT6Agvq1/WobF7JXgFff4SeDroKiCO3fNJreU9YG429Sc81o4w5ok/W5g==", + "dependencies": { + "@types/cors": "^2.8.12", + "@types/node": ">=10.0.0", + "accepts": "~1.3.4", + "base64id": "2.0.0", + "cookie": "~0.7.2", + "cors": "~2.8.5", + "debug": "~4.3.1", + "engine.io-parser": "~5.2.1", + "ws": "~8.17.1" + }, + "engines": { + "node": ">=10.2.0" + } + }, + "node_modules/engine.io-parser": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-5.2.3.tgz", + "integrity": "sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/engine.io/node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/engine.io/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/engine.io/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/engine.io/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/engine.io/node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/engine.io/node_modules/ws": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.4", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.4.tgz", + "integrity": "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==", + "dev": true, + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.2", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.39.2.tgz", + "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/eventemitter2": { + "version": "6.4.9", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.9.tgz", + "integrity": "sha512-JEPTiaOt9f04oa6NOkc4aH+nVp5I3wEjpHbIPqfgCdD5v5bUzy7xQqwcVO2aDQgOWhI28da57HksMrzK9HlRxg==" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "dev": true, + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/execa/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/express": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", + "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", + "peer": true, + "dependencies": { + "accepts": "^2.0.0", + "body-parser": "^2.2.1", + "content-disposition": "^1.0.0", + "content-type": "^1.0.5", + "cookie": "^0.7.1", + "cookie-signature": "^1.2.1", + "debug": "^4.4.0", + "depd": "^2.0.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "finalhandler": "^2.1.0", + "fresh": "^2.0.0", + "http-errors": "^2.0.0", + "merge-descriptors": "^2.0.0", + "mime-types": "^3.0.0", + "on-finished": "^2.4.1", + "once": "^1.4.0", + "parseurl": "^1.3.3", + "proxy-addr": "^2.0.7", + "qs": "^6.14.0", + "range-parser": "^1.2.1", + "router": "^2.2.0", + "send": "^1.1.0", + "serve-static": "^2.2.0", + "statuses": "^2.0.1", + "type-is": "^2.0.1", + "vary": "^1.1.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/exsolve": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/exsolve/-/exsolve-1.0.8.tgz", + "integrity": "sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==", + "devOptional": true + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", + "engines": [ + "node >=0.6.0" + ] + }, + "node_modules/fast-check": { + "version": "3.23.2", + "resolved": "https://registry.npmjs.org/fast-check/-/fast-check-3.23.2.tgz", + "integrity": "sha512-h5+1OzzfCC3Ef7VbtKdcv7zsstUQwUDlYpUTvjeUsJAssPgLn7QzbboPtL5ro04Mq0rPOsMzl7q5hIbRs2wD1A==", + "devOptional": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "dependencies": { + "pure-rand": "^6.1.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fast-safe-stringify": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", + "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==" + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ] + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fflate": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz", + "integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==" + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/file-type": { + "version": "21.1.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-21.1.0.tgz", + "integrity": "sha512-boU4EHmP3JXkwDo4uhyBhTt5pPstxB6eEXKJBu2yu2l7aAMMm7QQYQEzssJmKReZYrFdFOJS8koVo6bXIBGDqA==", + "dependencies": { + "@tokenizer/inflate": "^0.3.1", + "strtok3": "^10.3.1", + "token-types": "^6.0.0", + "uint8array-extras": "^1.4.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz", + "integrity": "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==", + "dependencies": { + "debug": "^4.4.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "on-finished": "^2.4.1", + "parseurl": "^1.3.3", + "statuses": "^2.0.1" + }, + "engines": { + "node": ">= 18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==", + "engines": { + "node": "*" + } + }, + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-9.1.0.tgz", + "integrity": "sha512-mpafl89VFPJmhnJ1ssH+8wmM2b50n+Rew5x42NeI2U78aRWgtkEtGmctp7iT16UjquJTjorEmIfESj3DxdW84Q==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.16.7", + "chalk": "^4.1.2", + "chokidar": "^4.0.1", + "cosmiconfig": "^8.2.0", + "deepmerge": "^4.2.2", + "fs-extra": "^10.0.0", + "memfs": "^3.4.1", + "minimatch": "^3.0.4", + "node-abort-controller": "^3.0.1", + "schema-utils": "^3.1.1", + "semver": "^7.3.5", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">=14.21.3" + }, + "peerDependencies": { + "typescript": ">3.6.0", + "webpack": "^5.11.0" + } + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/form-data/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/form-data/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/forwarded-parse": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/forwarded-parse/-/forwarded-parse-2.1.2.tgz", + "integrity": "sha512-alTFZZQDKMporBH77856pXgzhEzaUVmLCDk+egLgIgHst3Tpndzz8MnKe+GzRJRfvVdn69HhpW7cmXzvtLvJAw==" + }, + "node_modules/fresh": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz", + "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/fs-monkey": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.1.0.tgz", + "integrity": "sha512-QMUezzXWII9EV5aTFXW1UBVUO77wYPpjqIF8/AviUCThNeSYZykpoTixUeaNNBwmCev0AMDWMAni+f8Hxb1IFw==", + "dev": true + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gaxios": { + "version": "6.7.1", + "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz", + "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==", + "dependencies": { + "extend": "^3.0.2", + "https-proxy-agent": "^7.0.1", + "is-stream": "^2.0.0", + "node-fetch": "^2.6.9", + "uuid": "^9.0.1" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/gaxios/node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/gcp-metadata": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.1.tgz", + "integrity": "sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==", + "dependencies": { + "gaxios": "^6.1.1", + "google-logging-utils": "^0.0.2", + "json-bigint": "^1.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "dependencies": { + "assert-plus": "^1.0.0" + } + }, + "node_modules/giget": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/giget/-/giget-2.0.0.tgz", + "integrity": "sha512-L5bGsVkxJbJgdnwyuheIunkGatUF/zssUoxxjACCseZYAVbaqdh9Tsmmlkl8vYan09H7sbvKt4pS8GqKLBrEzA==", + "devOptional": true, + "dependencies": { + "citty": "^0.1.6", + "consola": "^3.4.0", + "defu": "^6.1.4", + "node-fetch-native": "^1.6.6", + "nypm": "^0.6.0", + "pathe": "^2.0.3" + }, + "bin": { + "giget": "dist/cli.mjs" + } + }, + "node_modules/glob": { + "version": "13.0.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-13.0.0.tgz", + "integrity": "sha512-tvZgpqk6fz4BaNZ66ZsRaZnbHvP/jG3uKJvAZOwEVUL4RTA5nJeeLYfyN9/VA8NX/V3IBG+hkeuGpKjvELkVhA==", + "dev": true, + "dependencies": { + "minimatch": "^10.1.1", + "minipass": "^7.1.2", + "path-scurry": "^2.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "dev": true + }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/google-logging-utils": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-0.0.2.tgz", + "integrity": "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ==", + "engines": { + "node": ">=14" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + "handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/handlebars/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==", + "engines": { + "node": ">=4" + } + }, + "node_modules/har-validator": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", + "deprecated": "this library is no longer supported", + "dependencies": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/har-validator/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/har-validator/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + }, + "engines": { + "node": ">=0.8", + "npm": ">=1.3.7" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz", + "integrity": "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-in-the-middle": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.15.0.tgz", + "integrity": "sha512-bpQy+CrsRmYmoPMAE/0G33iwRqwW4ouqdRg8jgbH3aKuCtOc8lxgmYXg2dMM92CRiGP660EtBcymH/eVUpCSaA==", + "dependencies": { + "acorn": "^8.14.0", + "acorn-import-attributes": "^1.9.5", + "cjs-module-lexer": "^1.2.2", + "module-details-from-path": "^1.0.3" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-promise": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", + "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==" + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/isomorphic-ws": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-5.0.0.tgz", + "integrity": "sha512-muId7Zzn9ywDsyXgTIafTry2sV3nySZeUDe6YedVd1Hvuuep5AsIlqK+XefWpYTyJG5e503F2xIuT2lcU6rCSw==", + "peerDependencies": { + "ws": "*" + } + }, + "node_modules/isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz", + "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==", + "dev": true, + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/iterare": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/iterare/-/iterare-1.2.1.tgz", + "integrity": "sha512-RKYVTCjAnRthyJes037NX/IiqeidgN1xc3j1RjFfECFp28A1GVwK9nA+i0rJPaHqSZwygLzRnFlzUuHFoWWy+Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-config/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-config/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-runner/node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/jest-runtime/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + "devOptional": true, + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/jose": { + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/jose/-/jose-6.1.3.tgz", + "integrity": "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==", + "optional": true, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" + }, + "node_modules/jsep": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/jsep/-/jsep-1.4.0.tgz", + "integrity": "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw==", + "engines": { + "node": ">= 10.16.0" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-bigint": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", + "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", + "dependencies": { + "bignumber.js": "^9.0.0" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonc-parser": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.3.1.tgz", + "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==", + "dev": true + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsonpath-plus": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-10.3.0.tgz", + "integrity": "sha512-8TNmfeTCk2Le33A3vRRwtuworG/L5RrgMvdjhKZxvyShO+mBu2fP50OWUjRLNtvw344DdDarFh9buFAZs5ujeA==", + "dependencies": { + "@jsep-plugin/assignment": "^1.3.0", + "@jsep-plugin/regex": "^1.0.4", + "jsep": "^1.4.0" + }, + "bin": { + "jsonpath": "bin/jsonpath-cli.js", + "jsonpath-plus": "bin/jsonpath-cli.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/jsonwebtoken": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.3.tgz", + "integrity": "sha512-MT/xP0CrubFRNLNKvxJ2BYfy53Zkm++5bX9dtuPbqAeQpTVe0MQTFhao8+Cp//EmJp244xt6Drw/GVEGCUj40g==", + "dependencies": { + "jws": "^4.0.1", + "lodash.includes": "^4.3.0", + "lodash.isboolean": "^3.0.3", + "lodash.isinteger": "^4.0.4", + "lodash.isnumber": "^3.0.3", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.once": "^4.0.0", + "ms": "^2.1.1", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/jsprim": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", + "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/jwa": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", + "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", + "dependencies": { + "buffer-equal-constant-time": "^1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz", + "integrity": "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==", + "dependencies": { + "jwa": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/libphonenumber-js": { + "version": "1.12.31", + "resolved": "https://registry.npmjs.org/libphonenumber-js/-/libphonenumber-js-1.12.31.tgz", + "integrity": "sha512-Z3IhgVgrqO1S5xPYM3K5XwbkDasU67/Vys4heW+lfSBALcUZjeIIzI8zCLifY+OCzSq+fpDdywMDa7z+4srJPQ==" + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/load-esm": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/load-esm/-/load-esm-1.0.3.tgz", + "integrity": "sha512-v5xlu8eHD1+6r8EHTg6hfmO97LN8ugKtiXcy5e6oN72iD2r6u0RPfLl6fxM+7Wnh2ZRq15o0russMst44WauPA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + }, + { + "type": "buymeacoffee", + "url": "https://buymeacoffee.com/borewit" + } + ], + "engines": { + "node": ">=13.2.0" + } + }, + "node_modules/loader-runner": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.1.tgz", + "integrity": "sha512-IWqP2SCPhyVFTBtRcgMHdzlf9ul25NwaFx4wCEH/KjAXuuHY4yNjvPXsBokp8jCB936PyWRaPKUNh8NvylLp2Q==", + "dev": true, + "engines": { + "node": ">=6.11.5" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==" + }, + "node_modules/lodash.includes": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", + "integrity": "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==" + }, + "node_modules/lodash.isboolean": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", + "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==" + }, + "node_modules/lodash.isinteger": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", + "integrity": "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==" + }, + "node_modules/lodash.isnumber": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", + "integrity": "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==" + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/lodash.once": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", + "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/long": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/luxon": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.7.2.tgz", + "integrity": "sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==", + "engines": { + "node": ">=12" + } + }, + "node_modules/magic-string": { + "version": "0.30.17", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", + "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "dev": true, + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz", + "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dev": true, + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz", + "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz", + "integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==", + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/minizlib": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz", + "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/mkdirp": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", + "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", + "dependencies": { + "minimist": "^1.2.6" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/module-details-from-path": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.4.tgz", + "integrity": "sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==" + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/multer": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/multer/-/multer-2.0.2.tgz", + "integrity": "sha512-u7f2xaZ/UG8oLXHvtF/oWTRvT44p9ecwBBqTwgJVq0+4BW1g8OW01TyMEGWBHbyMOYVHXslaut7qEQ1meATXgw==", + "dependencies": { + "append-field": "^1.0.0", + "busboy": "^1.6.0", + "concat-stream": "^2.0.0", + "mkdirp": "^0.5.6", + "object-assign": "^4.1.1", + "type-is": "^1.6.18", + "xtend": "^4.0.2" + }, + "engines": { + "node": ">= 10.16.0" + } + }, + "node_modules/multer/node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/multer/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/multer/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/multer/node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mute-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz", + "integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==", + "dev": true, + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true + }, + "node_modules/nexus-rpc": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/nexus-rpc/-/nexus-rpc-0.0.1.tgz", + "integrity": "sha512-hAWn8Hh2eewpB5McXR5EW81R3pR/ziuGhKCF3wFyUVCklanPqrIgMNr7jKCbzXeNVad0nUDfWpFRqh2u+zxQtw==", + "engines": { + "node": ">= 18.0.0" + } + }, + "node_modules/node-abort-controller": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz", + "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==", + "dev": true + }, + "node_modules/node-emoji": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz", + "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==", + "dev": true, + "dependencies": { + "lodash": "^4.17.21" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-fetch-native": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/node-fetch-native/-/node-fetch-native-1.6.7.tgz", + "integrity": "sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==", + "devOptional": true + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nypm": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/nypm/-/nypm-0.6.2.tgz", + "integrity": "sha512-7eM+hpOtrKrBDCh7Ypu2lJ9Z7PNZBdi/8AT3AX8xoCj43BBVHD0hPSTEvMtkMpfs8FCqBGhxB+uToIQimA111g==", + "devOptional": true, + "dependencies": { + "citty": "^0.1.6", + "consola": "^3.4.2", + "pathe": "^2.0.3", + "pkg-types": "^2.3.0", + "tinyexec": "^1.0.1" + }, + "bin": { + "nypm": "dist/cli.mjs" + }, + "engines": { + "node": "^14.16.0 || >=16.10.0" + } + }, + "node_modules/oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", + "engines": { + "node": "*" + } + }, + "node_modules/oauth4webapi": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/oauth4webapi/-/oauth4webapi-3.8.3.tgz", + "integrity": "sha512-pQ5BsX3QRTgnt5HxgHwgunIRaDXBdkT23tf8dfzmtTIL2LTpdmxgbpbBm0VgFWAIDlezQvQCTgnVIUmHupXHxw==", + "optional": true, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ohash": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-2.0.11.tgz", + "integrity": "sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ==", + "devOptional": true + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/openid-client": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-6.8.1.tgz", + "integrity": "sha512-VoYT6enBo6Vj2j3Q5Ec0AezS+9YGzQo1f5Xc42lreMGlfP4ljiXPKVDvCADh+XHCV/bqPu/wWSiCVXbJKvrODw==", + "optional": true, + "dependencies": { + "jose": "^6.1.0", + "oauth4webapi": "^3.8.2" + }, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" + }, + "node_modules/path-scurry": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.1.tgz", + "integrity": "sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==", + "dev": true, + "dependencies": { + "lru-cache": "^11.0.0", + "minipass": "^7.1.2" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "11.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz", + "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==", + "dev": true, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/path-to-regexp": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz", + "integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "devOptional": true + }, + "node_modules/perfect-debounce": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/perfect-debounce/-/perfect-debounce-1.0.0.tgz", + "integrity": "sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==", + "devOptional": true + }, + "node_modules/performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==" + }, + "node_modules/pg-int8": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz", + "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/pg-protocol": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.10.3.tgz", + "integrity": "sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==" + }, + "node_modules/pg-types": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz", + "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==", + "dependencies": { + "pg-int8": "1.0.1", + "postgres-array": "~2.0.0", + "postgres-bytea": "~1.0.0", + "postgres-date": "~1.0.4", + "postgres-interval": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true + }, + "node_modules/picomatch": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", + "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-types": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-2.3.0.tgz", + "integrity": "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==", + "devOptional": true, + "dependencies": { + "confbox": "^0.2.2", + "exsolve": "^1.0.7", + "pathe": "^2.0.3" + } + }, + "node_modules/pluralize": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", + "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/postgres-array": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz", + "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/postgres-bytea": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.1.tgz", + "integrity": "sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-date": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz", + "integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-interval": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz", + "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==", + "dependencies": { + "xtend": "^4.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz", + "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==", + "dev": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prisma": { + "version": "6.19.1", + "resolved": "https://registry.npmjs.org/prisma/-/prisma-6.19.1.tgz", + "integrity": "sha512-XRfmGzh6gtkc/Vq3LqZJcS2884dQQW3UhPo6jNRoiTW95FFQkXFg8vkYEy6og+Pyv0aY7zRQ7Wn1Cvr56XjhQQ==", + "devOptional": true, + "hasInstallScript": true, + "dependencies": { + "@prisma/config": "6.19.1", + "@prisma/engines": "6.19.1" + }, + "bin": { + "prisma": "build/index.js" + }, + "engines": { + "node": ">=18.18" + }, + "peerDependencies": { + "typescript": ">=5.1.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/prom-client": { + "version": "15.1.3", + "resolved": "https://registry.npmjs.org/prom-client/-/prom-client-15.1.3.tgz", + "integrity": "sha512-6ZiOBfCywsD4k1BN9IX0uZhF+tJkV8q8llP64G5Hajs4JOeVLPCwpPVcpXy3BwYiUGgyJzsJJQeOIv7+hDSq8g==", + "dependencies": { + "@opentelemetry/api": "^1.4.0", + "tdigest": "^0.1.1" + }, + "engines": { + "node": "^16 || ^18 || >=20" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/proto3-json-serializer": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/proto3-json-serializer/-/proto3-json-serializer-2.0.2.tgz", + "integrity": "sha512-SAzp/O4Yh02jGdRc+uIrGoe87dkN/XtwxfZ4ZyafJHymd79ozp5VG5nyZ7ygqPM5+cpLDjjGnYFUkngonyDPOQ==", + "dependencies": { + "protobufjs": "^7.2.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/protobufjs": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz", + "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/psl": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", + "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", + "dependencies": { + "punycode": "^2.3.1" + }, + "funding": { + "url": "https://github.com/sponsors/lupomontero" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", + "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==", + "devOptional": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/qs": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz", + "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz", + "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==", + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.7.0", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/rc9": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/rc9/-/rc9-2.1.2.tgz", + "integrity": "sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==", + "devOptional": true, + "dependencies": { + "defu": "^6.1.4", + "destr": "^2.0.3" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "devOptional": true, + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/reflect-metadata": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", + "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==" + }, + "node_modules/request": { + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", + "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142", + "dependencies": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.3", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.5.0", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/request/node_modules/form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/request/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/request/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/request/node_modules/qs": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", + "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-in-the-middle": { + "version": "7.5.2", + "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-7.5.2.tgz", + "integrity": "sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==", + "dependencies": { + "debug": "^4.3.5", + "module-details-from-path": "^1.0.3", + "resolve": "^1.22.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-cwd/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz", + "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/restore-cursor/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/rfc4648": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.5.4.tgz", + "integrity": "sha512-rRg/6Lb+IGfJqO05HZkN50UtY7K/JhxJag1kP23+zyMfrvoB0B7RWv06MbOzoc79RgCdNTiUaNsTT1AJZ7Z+cg==" + }, + "node_modules/rimraf": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-6.1.2.tgz", + "integrity": "sha512-cFCkPslJv7BAXJsYlK1dZsbP8/ZNLkCAQ0bi1hf5EKX2QHegmDFEFA6QhuYJlk7UDdc+02JjO80YSOrWPpw06g==", + "dev": true, + "dependencies": { + "glob": "^13.0.0", + "package-json-from-dist": "^1.0.1" + }, + "bin": { + "rimraf": "dist/esm/bin.mjs" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/router": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz", + "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==", + "dependencies": { + "debug": "^4.4.0", + "depd": "^2.0.0", + "is-promise": "^4.0.0", + "parseurl": "^1.3.3", + "path-to-regexp": "^8.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/schema-utils/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/schema-utils/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "dev": true, + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/schema-utils/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", + "dependencies": { + "debug": "^4.4.3", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "fresh": "^2.0.0", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", + "ms": "^2.1.3", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "statuses": "^2.0.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-static": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz", + "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==", + "dependencies": { + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "parseurl": "^1.3.3", + "send": "^1.2.0" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/shimmer": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz", + "integrity": "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==" + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/socket.io": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-4.8.1.tgz", + "integrity": "sha512-oZ7iUCxph8WYRHHcjBEc9unw3adt5CmSNlppj/5Q4k2RIrhl8Z5yY2Xr4j9zj0+wzVZ0bxmYoGSzKJnRl6A4yg==", + "dependencies": { + "accepts": "~1.3.4", + "base64id": "~2.0.0", + "cors": "~2.8.5", + "debug": "~4.3.2", + "engine.io": "~6.6.0", + "socket.io-adapter": "~2.5.2", + "socket.io-parser": "~4.2.4" + }, + "engines": { + "node": ">=10.2.0" + } + }, + "node_modules/socket.io-adapter": { + "version": "2.5.5", + "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-2.5.5.tgz", + "integrity": "sha512-eLDQas5dzPgOWCk9GuuJC2lBqItuhKI4uxGgo9aIV7MYbk2h9Q6uULEh8WBzThoI7l+qU9Ast9fVUmkqPP9wYg==", + "dependencies": { + "debug": "~4.3.4", + "ws": "~8.17.1" + } + }, + "node_modules/socket.io-adapter/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/socket.io-adapter/node_modules/ws": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/socket.io-parser": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-4.2.4.tgz", + "integrity": "sha512-/GbIKmo8ioc+NIWIhwdecY0ge+qVBSMdgxGygevmdHj24bsfgtCmcUUcQ5ZzcylGFHsN3k4HB4Cgkl96KVnuew==", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.3.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/socket.io-parser/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/socket.io/node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/socket.io/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/socket.io/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/socket.io/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/socket.io/node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "node_modules/sshpk": { + "version": "1.18.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/stream-buffers": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.3.tgz", + "integrity": "sha512-pqMqwQCso0PBJt2PQmDO0cFj0lyqmiwOMiMSkVtRokl7e+ZTRYgDHKnuZNbqjiJXgsg4nuqtD/zxuo9KqTp0Yw==", + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strtok3": { + "version": "10.3.4", + "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-10.3.4.tgz", + "integrity": "sha512-KIy5nylvC5le1OdaaoCJ07L+8iQzJHGH6pWDuzS+d07Cu7n1MZ2x26P8ZKIWfbK02+XIL8Mp4RkWeqdUCrDMfg==", + "dependencies": { + "@tokenizer/token": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/swagger-ui-dist": { + "version": "5.30.2", + "resolved": "https://registry.npmjs.org/swagger-ui-dist/-/swagger-ui-dist-5.30.2.tgz", + "integrity": "sha512-HWCg1DTNE/Nmapt+0m2EPXFwNKNeKK4PwMjkwveN/zn1cV2Kxi9SURd+m0SpdcSgWEK/O64sf8bzXdtUhigtHA==", + "dependencies": { + "@scarf/scarf": "=1.4.0" + } + }, + "node_modules/swagger-ui-express": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/swagger-ui-express/-/swagger-ui-express-5.0.1.tgz", + "integrity": "sha512-SrNU3RiBGTLLmFU8GIJdOdanJTl4TOmT27tt3bWWHppqYmAZ6IDuEuBvMU6nZq0zLEe6b/1rACXCgLZqO6ZfrA==", + "dependencies": { + "swagger-ui-dist": ">=5.0.0" + }, + "engines": { + "node": ">= v0.10.32" + }, + "peerDependencies": { + "express": ">=4.0.0 || >=5.0.0-beta" + } + }, + "node_modules/symbol-observable": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-4.0.0.tgz", + "integrity": "sha512-b19dMThMV4HVFynSAM1++gBHAbk2Tc/osgLIBZMKsyqh34jb2e8Os7T6ZW/Bt3pJFdBTd2JwAnAAEQV7rSNvcQ==", + "dev": true, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/tapable": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", + "dev": true, + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tar": { + "version": "7.5.2", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.2.tgz", + "integrity": "sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.1.0", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/tar/node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "engines": { + "node": ">=18" + } + }, + "node_modules/tdigest": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/tdigest/-/tdigest-0.1.2.tgz", + "integrity": "sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==", + "dependencies": { + "bintrees": "1.0.2" + } + }, + "node_modules/terser": { + "version": "5.44.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.44.1.tgz", + "integrity": "sha512-t/R3R/n0MSwnnazuPpPNVO60LX0SKL45pyl9YlvxIdkH0Of7D5qM2EVe+yASRIlY5pZ73nclYJfNANGWPwFDZw==", + "dev": true, + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.15.0", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.16", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.16.tgz", + "integrity": "sha512-h9oBFCWrq78NyWWVcSwZarJkZ01c2AyGrzs1crmHZO3QUg9D61Wu4NPjBy69n7JqylFF5y+CsUZYmYEIZ3mR+Q==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "jest-worker": "^27.4.5", + "schema-utils": "^4.3.0", + "serialize-javascript": "^6.0.2", + "terser": "^5.31.1" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dev": true, + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dev": true, + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/schema-utils": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.3.tgz", + "integrity": "sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/tinyexec": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz", + "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + "devOptional": true, + "engines": { + "node": ">=18" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/token-types": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/token-types/-/token-types-6.1.1.tgz", + "integrity": "sha512-kh9LVIWH5CnL63Ipf0jhlBIy0UsrMj/NJDfpsy1SqOXlLKEVyXXYrnFxFT1yOOYVGBSApeVnjPw/sBz5BfEjAQ==", + "dependencies": { + "@borewit/text-codec": "^0.1.0", + "@tokenizer/token": "^0.3.0", + "ieee754": "^1.2.1" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/tough-cookie": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "dependencies": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/ts-jest": { + "version": "29.4.6", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.6.tgz", + "integrity": "sha512-fSpWtOO/1AjSNQguk43hb/JCo16oJDnMJf3CdEGNkqsEX3t0KX96xvyX1D7PfLCpVoKu4MfVrqUkFyblYoY4lA==", + "dev": true, + "dependencies": { + "bs-logger": "^0.2.6", + "fast-json-stable-stringify": "^2.1.0", + "handlebars": "^4.7.8", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.7.3", + "type-fest": "^4.41.0", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0 || ^30.0.0", + "@jest/types": "^29.0.0 || ^30.0.0", + "babel-jest": "^29.0.0 || ^30.0.0", + "jest": "^29.0.0 || ^30.0.0", + "jest-util": "^29.0.0 || ^30.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jest-util": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/type-fest": { + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "dev": true, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/tsconfig-paths": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-4.2.0.tgz", + "integrity": "sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==", + "dev": true, + "dependencies": { + "json5": "^2.2.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tsconfig-paths-webpack-plugin": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths-webpack-plugin/-/tsconfig-paths-webpack-plugin-4.2.0.tgz", + "integrity": "sha512-zbem3rfRS8BgeNK50Zz5SIQgXzLafiHjOwUAvk/38/o1jHn/V5QAgVUcz884or7WYcPaH3N2CIfUc2u0ul7UcA==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "enhanced-resolve": "^5.7.0", + "tapable": "^2.2.1", + "tsconfig-paths": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/tsconfig-paths/node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==" + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", + "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==", + "dependencies": { + "content-type": "^1.0.5", + "media-typer": "^1.1.0", + "mime-types": "^3.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==" + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "devOptional": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/uid": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/uid/-/uid-2.0.2.tgz", + "integrity": "sha512-u3xV3X7uzvi5b1MncmZo3i2Aw222Zk1keqLA1YkHldREkAhAqi65wuPfe7lHx8H/Wzy+8CE7S7uS3jekIM5s8g==", + "dependencies": { + "@lukeed/csprng": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/uint8array-extras": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/uint8array-extras/-/uint8array-extras-1.5.0.tgz", + "integrity": "sha512-rvKSBiC5zqCCiDZ9kAOszZcDvdAHwwIKJG33Ykj43OKcWsnmcBRL09YTU4nOeHZ8Y2a7l1MgTd08SBe9A8Qj6A==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==" + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.2.tgz", + "integrity": "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/validator": { + "version": "13.15.23", + "resolved": "https://registry.npmjs.org/validator/-/validator-13.15.23.tgz", + "integrity": "sha512-4yoz1kEWqUjzi5zsPbAS/903QXSYp0UOtHsPpp7p9rHAw/W+dkInskAE386Fat3oKRROwO98d9ZB0G4cObgUyw==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", + "engines": [ + "node >=0.6.0" + ], + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/verror/node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==" + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/watchpack": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.4.tgz", + "integrity": "sha512-c5EGNOiyxxV5qmTtAB7rbiXxi1ooX1pQKMLX/MIabJjRA0SJBQOjKF+KSVfHkr9U1cADPon0mRiVe/riyaiDUA==", + "dev": true, + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/webpack": { + "version": "5.103.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.103.0.tgz", + "integrity": "sha512-HU1JOuV1OavsZ+mfigY0j8d1TgQgbZ6M+J75zDkpEAwYeXjWSqrGJtgnPblJjd/mAyTNQ7ygw0MiKOn6etz8yw==", + "dev": true, + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.8", + "@types/json-schema": "^7.0.15", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.15.0", + "acorn-import-phases": "^1.0.3", + "browserslist": "^4.26.3", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.3", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.3.1", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^4.3.3", + "tapable": "^2.3.0", + "terser-webpack-plugin": "^5.3.11", + "watchpack": "^2.4.4", + "webpack-sources": "^3.3.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-node-externals": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/webpack-node-externals/-/webpack-node-externals-3.0.0.tgz", + "integrity": "sha512-LnL6Z3GGDPht/AigwRh2dvL9PQPFQ8skEpVrWZXLWBYmqcaojHNN0onvHzie6rq7EWKrrBfPYqNEzTJgiwEQDQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/webpack-sources": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.3.3.tgz", + "integrity": "sha512-yd1RBzSGanHkitROoPFd6qsrxt+oFhg/129YzheDGqeustzX0vTZJZsSsQjVQC4yzBQ56K55XU8gaNCtIzOnTg==", + "dev": true, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dev": true, + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/webpack/node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/webpack/node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/schema-utils": { + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.3.tgz", + "integrity": "sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", + "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", + "dependencies": { + "string-width": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/write-file-atomic/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors-cjs": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.3.tgz", + "integrity": "sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/package.json b/packages/bytebot-workflow-orchestrator/package.json new file mode 100644 index 000000000..f7dccbdf2 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/package.json @@ -0,0 +1,102 @@ +{ + "name": "bytebot-workflow-orchestrator", + "version": "5.17.0", + "description": "ByteBot Workflow Orchestrator - Manages multi-step workflow execution with persistent workspaces", + "author": "ByteBot Team", + "private": true, + "license": "UNLICENSED", + "scripts": { + "prebuild": "rimraf dist", + "build": "nest build", + "format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"", + "start": "nest start", + "start:dev": "nest start --watch", + "start:debug": "nest start --debug --watch", + "start:prod": "node dist/main", + "lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix", + "test": "jest", + "test:watch": "jest --watch", + "test:cov": "jest --coverage", + "test:e2e": "jest --config ./test/jest-e2e.json", + "prisma:generate": "prisma generate", + "prisma:migrate": "prisma migrate deploy", + "prisma:migrate:dev": "prisma migrate dev", + "maintenance:unstick-strategy-prompts": "ts-node scripts/unstick-strategy-prompts.ts --dry-run", + "maintenance:unstick-desktop-not-allowed-prompts": "ts-node scripts/unstick-desktop-not-allowed-prompts.ts --dry-run" + }, + "dependencies": { + "@kubernetes/client-node": "^0.22.3", + "@nestjs/common": "^11.0.1", + "@temporalio/client": "^1.11.7", + "@opentelemetry/auto-instrumentations-node": "^0.54.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.57.0", + "@opentelemetry/resources": "^1.30.0", + "@opentelemetry/sdk-node": "^0.57.0", + "@opentelemetry/sdk-trace-base": "^1.30.0", + "@opentelemetry/semantic-conventions": "^1.28.0", + "@nestjs/config": "^4.0.2", + "@nestjs/core": "^11.0.1", + "@nestjs/event-emitter": "^3.0.0", + "@nestjs/jwt": "^11.0.2", + "@nestjs/platform-express": "^11.1.5", + "@nestjs/platform-socket.io": "^11.0.0", + "@nestjs/schedule": "^6.0.0", + "@nestjs/swagger": "^11.0.0", + "@nestjs/terminus": "^11.0.0", + "@nestjs/throttler": "^6.3.0", + "@nestjs/websockets": "^11.0.0", + "@paralleldrive/cuid2": "^2.2.2", + "@prisma/client": "^6.16.1", + "@willsoto/nestjs-prometheus": "^6.0.1", + "ajv": "8.17.1", + "ajv-formats": "3.0.1", + "axios": "^1.7.9", + "class-transformer": "^0.5.1", + "class-validator": "^0.14.2", + "cockatiel": "^3.2.1", + "minimatch": "^10.0.1", + "prom-client": "^15.1.3", + "reflect-metadata": "^0.2.2", + "rxjs": "^7.8.1", + "socket.io": "^4.7.4", + "swagger-ui-express": "^5.0.1", + "zod": "^3.24.1" + }, + "devDependencies": { + "@nestjs/cli": "^11.0.0", + "@nestjs/schematics": "^11.0.0", + "@nestjs/testing": "^11.0.1", + "@types/express": "^5.0.0", + "@types/jest": "^29.5.14", + "@types/node": "^22.10.7", + "eslint": "^9.18.0", + "jest": "^29.7.0", + "prettier": "^3.4.2", + "prisma": "^6.16.1", + "rimraf": "^6.0.1", + "source-map-support": "^0.5.21", + "ts-jest": "^29.2.5", + "ts-node": "^10.9.2", + "typescript": "^5.7.3" + }, + "jest": { + "moduleFileExtensions": [ + "js", + "json", + "ts" + ], + "rootDir": "src", + "testRegex": ".*\\.spec\\.ts$", + "transform": { + "^.+\\.(t|j)s$": "ts-jest" + }, + "collectCoverageFrom": [ + "**/*.(t|j)s" + ], + "coverageDirectory": "../coverage", + "testEnvironment": "node" + }, + "engines": { + "node": ">=20" + } +} diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20251215_phase6_performance_indexes.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20251215_phase6_performance_indexes.sql new file mode 100644 index 000000000..0e82c2006 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20251215_phase6_performance_indexes.sql @@ -0,0 +1,81 @@ +-- Phase 6: Production Hardening - Performance Indexes +-- Created: 2025-12-15 +-- Purpose: Add optimized indexes for high-volume query patterns + +-- ============================================================================= +-- Activity Events Table - Additional Performance Indexes +-- ============================================================================= + +-- Composite index for filtering by event type within time ranges +-- Useful for: "Show all STEP_COMPLETED events in the last hour" +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_activity_events_type_created +ON workflow_orchestrator.activity_events (event_type, created_at DESC); + +-- Composite index for severity-based filtering within a goal run +-- Useful for: "Show all errors/warnings for this goal run" +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_activity_events_goalrun_severity +ON workflow_orchestrator.activity_events (goal_run_id, severity, created_at DESC); + +-- ============================================================================= +-- Goal Runs Table - Additional Performance Indexes +-- ============================================================================= + +-- Composite index for tenant-specific status queries +-- Useful for: "Show all RUNNING goals for tenant X" (dashboard) +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_goal_runs_tenant_status_created +ON workflow_orchestrator.goal_runs (tenant_id, status, created_at DESC); + +-- Composite index for phase monitoring +-- Useful for: "Find all goals stuck in EXECUTING phase" +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_goal_runs_phase_updated +ON workflow_orchestrator.goal_runs (phase, updated_at); + +-- ============================================================================= +-- Checklist Items Table - Additional Performance Indexes +-- ============================================================================= + +-- Composite index for status tracking within a plan +-- Useful for: "Show pending items for this plan version" +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_checklist_items_plan_status +ON workflow_orchestrator.checklist_items (plan_version_id, status, "order"); + +-- ============================================================================= +-- Workflow Execution Metrics - Time Series Optimization +-- ============================================================================= + +-- BRIN index for timestamp-based range scans on large metrics tables +-- BRIN indexes are much smaller than B-tree for time-series data +-- Only beneficial for tables > 100K rows with sequential inserts +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_wf_exec_metrics_timestamp_brin +ON workflow_orchestrator.workflow_execution_metrics +USING BRIN (timestamp) WITH (pages_per_range = 128); + +-- ============================================================================= +-- Workflow Step Metrics - Time Series Optimization +-- ============================================================================= + +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_wf_step_metrics_timestamp_brin +ON workflow_orchestrator.workflow_step_metrics +USING BRIN (timestamp) WITH (pages_per_range = 128); + +-- ============================================================================= +-- Dead Letter Entries - Recovery Queue Optimization +-- ============================================================================= + +-- Composite index for finding retryable entries +-- Useful for: "Find all entries ready for retry" +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_dead_letter_retry_queue +ON workflow_orchestrator.dead_letter_entries (status, next_retry_at) +WHERE status IN ('PENDING', 'RETRYING') AND next_retry_at IS NOT NULL; + +-- ============================================================================= +-- Index Statistics Update +-- ============================================================================= + +-- Analyze tables to update statistics for query planner +ANALYZE workflow_orchestrator.activity_events; +ANALYZE workflow_orchestrator.goal_runs; +ANALYZE workflow_orchestrator.checklist_items; +ANALYZE workflow_orchestrator.workflow_execution_metrics; +ANALYZE workflow_orchestrator.workflow_step_metrics; +ANALYZE workflow_orchestrator.dead_letter_entries; diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20251216_phase10_enterprise_tables.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20251216_phase10_enterprise_tables.sql new file mode 100644 index 000000000..5ad4887a5 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20251216_phase10_enterprise_tables.sql @@ -0,0 +1,366 @@ +-- Phase 10: Enterprise Features Tables +-- Creates tables for multi-tenant administration, SSO, compliance, and LLM providers +-- Run: psql -h -U -d -f 20251216_phase10_enterprise_tables.sql + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS workflow_orchestrator; + +-- ============================================================================ +-- Tenant Table +-- Core tenant/organization management +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.tenants ( + id VARCHAR(255) PRIMARY KEY, + name VARCHAR(255) NOT NULL, + slug VARCHAR(255) NOT NULL UNIQUE, + + -- Contact information + admin_email VARCHAR(255) NOT NULL, + admin_name VARCHAR(255), + company_name VARCHAR(255), + + -- Subscription/billing + plan VARCHAR(50) DEFAULT 'free' NOT NULL, + billing_email VARCHAR(255), + stripe_customer_id VARCHAR(255), + + -- Status + status VARCHAR(50) DEFAULT 'active' NOT NULL, + trial_ends TIMESTAMP WITH TIME ZONE, + + -- Metadata + metadata JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL +); + +-- Indexes for tenants +CREATE INDEX IF NOT EXISTS idx_tenants_slug ON workflow_orchestrator.tenants(slug); +CREATE INDEX IF NOT EXISTS idx_tenants_status ON workflow_orchestrator.tenants(status); +CREATE INDEX IF NOT EXISTS idx_tenants_plan ON workflow_orchestrator.tenants(plan); +CREATE INDEX IF NOT EXISTS idx_tenants_created_at ON workflow_orchestrator.tenants(created_at); + +-- ============================================================================ +-- Tenant Settings Table +-- Configurable settings per tenant +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.tenant_settings ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL UNIQUE REFERENCES workflow_orchestrator.tenants(id) ON DELETE CASCADE, + + -- General settings + timezone VARCHAR(50) DEFAULT 'UTC' NOT NULL, + date_format VARCHAR(50) DEFAULT 'YYYY-MM-DD' NOT NULL, + default_workspace_mode VARCHAR(50) DEFAULT 'SHARED' NOT NULL, + + -- Security settings + require_mfa BOOLEAN DEFAULT FALSE NOT NULL, + session_timeout INTEGER DEFAULT 3600 NOT NULL, + ip_allowlist TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + allowed_domains TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + + -- Workflow settings + max_concurrent_goals INTEGER DEFAULT 5 NOT NULL, + default_approval_timeout INTEGER DEFAULT 3600 NOT NULL, + auto_replan_enabled BOOLEAN DEFAULT TRUE NOT NULL, + max_replan_attempts INTEGER DEFAULT 3 NOT NULL, + + -- Notification settings + notification_email VARCHAR(255), + slack_webhook_url TEXT, + teams_webhook_url TEXT, + webhook_secret_key VARCHAR(255), + + -- Data retention + audit_log_retention_days INTEGER DEFAULT 365 NOT NULL, + goal_run_retention_days INTEGER DEFAULT 90 NOT NULL, + + -- Feature flags + features JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL +); + +-- ============================================================================ +-- Tenant Quota Table +-- Usage quotas and limits per tenant +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.tenant_quotas ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL UNIQUE REFERENCES workflow_orchestrator.tenants(id) ON DELETE CASCADE, + + -- Goal run limits + monthly_goal_runs INTEGER DEFAULT 1000 NOT NULL, + monthly_goal_runs_used INTEGER DEFAULT 0 NOT NULL, + + -- LLM token limits + monthly_tokens INTEGER DEFAULT 1000000 NOT NULL, + monthly_tokens_used INTEGER DEFAULT 0 NOT NULL, + + -- Storage limits (bytes) + storage_limit BIGINT DEFAULT 10737418240 NOT NULL, -- 10GB + storage_used BIGINT DEFAULT 0 NOT NULL, + + -- Concurrent limits + max_concurrent_workspaces INTEGER DEFAULT 10 NOT NULL, + max_users_per_tenant INTEGER DEFAULT 50 NOT NULL, + max_templates INTEGER DEFAULT 100 NOT NULL, + max_batch_size INTEGER DEFAULT 50 NOT NULL, + + -- API limits + api_rate_limit_per_minute INTEGER DEFAULT 100 NOT NULL, + + -- Reset tracking + quota_period_start TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL +); + +-- ============================================================================ +-- SSO Configuration Table +-- SAML/SSO settings per tenant +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.sso_configurations ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL UNIQUE REFERENCES workflow_orchestrator.tenants(id) ON DELETE CASCADE, + + -- SSO type + provider VARCHAR(50) DEFAULT 'saml' NOT NULL, + + -- SAML configuration + entity_id VARCHAR(512), + sso_url TEXT, + slo_url TEXT, + certificate TEXT, + signature_algorithm VARCHAR(50) DEFAULT 'sha256' NOT NULL, + + -- Attribute mapping + attribute_mapping JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Just-in-time provisioning + jit_provisioning BOOLEAN DEFAULT TRUE NOT NULL, + default_role VARCHAR(50) DEFAULT 'member' NOT NULL, + auto_update_attributes BOOLEAN DEFAULT TRUE NOT NULL, + + -- Domain validation + enforced_domains TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + allow_bypass_sso BOOLEAN DEFAULT FALSE NOT NULL, + + -- Status + enabled BOOLEAN DEFAULT FALSE NOT NULL, + verified BOOLEAN DEFAULT FALSE NOT NULL, + + -- Metadata URLs + idp_metadata_url TEXT, + sp_metadata_url TEXT, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL +); + +-- ============================================================================ +-- LLM Provider Configuration Table +-- Custom LLM provider settings per tenant +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.llm_provider_configs ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.tenants(id) ON DELETE CASCADE, + + -- Provider info + provider VARCHAR(100) NOT NULL, + name VARCHAR(255) NOT NULL, + is_default BOOLEAN DEFAULT FALSE NOT NULL, + + -- Configuration + api_key TEXT, + api_endpoint TEXT, + model VARCHAR(255), + region VARCHAR(100), + + -- Provider-specific settings + config JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Usage tracking + total_tokens_used BIGINT DEFAULT 0 NOT NULL, + total_requests_count INTEGER DEFAULT 0 NOT NULL, + last_used_at TIMESTAMP WITH TIME ZONE, + + -- Fallback configuration + priority INTEGER DEFAULT 0 NOT NULL, + is_enabled BOOLEAN DEFAULT TRUE NOT NULL, + is_fallback BOOLEAN DEFAULT FALSE NOT NULL, + + -- Rate limiting + max_requests_per_minute INTEGER, + max_tokens_per_request INTEGER, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + + -- Unique constraint + UNIQUE(tenant_id, provider, name) +); + +-- Indexes for llm_provider_configs +CREATE INDEX IF NOT EXISTS idx_llm_provider_configs_tenant_id ON workflow_orchestrator.llm_provider_configs(tenant_id); +CREATE INDEX IF NOT EXISTS idx_llm_provider_configs_tenant_default ON workflow_orchestrator.llm_provider_configs(tenant_id, is_default); +CREATE INDEX IF NOT EXISTS idx_llm_provider_configs_provider ON workflow_orchestrator.llm_provider_configs(provider); + +-- ============================================================================ +-- Compliance Report Table +-- Generated compliance reports (SOC2, GDPR) +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.compliance_reports ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.tenants(id) ON DELETE CASCADE, + + -- Report info + report_type VARCHAR(100) NOT NULL, + report_name VARCHAR(255) NOT NULL, + report_period VARCHAR(50) NOT NULL, + + -- Date range + start_date TIMESTAMP WITH TIME ZONE NOT NULL, + end_date TIMESTAMP WITH TIME ZONE NOT NULL, + + -- Report content + summary TEXT, + findings JSONB DEFAULT '[]'::jsonb NOT NULL, + metrics JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Status + status VARCHAR(50) DEFAULT 'generating' NOT NULL, + generated_at TIMESTAMP WITH TIME ZONE, + expires_at TIMESTAMP WITH TIME ZONE, + + -- Export info + export_format VARCHAR(50), + export_url TEXT, + + -- Audit + generated_by VARCHAR(255), + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL +); + +-- Indexes for compliance_reports +CREATE INDEX IF NOT EXISTS idx_compliance_reports_tenant_id ON workflow_orchestrator.compliance_reports(tenant_id); +CREATE INDEX IF NOT EXISTS idx_compliance_reports_tenant_type ON workflow_orchestrator.compliance_reports(tenant_id, report_type); +CREATE INDEX IF NOT EXISTS idx_compliance_reports_type ON workflow_orchestrator.compliance_reports(report_type); +CREATE INDEX IF NOT EXISTS idx_compliance_reports_created_at ON workflow_orchestrator.compliance_reports(created_at); + +-- ============================================================================ +-- Data Processing Record Table +-- GDPR Article 30 - Records of processing activities +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.data_processing_records ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.tenants(id) ON DELETE CASCADE, + + -- Processing activity info + activity_name VARCHAR(255) NOT NULL, + activity_description TEXT, + + -- Data subjects + data_subject_categories TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + + -- Personal data categories + personal_data_categories TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + + -- Legal basis + legal_basis VARCHAR(100) NOT NULL, + legal_basis_details TEXT, + + -- Purpose + processing_purposes TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + + -- Recipients + recipient_categories TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + third_country_transfers TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + transfer_safeguards TEXT, + + -- Retention + retention_period VARCHAR(255), + retention_criteria TEXT, + + -- Security measures + technical_measures TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + organizational_measures TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + + -- Status + status VARCHAR(50) DEFAULT 'active' NOT NULL, + review_date TIMESTAMP WITH TIME ZONE, + reviewed_by VARCHAR(255), + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL +); + +-- Indexes for data_processing_records +CREATE INDEX IF NOT EXISTS idx_data_processing_records_tenant_id ON workflow_orchestrator.data_processing_records(tenant_id); +CREATE INDEX IF NOT EXISTS idx_data_processing_records_status ON workflow_orchestrator.data_processing_records(status); +CREATE INDEX IF NOT EXISTS idx_data_processing_records_legal_basis ON workflow_orchestrator.data_processing_records(legal_basis); + +-- ============================================================================ +-- Trigger for updated_at timestamps +-- ============================================================================ + +CREATE OR REPLACE FUNCTION workflow_orchestrator.update_updated_at_column() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Apply trigger to all tables +DO $$ +DECLARE + t text; +BEGIN + FOR t IN SELECT unnest(ARRAY[ + 'tenants', + 'tenant_settings', + 'tenant_quotas', + 'sso_configurations', + 'llm_provider_configs', + 'compliance_reports', + 'data_processing_records' + ]) + LOOP + EXECUTE format(' + DROP TRIGGER IF EXISTS update_updated_at ON workflow_orchestrator.%I; + CREATE TRIGGER update_updated_at + BEFORE UPDATE ON workflow_orchestrator.%I + FOR EACH ROW + EXECUTE FUNCTION workflow_orchestrator.update_updated_at_column(); + ', t, t); + END LOOP; +END; +$$; + +-- ============================================================================ +-- Grant permissions (adjust user as needed) +-- ============================================================================ + +-- Example: GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA workflow_orchestrator TO bytebot_app; + +SELECT 'Phase 10 Enterprise tables created successfully' AS status; diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20251217_phase7_enhanced_features_tables.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20251217_phase7_enhanced_features_tables.sql new file mode 100644 index 000000000..b3f185bbe --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20251217_phase7_enhanced_features_tables.sql @@ -0,0 +1,387 @@ +-- Phase 7: Enhanced Features Tables +-- Creates tables for Goal Templates, Batch Execution, and Analytics +-- Run: psql -h -U -d -f 20251217_phase7_enhanced_features_tables.sql + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS workflow_orchestrator; + +-- ============================================================================ +-- Goal Template Table +-- Reusable templates for common goal patterns +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.goal_templates ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL, + + -- Template identification + name VARCHAR(255) NOT NULL, + description TEXT, + category VARCHAR(100), + tags TEXT[] DEFAULT ARRAY[]::TEXT[] NOT NULL, + icon VARCHAR(100), + + -- Template content + goal_pattern TEXT NOT NULL, + default_constraints JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Variable definitions (array of { name, type, required, default, description }) + variables JSONB DEFAULT '[]'::jsonb NOT NULL, + + -- Checklist template (pre-defined plan steps) + checklist_template JSONB DEFAULT '[]'::jsonb NOT NULL, + + -- Version control + version VARCHAR(50) DEFAULT '1.0.0' NOT NULL, + is_latest BOOLEAN DEFAULT TRUE NOT NULL, + previous_version_id VARCHAR(255), + + -- Publishing status + is_published BOOLEAN DEFAULT FALSE NOT NULL, + is_built_in BOOLEAN DEFAULT FALSE NOT NULL, + + -- Usage tracking + usage_count INTEGER DEFAULT 0 NOT NULL, + last_used_at TIMESTAMP WITH TIME ZONE, + + -- Audit + created_by VARCHAR(255), + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + + -- Constraints + CONSTRAINT uq_goal_templates_tenant_name_version UNIQUE (tenant_id, name, version) +); + +-- Indexes for goal_templates +DO $$ +DECLARE + stmt TEXT; +BEGIN + FOREACH stmt IN ARRAY ARRAY[ + 'CREATE INDEX IF NOT EXISTS idx_goal_templates_tenant_id ON workflow_orchestrator.goal_templates(tenant_id)', + 'CREATE INDEX IF NOT EXISTS idx_goal_templates_tenant_published ON workflow_orchestrator.goal_templates(tenant_id, is_published)', + 'CREATE INDEX IF NOT EXISTS idx_goal_templates_category ON workflow_orchestrator.goal_templates(category)', + 'CREATE INDEX IF NOT EXISTS idx_goal_templates_is_built_in ON workflow_orchestrator.goal_templates(is_built_in)', + 'CREATE INDEX IF NOT EXISTS idx_goal_templates_usage_count ON workflow_orchestrator.goal_templates(usage_count DESC)', + 'CREATE INDEX IF NOT EXISTS idx_goal_templates_created_at ON workflow_orchestrator.goal_templates(created_at)' + ] + LOOP + BEGIN + EXECUTE stmt; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping index create (insufficient_privilege): %', stmt; + END; + END LOOP; +END $$; + +-- ============================================================================ +-- Goal Run From Template Junction Table +-- Tracks which goal runs were created from templates +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.goal_runs_from_template ( + id VARCHAR(255) PRIMARY KEY, + goal_run_id VARCHAR(255) NOT NULL UNIQUE, + template_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.goal_templates(id) ON DELETE CASCADE, + + -- Variables used when instantiating + variable_values JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL +); + +-- Indexes for goal_runs_from_template +DO $$ +DECLARE + stmt TEXT; +BEGIN + FOREACH stmt IN ARRAY ARRAY[ + 'CREATE INDEX IF NOT EXISTS idx_goal_runs_from_template_template_id ON workflow_orchestrator.goal_runs_from_template(template_id)', + 'CREATE INDEX IF NOT EXISTS idx_goal_runs_from_template_goal_run_id ON workflow_orchestrator.goal_runs_from_template(goal_run_id)', + 'CREATE INDEX IF NOT EXISTS idx_goal_runs_from_template_created_at ON workflow_orchestrator.goal_runs_from_template(created_at)' + ] + LOOP + BEGIN + EXECUTE stmt; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping index create (insufficient_privilege): %', stmt; + END; + END LOOP; +END $$; + +-- ============================================================================ +-- Goal Run Batch Table +-- Groups multiple goal runs for batch execution +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.goal_run_batches ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL, + + -- Batch identification + name VARCHAR(255) NOT NULL, + description TEXT, + + -- Batch configuration + execution_mode VARCHAR(50) DEFAULT 'PARALLEL' NOT NULL, + max_concurrency INTEGER DEFAULT 5 NOT NULL, + stop_on_failure BOOLEAN DEFAULT FALSE NOT NULL, + + -- Batch status (PENDING, RUNNING, COMPLETED, PARTIALLY_COMPLETED, FAILED, CANCELLED) + status VARCHAR(50) DEFAULT 'PENDING' NOT NULL, + + -- Progress tracking + total_goals INTEGER DEFAULT 0 NOT NULL, + completed_goals INTEGER DEFAULT 0 NOT NULL, + failed_goals INTEGER DEFAULT 0 NOT NULL, + cancelled_goals INTEGER DEFAULT 0 NOT NULL, + + -- Error tracking + error TEXT, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + started_at TIMESTAMP WITH TIME ZONE, + completed_at TIMESTAMP WITH TIME ZONE +); + +-- Indexes for goal_run_batches +DO $$ +DECLARE + stmt TEXT; +BEGIN + FOREACH stmt IN ARRAY ARRAY[ + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batches_tenant_id ON workflow_orchestrator.goal_run_batches(tenant_id)', + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batches_tenant_status ON workflow_orchestrator.goal_run_batches(tenant_id, status)', + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batches_status ON workflow_orchestrator.goal_run_batches(status)', + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batches_created_at ON workflow_orchestrator.goal_run_batches(created_at)' + ] + LOOP + BEGIN + EXECUTE stmt; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping index create (insufficient_privilege): %', stmt; + END; + END LOOP; +END $$; + +-- ============================================================================ +-- Goal Run Batch Item Table +-- Individual goal run within a batch +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.goal_run_batch_items ( + id VARCHAR(255) PRIMARY KEY, + batch_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.goal_run_batches(id) ON DELETE CASCADE, + + -- Goal definition + goal TEXT NOT NULL, + constraints JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Template reference (optional) + template_id VARCHAR(255), + variable_values JSONB DEFAULT '{}'::jsonb NOT NULL, + + -- Execution order (for SEQUENTIAL mode) + "order" INTEGER DEFAULT 0 NOT NULL, + + -- Status tracking (PENDING, QUEUED, RUNNING, COMPLETED, FAILED, CANCELLED, SKIPPED) + status VARCHAR(50) DEFAULT 'PENDING' NOT NULL, + goal_run_id VARCHAR(255), + + -- Error tracking + error TEXT, + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + started_at TIMESTAMP WITH TIME ZONE, + completed_at TIMESTAMP WITH TIME ZONE +); + +-- Indexes for goal_run_batch_items +DO $$ +DECLARE + stmt TEXT; +BEGIN + FOREACH stmt IN ARRAY ARRAY[ + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batch_items_batch_id ON workflow_orchestrator.goal_run_batch_items(batch_id)', + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batch_items_batch_order ON workflow_orchestrator.goal_run_batch_items(batch_id, "order")', + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batch_items_batch_status ON workflow_orchestrator.goal_run_batch_items(batch_id, status)', + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batch_items_goal_run_id ON workflow_orchestrator.goal_run_batch_items(goal_run_id)', + 'CREATE INDEX IF NOT EXISTS idx_goal_run_batch_items_status ON workflow_orchestrator.goal_run_batch_items(status)' + ] + LOOP + BEGIN + EXECUTE stmt; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping index create (insufficient_privilege): %', stmt; + END; + END LOOP; +END $$; + +-- ============================================================================ +-- Goal Run Analytics Snapshot Table +-- Pre-aggregated goal run metrics for analytics dashboard +-- ============================================================================ + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.goal_run_analytics_snapshots ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL, + + -- Time bucket + period VARCHAR(10) NOT NULL, -- 1h, 1d, 7d, 30d + bucket_start TIMESTAMP WITH TIME ZONE NOT NULL, + bucket_end TIMESTAMP WITH TIME ZONE NOT NULL, + + -- Goal Run Metrics + total_goal_runs INTEGER DEFAULT 0 NOT NULL, + completed_goal_runs INTEGER DEFAULT 0 NOT NULL, + failed_goal_runs INTEGER DEFAULT 0 NOT NULL, + cancelled_goal_runs INTEGER DEFAULT 0 NOT NULL, + + -- Duration metrics (in milliseconds) + avg_duration_ms DOUBLE PRECISION DEFAULT 0 NOT NULL, + min_duration_ms DOUBLE PRECISION DEFAULT 0 NOT NULL, + max_duration_ms DOUBLE PRECISION DEFAULT 0 NOT NULL, + p50_duration_ms DOUBLE PRECISION DEFAULT 0 NOT NULL, + p95_duration_ms DOUBLE PRECISION DEFAULT 0 NOT NULL, + p99_duration_ms DOUBLE PRECISION DEFAULT 0 NOT NULL, + + -- Step metrics + avg_steps_per_goal DOUBLE PRECISION DEFAULT 0 NOT NULL, + avg_replan_count DOUBLE PRECISION DEFAULT 0 NOT NULL, + total_steps_executed INTEGER DEFAULT 0 NOT NULL, + + -- Template usage + template_usage_count INTEGER DEFAULT 0 NOT NULL, + top_template_id VARCHAR(255), + + -- Timestamps + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() NOT NULL, + + -- Constraints + CONSTRAINT uq_analytics_snapshots_tenant_period_bucket UNIQUE (tenant_id, period, bucket_start) +); + +-- Indexes for goal_run_analytics_snapshots +DO $$ +DECLARE + stmt TEXT; +BEGIN + FOREACH stmt IN ARRAY ARRAY[ + 'CREATE INDEX IF NOT EXISTS idx_analytics_snapshots_tenant_id ON workflow_orchestrator.goal_run_analytics_snapshots(tenant_id)', + 'CREATE INDEX IF NOT EXISTS idx_analytics_snapshots_tenant_period ON workflow_orchestrator.goal_run_analytics_snapshots(tenant_id, period)', + 'CREATE INDEX IF NOT EXISTS idx_analytics_snapshots_bucket_start ON workflow_orchestrator.goal_run_analytics_snapshots(bucket_start)', + 'CREATE INDEX IF NOT EXISTS idx_analytics_snapshots_created_at ON workflow_orchestrator.goal_run_analytics_snapshots(created_at)' + ] + LOOP + BEGIN + EXECUTE stmt; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping index create (insufficient_privilege): %', stmt; + END; + END LOOP; +END $$; + +-- ============================================================================ +-- Trigger for updated_at timestamps +-- ============================================================================ + +-- Create or replace the timestamp update function +CREATE OR REPLACE FUNCTION workflow_orchestrator.update_updated_at_column() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ language 'plpgsql'; + +-- Apply triggers to tables with updated_at column +DO $$ +BEGIN + BEGIN + EXECUTE 'DROP TRIGGER IF EXISTS update_goal_templates_updated_at ON workflow_orchestrator.goal_templates'; + EXECUTE 'CREATE TRIGGER update_goal_templates_updated_at BEFORE UPDATE ON workflow_orchestrator.goal_templates FOR EACH ROW EXECUTE FUNCTION workflow_orchestrator.update_updated_at_column()'; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping trigger update_goal_templates_updated_at (insufficient_privilege)'; + END; + + BEGIN + EXECUTE 'DROP TRIGGER IF EXISTS update_goal_run_batches_updated_at ON workflow_orchestrator.goal_run_batches'; + EXECUTE 'CREATE TRIGGER update_goal_run_batches_updated_at BEFORE UPDATE ON workflow_orchestrator.goal_run_batches FOR EACH ROW EXECUTE FUNCTION workflow_orchestrator.update_updated_at_column()'; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping trigger update_goal_run_batches_updated_at (insufficient_privilege)'; + END; + + BEGIN + EXECUTE 'DROP TRIGGER IF EXISTS update_goal_run_batch_items_updated_at ON workflow_orchestrator.goal_run_batch_items'; + EXECUTE 'CREATE TRIGGER update_goal_run_batch_items_updated_at BEFORE UPDATE ON workflow_orchestrator.goal_run_batch_items FOR EACH ROW EXECUTE FUNCTION workflow_orchestrator.update_updated_at_column()'; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping trigger update_goal_run_batch_items_updated_at (insufficient_privilege)'; + END; +END $$; + +-- ============================================================================ +-- Permissions +-- Grant all privileges to bytebot user for application access +-- ============================================================================ + +DO $$ +DECLARE + stmt TEXT; +BEGIN + FOREACH stmt IN ARRAY ARRAY[ + 'GRANT ALL PRIVILEGES ON workflow_orchestrator.goal_templates TO bytebot', + 'GRANT ALL PRIVILEGES ON workflow_orchestrator.goal_runs_from_template TO bytebot', + 'GRANT ALL PRIVILEGES ON workflow_orchestrator.goal_run_batches TO bytebot', + 'GRANT ALL PRIVILEGES ON workflow_orchestrator.goal_run_batch_items TO bytebot', + 'GRANT ALL PRIVILEGES ON workflow_orchestrator.goal_run_analytics_snapshots TO bytebot', + 'GRANT USAGE ON SCHEMA workflow_orchestrator TO bytebot' + ] + LOOP + BEGIN + EXECUTE stmt; + EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Skipping grant (insufficient_privilege): %', stmt; + END; + END LOOP; +END $$; + +-- ============================================================================ +-- Verification +-- ============================================================================ + +DO $$ +DECLARE + table_count INTEGER; +BEGIN + SELECT COUNT(*) INTO table_count + FROM information_schema.tables + WHERE table_schema = 'workflow_orchestrator' + AND table_name IN ( + 'goal_templates', + 'goal_runs_from_template', + 'goal_run_batches', + 'goal_run_batch_items', + 'goal_run_analytics_snapshots' + ); + + IF table_count = 5 THEN + RAISE NOTICE 'SUCCESS: All 5 Phase 7 Enhanced Features tables created successfully'; + ELSE + RAISE WARNING 'WARNING: Expected 5 tables, found %', table_count; + END IF; +END $$; diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260108_stark_fix_outbox_publisher_backoff.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260108_stark_fix_outbox_publisher_backoff.sql new file mode 100644 index 000000000..76f8deca3 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260108_stark_fix_outbox_publisher_backoff.sql @@ -0,0 +1,9 @@ +-- Stark Fix (PR 3): Outbox publisher backoff scheduling +-- Adds a durable next_attempt_at timestamp to prevent tight retry loops. + +ALTER TABLE IF EXISTS workflow_orchestrator.outbox + ADD COLUMN IF NOT EXISTS next_attempt_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(); + +CREATE INDEX IF NOT EXISTS idx_outbox_next_attempt_at + ON workflow_orchestrator.outbox(next_attempt_at); + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260108_stark_fix_user_prompts_outbox.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260108_stark_fix_user_prompts_outbox.sql new file mode 100644 index 000000000..a345fd5fc --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260108_stark_fix_user_prompts_outbox.sql @@ -0,0 +1,201 @@ +-- Stark Fix (Atom 1): Durable User Prompts + Outbox (Idempotent Notifications) +-- Creates: +-- - workflow_orchestrator.user_prompts (dedupe_key unique) +-- - workflow_orchestrator.outbox (dedupe_key unique) +-- Adds: +-- - GoalRunPhase.WAITING_USER_INPUT enum value +-- - checklist_items.step_type + checklist_items.execution_surface +-- +-- Notes: +-- - Designed to be re-runnable (IF NOT EXISTS / guarded CREATE TYPE blocks). +-- - Run using a privileged DB role that can CREATE TYPE / ALTER TABLE. +-- - If you run as a privileged role (e.g., postgres), ensure ownership/privileges +-- are granted to the ByteBot application role ("bytebot") at the end of this file. + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS workflow_orchestrator; + +-- --------------------------------------------------------------------------- +-- Enums +-- --------------------------------------------------------------------------- + +-- Add GoalRunPhase.WAITING_USER_INPUT (guarded, supports reruns) +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'GoalRunPhase' + ) AND NOT EXISTS ( + SELECT 1 + FROM pg_enum e + JOIN pg_type t ON e.enumtypid = t.oid + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'GoalRunPhase' + AND e.enumlabel = 'WAITING_USER_INPUT' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."GoalRunPhase" ADD VALUE ''WAITING_USER_INPUT'''; + END IF; +END $$; + +-- StepType enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."StepType" AS ENUM ('EXECUTE', 'USER_INPUT_REQUIRED'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- ExecutionSurface enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."ExecutionSurface" AS ENUM ('TEXT_ONLY', 'DESKTOP'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- UserPromptStatus enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."UserPromptStatus" AS ENUM ('OPEN', 'RESOLVED', 'CANCELLED'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- UserPromptKind enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."UserPromptKind" AS ENUM ('TEXT_CLARIFICATION', 'DESKTOP_TAKEOVER'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- --------------------------------------------------------------------------- +-- checklist_items: add step_type + execution_surface +-- --------------------------------------------------------------------------- + +ALTER TABLE IF EXISTS workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS step_type workflow_orchestrator."StepType" NOT NULL DEFAULT 'EXECUTE', + ADD COLUMN IF NOT EXISTS execution_surface workflow_orchestrator."ExecutionSurface" NOT NULL DEFAULT 'TEXT_ONLY'; + +-- Backfill execution_surface from requires_desktop for existing rows +UPDATE workflow_orchestrator.checklist_items +SET execution_surface = CASE + WHEN requires_desktop THEN 'DESKTOP'::workflow_orchestrator."ExecutionSurface" + ELSE 'TEXT_ONLY'::workflow_orchestrator."ExecutionSurface" +END; + +-- --------------------------------------------------------------------------- +-- user_prompts: durable user interaction surface +-- --------------------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.user_prompts ( + id VARCHAR(255) PRIMARY KEY, + goal_run_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.goal_runs(id) ON DELETE CASCADE, + checklist_item_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.checklist_items(id) ON DELETE CASCADE, + + kind workflow_orchestrator."UserPromptKind" NOT NULL, + status workflow_orchestrator."UserPromptStatus" NOT NULL DEFAULT 'OPEN', + + dedupe_key TEXT NOT NULL, + payload JSONB NOT NULL DEFAULT '{}'::jsonb, + answers JSONB, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + resolved_at TIMESTAMP WITH TIME ZONE, + + CONSTRAINT uq_user_prompts_dedupe_key UNIQUE (dedupe_key) +); + +CREATE INDEX IF NOT EXISTS idx_user_prompts_goal_run_id + ON workflow_orchestrator.user_prompts(goal_run_id); +CREATE INDEX IF NOT EXISTS idx_user_prompts_checklist_item_id + ON workflow_orchestrator.user_prompts(checklist_item_id); +CREATE INDEX IF NOT EXISTS idx_user_prompts_status + ON workflow_orchestrator.user_prompts(status); + +-- --------------------------------------------------------------------------- +-- outbox: idempotent notifications (dedupe_key unique) +-- --------------------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.outbox ( + id VARCHAR(255) PRIMARY KEY, + dedupe_key TEXT NOT NULL, + aggregate_id VARCHAR(255), + event_type VARCHAR(255) NOT NULL, + payload JSONB NOT NULL, + + processed_at TIMESTAMP WITH TIME ZONE, + retry_count INTEGER NOT NULL DEFAULT 0, + error TEXT, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + + CONSTRAINT uq_outbox_dedupe_key UNIQUE (dedupe_key) +); + +CREATE INDEX IF NOT EXISTS idx_outbox_processed_at + ON workflow_orchestrator.outbox(processed_at); +CREATE INDEX IF NOT EXISTS idx_outbox_event_type + ON workflow_orchestrator.outbox(event_type); +CREATE INDEX IF NOT EXISTS idx_outbox_aggregate_id + ON workflow_orchestrator.outbox(aggregate_id); + +-- --------------------------------------------------------------------------- +-- Ownership / privileges (critical for production) +-- --------------------------------------------------------------------------- + +-- Existing workflow_orchestrator tables are owned by the ByteBot DB role ("bytebot"). +-- If you apply this migration as a privileged role (e.g., postgres), the new tables/types +-- will be owned by that privileged role by default which breaks runtime access. +-- Make ownership explicit so the orchestrator can read/write these objects. + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompts OWNER TO bytebot; +ALTER TABLE IF EXISTS workflow_orchestrator.outbox OWNER TO bytebot; + +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'StepType' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."StepType" OWNER TO bytebot'; + END IF; + + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'ExecutionSurface' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."ExecutionSurface" OWNER TO bytebot'; + END IF; + + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptStatus' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."UserPromptStatus" OWNER TO bytebot'; + END IF; + + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptKind' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."UserPromptKind" OWNER TO bytebot'; + END IF; +END $$; diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260109_stark_eir_prompt_engine.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260109_stark_eir_prompt_engine.sql new file mode 100644 index 000000000..adf019c98 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260109_stark_eir_prompt_engine.sql @@ -0,0 +1,406 @@ +-- Stark Fix vNext: External Input Request (EIR) / Prompt Engine Hardening +-- +-- Adds/updates: +-- - workflow_orchestrator.goal_specs (GoalSpec gate before planning) +-- - workflow_orchestrator.desktop_leases (first-class desktop retention lease) +-- - workflow_orchestrator.user_prompts extensions: +-- - one OPEN prompt per run (partial unique index) +-- - revision/supersede pointers +-- - cancellation/expiry +-- - linkage to GoalSpec + ApprovalRequest + DesktopLease +-- - tenant_id (denormalized for query + access control) +-- - workflow_orchestrator.user_prompt_resolutions (immutable actor-stamped answers) +-- - workflow_orchestrator.outbox.event_sequence (monotonic cursor for replay) +-- +-- Design goals: +-- - Schema-forward (expand-first), rerunnable, and safe under retries. +-- - Keep existing columns (e.g., user_prompts.answers) for backwards compatibility. +-- - No destructive changes; loosen constraints only where needed (checklist_item_id nullable). +-- +-- IMPORTANT: +-- - Run using a DB role that can CREATE TYPE / ALTER TYPE / CREATE TABLE / ALTER TABLE. +-- - If you apply as a privileged role (e.g., postgres), ownership must be granted to the +-- ByteBot application role ("bytebot") at the end of this file. + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS workflow_orchestrator; + +-- --------------------------------------------------------------------------- +-- Enums (expand-only) +-- --------------------------------------------------------------------------- + +-- Add UserPromptStatus.EXPIRED (guarded, supports reruns) +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptStatus' + ) AND NOT EXISTS ( + SELECT 1 + FROM pg_enum e + JOIN pg_type t ON e.enumtypid = t.oid + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptStatus' + AND e.enumlabel = 'EXPIRED' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."UserPromptStatus" ADD VALUE ''EXPIRED'''; + END IF; +END $$; + +-- Add UserPromptKind.GOAL_INTAKE + UserPromptKind.APPROVAL (guarded, supports reruns) +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptKind' + ) AND NOT EXISTS ( + SELECT 1 + FROM pg_enum e + JOIN pg_type t ON e.enumtypid = t.oid + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptKind' + AND e.enumlabel = 'GOAL_INTAKE' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."UserPromptKind" ADD VALUE ''GOAL_INTAKE'''; + END IF; +END $$; + +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptKind' + ) AND NOT EXISTS ( + SELECT 1 + FROM pg_enum e + JOIN pg_type t ON e.enumtypid = t.oid + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptKind' + AND e.enumlabel = 'APPROVAL' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."UserPromptKind" ADD VALUE ''APPROVAL'''; + END IF; +END $$; + +-- UserPromptCancelReason enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."UserPromptCancelReason" AS ENUM ( + 'SUPERSEDED', + 'USER_CANCELLED', + 'TIMEOUT', + 'POLICY_DENY', + 'RUN_ENDED' + ); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- ActorType enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."ActorType" AS ENUM ('HUMAN', 'AGENT', 'SYSTEM'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- GoalSpecStatus enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."GoalSpecStatus" AS ENUM ('INCOMPLETE', 'COMPLETE'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- Desktop lease enums +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."DesktopLeaseMode" AS ENUM ('EPHEMERAL', 'WORKSPACE'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."DesktopLeaseStatus" AS ENUM ('ACTIVE', 'RELEASED', 'EXPIRED', 'CANCELLED'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- --------------------------------------------------------------------------- +-- goal_specs +-- --------------------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.goal_specs ( + id VARCHAR(255) PRIMARY KEY, + goal_run_id VARCHAR(255) NOT NULL UNIQUE REFERENCES workflow_orchestrator.goal_runs(id) ON DELETE CASCADE, + tenant_id VARCHAR(255) NOT NULL, + + status workflow_orchestrator."GoalSpecStatus" NOT NULL DEFAULT 'INCOMPLETE', + + schema_id TEXT NOT NULL, + schema_version INTEGER NOT NULL DEFAULT 1, + json_schema JSONB NOT NULL, + ui_schema JSONB, + values JSONB NOT NULL DEFAULT '{}'::jsonb, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + completed_at TIMESTAMP WITH TIME ZONE +); + +CREATE INDEX IF NOT EXISTS idx_goal_specs_tenant_id + ON workflow_orchestrator.goal_specs(tenant_id); +CREATE INDEX IF NOT EXISTS idx_goal_specs_status + ON workflow_orchestrator.goal_specs(status); + +-- --------------------------------------------------------------------------- +-- desktop_leases +-- --------------------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.desktop_leases ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL, + goal_run_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.goal_runs(id) ON DELETE CASCADE, + + workspace_id VARCHAR(255), + task_id VARCHAR(255), + + mode workflow_orchestrator."DesktopLeaseMode" NOT NULL DEFAULT 'WORKSPACE', + status workflow_orchestrator."DesktopLeaseStatus" NOT NULL DEFAULT 'ACTIVE', + + keepalive_until TIMESTAMP WITH TIME ZONE, + expires_at TIMESTAMP WITH TIME ZONE, + released_at TIMESTAMP WITH TIME ZONE, + + reason TEXT, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_desktop_leases_tenant_id + ON workflow_orchestrator.desktop_leases(tenant_id); +CREATE INDEX IF NOT EXISTS idx_desktop_leases_goal_run_id + ON workflow_orchestrator.desktop_leases(goal_run_id); +CREATE INDEX IF NOT EXISTS idx_desktop_leases_status + ON workflow_orchestrator.desktop_leases(status); +CREATE INDEX IF NOT EXISTS idx_desktop_leases_keepalive_until + ON workflow_orchestrator.desktop_leases(keepalive_until); + +-- --------------------------------------------------------------------------- +-- user_prompts: expand + loosen constraints +-- --------------------------------------------------------------------------- + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompts + ADD COLUMN IF NOT EXISTS tenant_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS goal_spec_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS approval_request_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS desktop_lease_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS schema_id TEXT, + ADD COLUMN IF NOT EXISTS schema_version INTEGER, + ADD COLUMN IF NOT EXISTS ui_schema JSONB, + ADD COLUMN IF NOT EXISTS validator_version TEXT, + ADD COLUMN IF NOT EXISTS root_prompt_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS supersedes_prompt_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS superseded_by_prompt_id VARCHAR(255), + ADD COLUMN IF NOT EXISTS revision INTEGER NOT NULL DEFAULT 1, + ADD COLUMN IF NOT EXISTS cancel_reason workflow_orchestrator."UserPromptCancelReason", + ADD COLUMN IF NOT EXISTS cancelled_at TIMESTAMP WITH TIME ZONE, + ADD COLUMN IF NOT EXISTS expires_at TIMESTAMP WITH TIME ZONE; + +-- Backfill tenant_id from goal_runs (safe to re-run) +UPDATE workflow_orchestrator.user_prompts up +SET tenant_id = gr.tenant_id +FROM workflow_orchestrator.goal_runs gr +WHERE up.goal_run_id = gr.id + AND (up.tenant_id IS NULL OR up.tenant_id = ''); + +-- Allow goal-intake / approval prompts that are not tied to a checklist item. +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM information_schema.columns + WHERE table_schema = 'workflow_orchestrator' + AND table_name = 'user_prompts' + AND column_name = 'checklist_item_id' + AND is_nullable = 'NO' + ) THEN + EXECUTE 'ALTER TABLE workflow_orchestrator.user_prompts ALTER COLUMN checklist_item_id DROP NOT NULL'; + END IF; +END $$; + +-- Add foreign keys (guarded) +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_constraint + WHERE conname = 'fk_user_prompts_goal_spec_id' + ) THEN + EXECUTE 'ALTER TABLE workflow_orchestrator.user_prompts ' || + 'ADD CONSTRAINT fk_user_prompts_goal_spec_id ' || + 'FOREIGN KEY (goal_spec_id) REFERENCES workflow_orchestrator.goal_specs(id) ON DELETE CASCADE'; + END IF; +END $$; + +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_constraint + WHERE conname = 'fk_user_prompts_approval_request_id' + ) THEN + EXECUTE 'ALTER TABLE workflow_orchestrator.user_prompts ' || + 'ADD CONSTRAINT fk_user_prompts_approval_request_id ' || + 'FOREIGN KEY (approval_request_id) REFERENCES workflow_orchestrator.approval_requests(id) ON DELETE CASCADE'; + END IF; +END $$; + +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_constraint + WHERE conname = 'fk_user_prompts_desktop_lease_id' + ) THEN + EXECUTE 'ALTER TABLE workflow_orchestrator.user_prompts ' || + 'ADD CONSTRAINT fk_user_prompts_desktop_lease_id ' || + 'FOREIGN KEY (desktop_lease_id) REFERENCES workflow_orchestrator.desktop_leases(id) ON DELETE CASCADE'; + END IF; +END $$; + +CREATE INDEX IF NOT EXISTS idx_user_prompts_tenant_id + ON workflow_orchestrator.user_prompts(tenant_id); +CREATE INDEX IF NOT EXISTS idx_user_prompts_goal_spec_id + ON workflow_orchestrator.user_prompts(goal_spec_id); +CREATE INDEX IF NOT EXISTS idx_user_prompts_approval_request_id + ON workflow_orchestrator.user_prompts(approval_request_id); +CREATE INDEX IF NOT EXISTS idx_user_prompts_desktop_lease_id + ON workflow_orchestrator.user_prompts(desktop_lease_id); +CREATE INDEX IF NOT EXISTS idx_user_prompts_expires_at + ON workflow_orchestrator.user_prompts(expires_at); + +-- Exactly-one OPEN prompt per run (default policy) +CREATE UNIQUE INDEX IF NOT EXISTS uq_user_prompts_one_open_per_run + ON workflow_orchestrator.user_prompts(goal_run_id) + WHERE status = 'OPEN'; + +-- --------------------------------------------------------------------------- +-- user_prompt_resolutions: immutable, actor-stamped answers +-- --------------------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.user_prompt_resolutions ( + id VARCHAR(255) PRIMARY KEY, + prompt_id VARCHAR(255) NOT NULL UNIQUE REFERENCES workflow_orchestrator.user_prompts(id) ON DELETE CASCADE, + + tenant_id VARCHAR(255) NOT NULL, + goal_run_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.goal_runs(id) ON DELETE CASCADE, + + actor_type workflow_orchestrator."ActorType" NOT NULL, + actor_id VARCHAR(255), + actor_email TEXT, + actor_name TEXT, + actor_ip_address TEXT, + actor_user_agent TEXT, + + request_id TEXT, + auth_context JSONB, + + answers JSONB NOT NULL DEFAULT '{}'::jsonb, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_user_prompt_resolutions_tenant_id + ON workflow_orchestrator.user_prompt_resolutions(tenant_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_resolutions_goal_run_id + ON workflow_orchestrator.user_prompt_resolutions(goal_run_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_resolutions_actor_id + ON workflow_orchestrator.user_prompt_resolutions(actor_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_resolutions_created_at + ON workflow_orchestrator.user_prompt_resolutions(created_at); + +-- --------------------------------------------------------------------------- +-- outbox: monotonic cursor for replay +-- --------------------------------------------------------------------------- + +ALTER TABLE IF EXISTS workflow_orchestrator.outbox + ADD COLUMN IF NOT EXISTS event_sequence BIGINT GENERATED BY DEFAULT AS IDENTITY; + +CREATE INDEX IF NOT EXISTS idx_outbox_event_sequence + ON workflow_orchestrator.outbox(event_sequence); + +-- --------------------------------------------------------------------------- +-- Ownership / privileges (critical for production) +-- --------------------------------------------------------------------------- + +ALTER TABLE IF EXISTS workflow_orchestrator.goal_specs OWNER TO bytebot; +ALTER TABLE IF EXISTS workflow_orchestrator.desktop_leases OWNER TO bytebot; +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompt_resolutions OWNER TO bytebot; + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompts OWNER TO bytebot; +ALTER TABLE IF EXISTS workflow_orchestrator.outbox OWNER TO bytebot; + +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptCancelReason' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."UserPromptCancelReason" OWNER TO bytebot'; + END IF; + + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'ActorType' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."ActorType" OWNER TO bytebot'; + END IF; + + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'GoalSpecStatus' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."GoalSpecStatus" OWNER TO bytebot'; + END IF; + + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'DesktopLeaseMode' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."DesktopLeaseMode" OWNER TO bytebot'; + END IF; + + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'DesktopLeaseStatus' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."DesktopLeaseStatus" OWNER TO bytebot'; + END IF; +END $$; diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260110_stark_prompt_resume_ack.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260110_stark_prompt_resume_ack.sql new file mode 100644 index 000000000..5d0cb9f2c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260110_stark_prompt_resume_ack.sql @@ -0,0 +1,35 @@ +-- Stark Fix vNext: Prompt resume acknowledgement + reconciler support +-- +-- Adds: +-- - workflow_orchestrator.user_prompt_resolutions.resume_acknowledged_at (timestamp) +-- - workflow_orchestrator.user_prompt_resolutions.resume_ack (jsonb) +-- +-- Backfill: +-- - If an existing prompt resolution has a processed resume outbox row, mark it acknowledged +-- to avoid reconciler churn on historic data. +-- +-- Safe to re-run. + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompt_resolutions + ADD COLUMN IF NOT EXISTS resume_acknowledged_at TIMESTAMP WITH TIME ZONE, + ADD COLUMN IF NOT EXISTS resume_ack JSONB; + +CREATE INDEX IF NOT EXISTS idx_user_prompt_resolutions_resume_acknowledged_at + ON workflow_orchestrator.user_prompt_resolutions(resume_acknowledged_at); + +-- Backfill ack for historical rows where the canonical resume outbox row already processed. +UPDATE workflow_orchestrator.user_prompt_resolutions r +SET + resume_acknowledged_at = o.processed_at, + resume_ack = COALESCE(r.resume_ack, '{}'::jsonb) || jsonb_build_object( + 'backfilled', true, + 'source', '20260110_stark_prompt_resume_ack', + 'outboxDedupeKey', o.dedupe_key + ) +FROM workflow_orchestrator.outbox o +WHERE r.resume_acknowledged_at IS NULL + AND o.dedupe_key = ('user_prompt.resume:' || r.prompt_id) + AND o.processed_at IS NOT NULL; + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompt_resolutions OWNER TO bytebot; + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260110_stark_prompt_scope_authz_audit.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260110_stark_prompt_scope_authz_audit.sql new file mode 100644 index 000000000..79a237ea1 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260110_stark_prompt_scope_authz_audit.sql @@ -0,0 +1,104 @@ +-- Stark Fix vNext: Prompt scope + audit-grade authz fields + parent-agent actor type +-- +-- Adds: +-- - ActorType.PARENT_AGENT (enum expansion) +-- - UserPromptScope enum + user_prompts.scope (explicit prompt scope) +-- - user_prompt_resolutions: client_request_id, idempotency_key, authz_* fields +-- +-- Safe to re-run (guarded). + +-- --------------------------------------------------------------------------- +-- Enums (expand-only) +-- --------------------------------------------------------------------------- + +-- Add ActorType.PARENT_AGENT (guarded) +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'ActorType' + ) AND NOT EXISTS ( + SELECT 1 + FROM pg_enum e + JOIN pg_type t ON e.enumtypid = t.oid + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'ActorType' + AND e.enumlabel = 'PARENT_AGENT' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."ActorType" ADD VALUE ''PARENT_AGENT'''; + END IF; +END $$; + +-- UserPromptScope enum +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."UserPromptScope" AS ENUM ('RUN', 'STEP', 'APPROVAL'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- --------------------------------------------------------------------------- +-- user_prompts.scope (explicit semantic source of truth) +-- --------------------------------------------------------------------------- + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompts + ADD COLUMN IF NOT EXISTS scope workflow_orchestrator."UserPromptScope" NOT NULL DEFAULT 'RUN'; + +-- Backfill scope from existing linkage columns (safe to re-run) +UPDATE workflow_orchestrator.user_prompts +SET scope = 'APPROVAL' +WHERE approval_request_id IS NOT NULL + AND scope <> 'APPROVAL'; + +UPDATE workflow_orchestrator.user_prompts +SET scope = 'STEP' +WHERE approval_request_id IS NULL + AND checklist_item_id IS NOT NULL + AND scope <> 'STEP'; + +UPDATE workflow_orchestrator.user_prompts +SET scope = 'RUN' +WHERE approval_request_id IS NULL + AND checklist_item_id IS NULL + AND scope <> 'RUN'; + +CREATE INDEX IF NOT EXISTS idx_user_prompts_scope + ON workflow_orchestrator.user_prompts(scope); + +-- --------------------------------------------------------------------------- +-- user_prompt_resolutions audit-grade fields +-- --------------------------------------------------------------------------- + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompt_resolutions + ADD COLUMN IF NOT EXISTS client_request_id TEXT, + ADD COLUMN IF NOT EXISTS idempotency_key TEXT, + ADD COLUMN IF NOT EXISTS authz_decision TEXT NOT NULL DEFAULT 'ALLOW', + ADD COLUMN IF NOT EXISTS authz_policy TEXT, + ADD COLUMN IF NOT EXISTS authz_rule_id TEXT, + ADD COLUMN IF NOT EXISTS authz_reason TEXT; + +CREATE INDEX IF NOT EXISTS idx_user_prompt_resolutions_client_request_id + ON workflow_orchestrator.user_prompt_resolutions(client_request_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_resolutions_idempotency_key + ON workflow_orchestrator.user_prompt_resolutions(idempotency_key); + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompts OWNER TO bytebot; +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompt_resolutions OWNER TO bytebot; + +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'UserPromptScope' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."UserPromptScope" OWNER TO bytebot'; + END IF; +END $$; + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260111_stark_goal_run_execution_engine.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260111_stark_goal_run_execution_engine.sql new file mode 100644 index 000000000..771e5728b --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260111_stark_goal_run_execution_engine.sql @@ -0,0 +1,83 @@ +-- Stark Fix vNext: Immutable per-run execution engine +-- +-- Adds: +-- - workflow_orchestrator.GoalRunExecutionEngine enum: LEGACY_DB_LOOP | TEMPORAL_WORKFLOW +-- - workflow_orchestrator.goal_runs.execution_engine (NOT NULL, default LEGACY_DB_LOOP) +-- +-- Invariant: +-- - execution_engine is set at run creation and MUST NOT change. +-- +-- Safe to re-run (guarded). + +-- --------------------------------------------------------------------------- +-- Enum (expand-only) +-- --------------------------------------------------------------------------- + +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."GoalRunExecutionEngine" AS ENUM ('LEGACY_DB_LOOP', 'TEMPORAL_WORKFLOW'); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +-- --------------------------------------------------------------------------- +-- Column + backfill +-- --------------------------------------------------------------------------- + +ALTER TABLE IF EXISTS workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS execution_engine workflow_orchestrator."GoalRunExecutionEngine" NOT NULL DEFAULT 'LEGACY_DB_LOOP'; + +UPDATE workflow_orchestrator.goal_runs +SET execution_engine = 'LEGACY_DB_LOOP' +WHERE execution_engine IS NULL; + +CREATE INDEX IF NOT EXISTS idx_goal_runs_execution_engine + ON workflow_orchestrator.goal_runs(execution_engine); + +-- --------------------------------------------------------------------------- +-- Immutability guard (DB-enforced) +-- --------------------------------------------------------------------------- + +CREATE OR REPLACE FUNCTION workflow_orchestrator.prevent_goal_run_execution_engine_update() +RETURNS trigger +LANGUAGE plpgsql +AS $$ +BEGIN + IF NEW.execution_engine IS DISTINCT FROM OLD.execution_engine THEN + RAISE EXCEPTION 'goal_runs.execution_engine is immutable (old %, new %)', OLD.execution_engine, NEW.execution_engine + USING ERRCODE = 'check_violation'; + END IF; + + RETURN NEW; +END; +$$; + +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_trigger + WHERE tgname = 'tr_goal_runs_execution_engine_immutable' + ) THEN + CREATE TRIGGER tr_goal_runs_execution_engine_immutable + BEFORE UPDATE ON workflow_orchestrator.goal_runs + FOR EACH ROW + EXECUTE FUNCTION workflow_orchestrator.prevent_goal_run_execution_engine_update(); + END IF; +END $$; + +ALTER TABLE IF EXISTS workflow_orchestrator.goal_runs OWNER TO bytebot; + +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_type t + JOIN pg_namespace n ON n.oid = t.typnamespace + WHERE n.nspname = 'workflow_orchestrator' + AND t.typname = 'GoalRunExecutionEngine' + ) THEN + EXECUTE 'ALTER TYPE workflow_orchestrator."GoalRunExecutionEngine" OWNER TO bytebot'; + END IF; +END $$; + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p0_checklist_items_block_metadata.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p0_checklist_items_block_metadata.sql new file mode 100644 index 000000000..c2ca50e29 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p0_checklist_items_block_metadata.sql @@ -0,0 +1,53 @@ +-- P0 Hardening (NEEDS_HELP durability): add explicit blocked metadata fields to checklist_items +-- IMPORTANT: This repo applies all prisma/migrations/*.sql on every Argo sync. +-- This migration MUST be idempotent. + +DO $$ +BEGIN + ALTER TABLE workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS blocked_by_prompt_id text; +EXCEPTION + WHEN duplicate_column THEN NULL; +END $$; + +DO $$ +BEGIN + ALTER TABLE workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS blocked_reason text; +EXCEPTION + WHEN duplicate_column THEN NULL; +END $$; + +DO $$ +BEGIN + ALTER TABLE workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS blocked_at timestamptz; +EXCEPTION + WHEN duplicate_column THEN NULL; +END $$; + +-- Backfill best-effort from latest OPEN user_prompt for BLOCKED checklist items. +-- This avoids parsing arbitrary actual_outcome text as JSON. +WITH latest_open_prompt AS ( + SELECT DISTINCT ON (checklist_item_id) + id, + checklist_item_id, + created_at + FROM workflow_orchestrator.user_prompts + WHERE checklist_item_id IS NOT NULL + AND status = 'OPEN' + ORDER BY checklist_item_id, created_at DESC +) +UPDATE workflow_orchestrator.checklist_items ci +SET + blocked_by_prompt_id = lop.id, + blocked_at = COALESCE(ci.blocked_at, lop.created_at), + blocked_reason = COALESCE(ci.blocked_reason, 'WAITING_USER_INPUT') +FROM latest_open_prompt lop +WHERE ci.id = lop.checklist_item_id + AND ci.status = 'BLOCKED' + AND ci.blocked_by_prompt_id IS NULL; + +CREATE INDEX IF NOT EXISTS checklist_items_blocked_by_prompt_id_idx + ON workflow_orchestrator.checklist_items (blocked_by_prompt_id); + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p0_user_prompts_json_schema_snapshot.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p0_user_prompts_json_schema_snapshot.sql new file mode 100644 index 000000000..5b441b8a4 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p0_user_prompts_json_schema_snapshot.sql @@ -0,0 +1,50 @@ +-- P0: Prompt schema snapshotting (fail-closed validation uses snapshot) +-- +-- Adds: +-- - workflow_orchestrator.user_prompts.json_schema (JSONB) +-- +-- Backfills: +-- - GOAL_INTAKE prompts from goal_specs + payload fallbacks +-- - schema_id/schema_version/ui_schema/validator_version where missing +-- +-- Enforces: +-- - GOAL_INTAKE prompts must have json_schema (CHECK constraint) +-- +-- Safe to re-run (guarded / idempotent). + +ALTER TABLE IF EXISTS workflow_orchestrator.user_prompts + ADD COLUMN IF NOT EXISTS json_schema jsonb; + +-- Backfill GOAL_INTAKE prompt schema snapshots from goal_specs (authoritative) and payload (fallback). +UPDATE workflow_orchestrator.user_prompts p +SET + schema_id = COALESCE(p.schema_id, gs.schema_id, p.payload->>'schemaId'), + schema_version = COALESCE(p.schema_version, gs.schema_version, NULLIF(p.payload->>'schemaVersion','')::int), + json_schema = COALESCE(p.json_schema, gs.json_schema, p.payload->'jsonSchema'), + ui_schema = COALESCE(p.ui_schema, gs.ui_schema, p.payload->'uiSchema'), + validator_version = COALESCE(p.validator_version, 'ajv@8') +FROM workflow_orchestrator.goal_specs gs +WHERE p.kind = 'GOAL_INTAKE' + AND p.goal_spec_id = gs.id + AND ( + p.schema_id IS NULL OR + p.schema_version IS NULL OR + p.json_schema IS NULL OR + p.ui_schema IS NULL OR + p.validator_version IS NULL + ); + +-- Enforce: GOAL_INTAKE prompts require a schema snapshot. +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_constraint + WHERE conname = 'ck_user_prompts_goal_intake_requires_json_schema' + ) THEN + ALTER TABLE workflow_orchestrator.user_prompts + ADD CONSTRAINT ck_user_prompts_goal_intake_requires_json_schema + CHECK (kind <> 'GOAL_INTAKE' OR json_schema IS NOT NULL); + END IF; +END $$; + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p1_goal_runs_temporal_workflow_ids.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p1_goal_runs_temporal_workflow_ids.sql new file mode 100644 index 000000000..0b9fe7560 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260112_p1_goal_runs_temporal_workflow_ids.sql @@ -0,0 +1,48 @@ +-- P1 Hardening: persist Temporal workflow identifiers on goal_runs for auditability +-- IMPORTANT: This repo applies all prisma/migrations/*.sql on every Argo sync. +-- This migration MUST be idempotent. + +DO $$ +BEGIN + ALTER TABLE workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS temporal_workflow_id text; +EXCEPTION + WHEN duplicate_column THEN NULL; +END $$; + +DO $$ +BEGIN + ALTER TABLE workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS temporal_run_id text; +EXCEPTION + WHEN duplicate_column THEN NULL; +END $$; + +DO $$ +BEGIN + ALTER TABLE workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS temporal_started_at timestamptz; +EXCEPTION + WHEN duplicate_column THEN NULL; +END $$; + +-- Best-effort backfill: workflowId is deterministic for current Temporal engine runs. +-- runId cannot be backfilled without querying Temporal history, so it remains NULL until start. +UPDATE workflow_orchestrator.goal_runs +SET temporal_workflow_id = COALESCE(temporal_workflow_id, 'goal-run-' || id) +WHERE execution_engine = 'TEMPORAL_WORKFLOW' + AND temporal_workflow_id IS NULL; + +-- Best-effort backfill for started time (use goal_runs.started_at as a proxy when present). +UPDATE workflow_orchestrator.goal_runs +SET temporal_started_at = COALESCE(temporal_started_at, started_at) +WHERE execution_engine = 'TEMPORAL_WORKFLOW' + AND temporal_started_at IS NULL + AND started_at IS NOT NULL; + +CREATE UNIQUE INDEX IF NOT EXISTS goal_runs_temporal_workflow_id_uidx + ON workflow_orchestrator.goal_runs (temporal_workflow_id) + WHERE temporal_workflow_id IS NOT NULL; + +CREATE INDEX IF NOT EXISTS goal_runs_temporal_run_id_idx + ON workflow_orchestrator.goal_runs (temporal_run_id); diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260114_desktop_vision_waiting_provider_phase.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260114_desktop_vision_waiting_provider_phase.sql new file mode 100644 index 000000000..be2c2588e --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260114_desktop_vision_waiting_provider_phase.sql @@ -0,0 +1,18 @@ +-- Desktop/Vision Reliability: WAITING_PROVIDER run phase +-- +-- Adds: +-- - workflow_orchestrator."GoalRunPhase" enum value: WAITING_PROVIDER +-- +-- Purpose: +-- - Allows the orchestrator to pause safely when LLM/provider capacity is unavailable +-- (instead of retry storms or semantic replans). +-- +-- Safe to re-run (guarded). + +DO $$ +BEGIN + ALTER TYPE workflow_orchestrator."GoalRunPhase" ADD VALUE 'WAITING_PROVIDER'; +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260114_stark_user_prompt_attempts.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260114_stark_user_prompt_attempts.sql new file mode 100644 index 000000000..477b960c4 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260114_stark_user_prompt_attempts.sql @@ -0,0 +1,69 @@ +-- Stark Fix vNext: User Prompt Attempts (append-only) +-- +-- Adds: +-- - workflow_orchestrator.user_prompt_attempts +-- +-- Purpose: +-- - Record every prompt resolution submission attempt (including invalid attempts) +-- - Enable schema-driven validation feedback without mutating prompt history +-- - Provide audit-grade forensic trail for "help spam" and repeated resolution retries +-- +-- Safe to re-run (IF NOT EXISTS / guarded indexes). + +CREATE SCHEMA IF NOT EXISTS workflow_orchestrator; + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.user_prompt_attempts ( + id VARCHAR(255) PRIMARY KEY, + prompt_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.user_prompts(id) ON DELETE CASCADE, + + tenant_id VARCHAR(255) NOT NULL, + goal_run_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.goal_runs(id) ON DELETE CASCADE, + + actor_type workflow_orchestrator."ActorType" NOT NULL, + actor_id VARCHAR(255), + actor_email TEXT, + actor_name TEXT, + actor_ip_address TEXT, + actor_user_agent TEXT, + + request_id TEXT, + auth_context JSONB, + client_request_id TEXT, + idempotency_key TEXT, + + authz_decision TEXT NOT NULL DEFAULT 'ALLOW', + authz_policy TEXT, + authz_rule_id TEXT, + authz_reason TEXT, + + answers JSONB NOT NULL DEFAULT '{}'::jsonb, + + is_valid BOOLEAN NOT NULL DEFAULT TRUE, + validation_result JSONB, + error_code TEXT, + error_message TEXT, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() +); + +-- Fast lookups +CREATE INDEX IF NOT EXISTS idx_user_prompt_attempts_tenant_id + ON workflow_orchestrator.user_prompt_attempts(tenant_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_attempts_goal_run_id + ON workflow_orchestrator.user_prompt_attempts(goal_run_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_attempts_prompt_id + ON workflow_orchestrator.user_prompt_attempts(prompt_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_attempts_actor_id + ON workflow_orchestrator.user_prompt_attempts(actor_id); +CREATE INDEX IF NOT EXISTS idx_user_prompt_attempts_created_at + ON workflow_orchestrator.user_prompt_attempts(created_at); + +-- Idempotency (prompt-scoped) +CREATE UNIQUE INDEX IF NOT EXISTS uq_user_prompt_attempts_prompt_id_idempotency_key + ON workflow_orchestrator.user_prompt_attempts(prompt_id, idempotency_key) + WHERE idempotency_key IS NOT NULL; + +CREATE UNIQUE INDEX IF NOT EXISTS uq_user_prompt_attempts_prompt_id_client_request_id + ON workflow_orchestrator.user_prompt_attempts(prompt_id, client_request_id) + WHERE client_request_id IS NOT NULL; + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase10_checklist_item_retry_gating.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase10_checklist_item_retry_gating.sql new file mode 100644 index 000000000..445b59e11 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase10_checklist_item_retry_gating.sql @@ -0,0 +1,30 @@ +-- Stark Closeout: Durable retry gating for checklist items +-- +-- Adds (workflow_orchestrator.checklist_items): +-- - infra_retry_count, infra_retry_after +-- - heartbeat_retry_count, heartbeat_retry_after +-- +-- Purpose: +-- - Ensures exponential backoff is enforced even after a retry resets a step back to PENDING +-- - Makes retry behavior restart-safe and stable across leader changes / multiple replicas +-- +-- Safe to re-run (guarded). + +ALTER TABLE workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS infra_retry_count integer NOT NULL DEFAULT 0; + +ALTER TABLE workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS infra_retry_after timestamptz; + +ALTER TABLE workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS heartbeat_retry_count integer NOT NULL DEFAULT 0; + +ALTER TABLE workflow_orchestrator.checklist_items + ADD COLUMN IF NOT EXISTS heartbeat_retry_after timestamptz; + +CREATE INDEX IF NOT EXISTS checklist_items_infra_retry_after_idx + ON workflow_orchestrator.checklist_items (infra_retry_after); + +CREATE INDEX IF NOT EXISTS checklist_items_heartbeat_retry_after_idx + ON workflow_orchestrator.checklist_items (heartbeat_retry_after); + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase8_notification_channels.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase8_notification_channels.sql new file mode 100644 index 000000000..e2b6bfabf --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase8_notification_channels.sql @@ -0,0 +1,74 @@ +-- Phase 8: External Integrations - Notification Channels +-- +-- Creates: +-- - workflow_orchestrator.notification_channels +-- - workflow_orchestrator.notification_deliveries +-- +-- Design goals: +-- - Rerunnable (CREATE IF NOT EXISTS) +-- - Expand-only (no destructive changes) +-- - Matches Prisma schema models NotificationChannel / NotificationDelivery + +CREATE SCHEMA IF NOT EXISTS workflow_orchestrator; + +-- --------------------------------------------------------------------------- +-- notification_channels +-- --------------------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.notification_channels ( + id VARCHAR(255) PRIMARY KEY, + tenant_id VARCHAR(255) NOT NULL, + + type TEXT NOT NULL, + name TEXT NOT NULL, + description TEXT, + + config JSONB NOT NULL DEFAULT '{}'::jsonb, + events TEXT[] NOT NULL DEFAULT ARRAY[]::text[], + filters JSONB NOT NULL DEFAULT '{}'::jsonb, + + enabled BOOLEAN NOT NULL DEFAULT TRUE, + verified BOOLEAN NOT NULL DEFAULT FALSE, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_notification_channels_tenant_id + ON workflow_orchestrator.notification_channels(tenant_id); +CREATE INDEX IF NOT EXISTS idx_notification_channels_tenant_id_enabled + ON workflow_orchestrator.notification_channels(tenant_id, enabled); +CREATE INDEX IF NOT EXISTS idx_notification_channels_tenant_id_type + ON workflow_orchestrator.notification_channels(tenant_id, type); + +-- --------------------------------------------------------------------------- +-- notification_deliveries +-- --------------------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS workflow_orchestrator.notification_deliveries ( + id VARCHAR(255) PRIMARY KEY, + channel_id VARCHAR(255) NOT NULL REFERENCES workflow_orchestrator.notification_channels(id) ON DELETE CASCADE, + + event_id VARCHAR(255) NOT NULL, + event_type TEXT NOT NULL, + + success BOOLEAN NOT NULL, + status_code INTEGER, + error TEXT, + attempts INTEGER NOT NULL DEFAULT 1, + + payload JSONB DEFAULT '{}'::jsonb, + + delivered_at TIMESTAMP WITH TIME ZONE, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_notification_deliveries_channel_id + ON workflow_orchestrator.notification_deliveries(channel_id); +CREATE INDEX IF NOT EXISTS idx_notification_deliveries_event_id + ON workflow_orchestrator.notification_deliveries(event_id); +CREATE INDEX IF NOT EXISTS idx_notification_deliveries_event_type + ON workflow_orchestrator.notification_deliveries(event_type); +CREATE INDEX IF NOT EXISTS idx_notification_deliveries_created_at + ON workflow_orchestrator.notification_deliveries(created_at); + diff --git a/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase9_wait_reasons_and_capacity.sql b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase9_wait_reasons_and_capacity.sql new file mode 100644 index 000000000..9dc6d652d --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/migrations/20260121_phase9_wait_reasons_and_capacity.sql @@ -0,0 +1,76 @@ +-- Stark Closeout: Canonical wait reasons + WAITING_CAPACITY phase +-- +-- Adds: +-- - workflow_orchestrator."GoalRunPhase" enum value: WAITING_CAPACITY +-- - workflow_orchestrator."GoalRunWaitReason" enum type +-- - workflow_orchestrator.goal_runs columns: +-- wait_reason, wait_detail, wait_started_at, wait_until +-- - Backfill wait_reason for existing waiting runs +-- +-- Purpose: +-- - Make CAPACITY waits first-class (distinct from provider outages) +-- - Provide an engine-agnostic, audit-friendly “why are we waiting?” surface +-- +-- Safe to re-run (guarded). + +DO $$ +BEGIN + ALTER TYPE workflow_orchestrator."GoalRunPhase" ADD VALUE 'WAITING_CAPACITY'; +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +DO $$ +BEGIN + CREATE TYPE workflow_orchestrator."GoalRunWaitReason" AS ENUM ( + 'USER_INPUT', + 'APPROVAL', + 'PROVIDER', + 'CAPACITY', + 'POLICY', + 'UNKNOWN' + ); +EXCEPTION + WHEN duplicate_object THEN NULL; +END $$; + +ALTER TABLE workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS wait_reason workflow_orchestrator."GoalRunWaitReason"; + +ALTER TABLE workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS wait_detail jsonb; + +ALTER TABLE workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS wait_started_at timestamptz; + +ALTER TABLE workflow_orchestrator.goal_runs + ADD COLUMN IF NOT EXISTS wait_until timestamptz; + +CREATE INDEX IF NOT EXISTS goal_runs_wait_reason_idx + ON workflow_orchestrator.goal_runs (wait_reason); + +-- Backfill existing waiting runs to have a durable wait_reason. +UPDATE workflow_orchestrator.goal_runs +SET + wait_reason = 'USER_INPUT', + wait_started_at = COALESCE(wait_started_at, updated_at) +WHERE + phase = 'WAITING_USER_INPUT' + AND wait_reason IS NULL; + +UPDATE workflow_orchestrator.goal_runs +SET + wait_reason = 'APPROVAL', + wait_started_at = COALESCE(wait_started_at, updated_at) +WHERE + phase = 'WAITING_APPROVAL' + AND wait_reason IS NULL; + +UPDATE workflow_orchestrator.goal_runs +SET + wait_reason = 'PROVIDER', + wait_started_at = COALESCE(wait_started_at, updated_at) +WHERE + phase = 'WAITING_PROVIDER' + AND wait_reason IS NULL; + diff --git a/packages/bytebot-workflow-orchestrator/prisma/schema.prisma b/packages/bytebot-workflow-orchestrator/prisma/schema.prisma new file mode 100644 index 000000000..5f2290b83 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/prisma/schema.prisma @@ -0,0 +1,2873 @@ +// ByteBot Workflow Orchestrator - Prisma Schema +// v1.0.0: Defines workflow, workspace, and node entities +// +// This schema supports the Option C (Hybrid) architecture: +// - Orchestrator owns the DB entity for Workspace (lifecycle, locking) +// - Task-controller owns K8s resources (PVC, Pod) + +generator client { + provider = "prisma-client-js" + previewFeatures = ["multiSchema"] +} + +datasource db { + provider = "postgresql" + url = env("DATABASE_URL") + schemas = ["workflow_orchestrator"] +} + +// All models use the workflow_orchestrator schema to avoid conflicts +// with existing tables in the shared database + +// ============================================================================ +// Workspace Entity +// Represents a persistent workspace environment for workflow execution +// ============================================================================ + +model Workspace { + id String @id + tenantId String @map("tenant_id") + // v1.1.0: Status now includes PENDING, CREATING, READY, WAITING_FOR_CAPACITY, HIBERNATED, TERMINATED, FAILED + status String @default("PENDING") + + // Persistence configuration (passed to task-controller) + persistenceEnabled Boolean @default(true) @map("persistence_enabled") + storageClass String? @map("storage_class") + storageSize String? @map("storage_size") + + // Desktop endpoint caching (updated when workspace is ready) + desktopEndpoint String? @map("desktop_endpoint") + vncEndpoint String? @map("vnc_endpoint") + + // Simple locking (for backward compatibility) + lockedBy String? @map("locked_by") + lockedAt DateTime? @map("locked_at") + + // Granular locking for desktop tool batches + // Lock is held ONLY during active desktop tool execution + // NOT for the entire node run (allows concurrent non-desktop work) + lockOwnerNodeRunId String? @map("lock_owner_node_run_id") + lockAcquiredAt DateTime? @map("lock_acquired_at") + lockExpiresAt DateTime? @map("lock_expires_at") // 30-60 second leases + + // Health tracking + lastHeartbeatAt DateTime? @map("last_heartbeat_at") + + // Error tracking + error String? + + // v1.1.0: Provisioning tracking (prevents runaway loop bug) + // Tracks attempts and timing for idempotent provisioning with backoff + provisioningAttemptCount Int @default(0) @map("provisioning_attempt_count") + lastProvisioningAttemptAt DateTime? @map("last_provisioning_attempt_at") + // v1.5.0: DB-driven retry gating (prevents tight loop bug) + // When set, the orchestrator loop will skip processing until this time + nextAttemptAt DateTime? @map("next_attempt_at") + // v1.5.0: Tracks last activity emission time for throttling + lastActivityEmittedAt DateTime? @map("last_activity_emitted_at") + + // v1.2.0: Hibernation tracking (prevents orphan pods) + // Tracks hibernation attempts to retry on failure and enable GC cleanup + hibernationAttemptCount Int @default(0) @map("hibernation_attempt_count") + lastHibernationAttemptAt DateTime? @map("last_hibernation_attempt_at") + hibernationError String? @map("hibernation_error") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + terminatedAt DateTime? @map("terminated_at") + + // Session hints (for faster resume after hibernation) + lastKnownUrl String? @map("last_known_url") + sessionMetadata Json? @map("session_metadata") + + // Relations + workflowRun WorkflowRun? + + @@index([tenantId]) + @@index([status]) + @@index([lockOwnerNodeRunId]) + @@index([lockExpiresAt]) + @@map("workspaces") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Workflow Run Entity +// Represents an instance of a workflow execution +// ============================================================================ + +model WorkflowRun { + id String @id + workspaceId String @unique @map("workspace_id") + tenantId String @map("tenant_id") + templateId String? @map("template_id") // Reference to workflow template + + // Metadata + name String + description String? + status String @default("PENDING") // PENDING, RUNNING, COMPLETED, FAILED, CANCELLED + + // Error tracking + error String? + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + + // Relations + workspace Workspace @relation(fields: [workspaceId], references: [id]) + nodes WorkflowNode[] + goalRun GoalRun? // Reverse relation - a workflow run may belong to a goal run + + @@index([tenantId]) + @@index([status]) + @@index([templateId]) + @@index([createdAt]) + @@map("workflow_runs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Workflow Node Entity (DEFINITION) +// Represents the definition of a step within a workflow +// Runtime state is tracked separately in WorkflowNodeRun +// ============================================================================ + +model WorkflowNode { + id String @id + workflowRunId String @map("workflow_run_id") + + // Stable key for referencing across versions (e.g., "step_1", "login_step") + nodeKey String? @map("node_key") + + // Node definition + name String + type String // TASK, DECISION, PARALLEL, WAIT + config Json // Node-specific configuration + order Int @default(0) // Execution order hint + + // Dependencies (array of node IDs that must complete before this node) + dependencies String[] @default([]) + + // Runtime state (for backward compatibility - new code should use WorkflowNodeRun) + status String @default("PENDING") // PENDING, READY, RUNNING, COMPLETED, FAILED, SKIPPED + output Json? // Output from execution + error String? // Error message if failed + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + durationMs Int? @map("duration_ms") + dependencyResolved Boolean @default(false) @map("dependency_resolved") + + // Tool configuration + allowedTools String[] @default([]) @map("allowed_tools") // ["search_web_search", "weather_get_current"] + gatewayToolsOnly Boolean @default(false) @map("gateway_tools_only") // No desktop access + highRiskTools String[] @default([]) @map("high_risk_tools") // ["communications_send_email"] + + // Retry configuration + maxRetries Int @default(3) @map("max_retries") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + workflowRun WorkflowRun @relation(fields: [workflowRunId], references: [id]) + nodeRuns WorkflowNodeRun[] + + @@unique([workflowRunId, nodeKey]) + @@index([workflowRunId]) + @@index([type]) + @@index([order]) + @@index([status]) + @@map("workflow_nodes") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Workflow Node Run Entity (RUNTIME STATE) +// Represents an execution attempt of a workflow node +// Supports retries with full history of each attempt +// ============================================================================ + +model WorkflowNodeRun { + id String @id @default(cuid()) + nodeId String @map("node_id") + + // Attempt tracking + attempt Int @default(1) + + // Execution state + status String @default("PENDING") // PENDING, READY, RUNNING, SUCCEEDED, FAILED, SKIPPED, WAITING_FOR_APPROVAL + + // Input/Output + input Json? // Input provided to this node run + output Json? // Output from successful execution + error String? // Error message if failed + + // Checkpointing (for long-running nodes) + checkpoint Json? // Intermediate state for resume + + // Performance tracking + durationMs Int? @map("duration_ms") + + // Desktop tool locking (granular - per tool batch, not per node) + // Lock is acquired only when desktop tools are about to execute + // and released immediately after the batch completes + desktopLockAcquiredAt DateTime? @map("desktop_lock_acquired_at") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + + // Relations + node WorkflowNode @relation(fields: [nodeId], references: [id]) + approvalRequests ApprovalRequest[] + + @@unique([nodeId, attempt]) + @@index([nodeId]) + @@index([status]) + @@index([createdAt]) + @@map("workflow_node_runs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Approval Request Entity +// For high-risk actions that require human approval +// ============================================================================ + +model ApprovalRequest { + id String @id @default(cuid()) + nodeRunId String @map("node_run_id") + + // Action identification + actionHash String @map("action_hash") // Deterministic hash of the action + toolName String @map("tool_name") // "communications_send_email" + toolParams Json @map("tool_params") // Full parameters + + // Preview data for approver + previewData Json? @map("preview_data") // { recipient, subject, bodyPreview } + + // Status + status String @default("PENDING") // PENDING, APPROVED, REJECTED, EXPIRED + expiresAt DateTime @map("expires_at") + + // Approval info + approvedBy String? @map("approved_by") + approvedAt DateTime? @map("approved_at") + rejectedBy String? @map("rejected_by") + rejectedAt DateTime? @map("rejected_at") + reason String? // Rejection reason or approval note + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + nodeRun WorkflowNodeRun @relation(fields: [nodeRunId], references: [id]) + userPrompts UserPrompt[] + + @@unique([nodeRunId, actionHash]) + @@index([nodeRunId]) + @@index([status]) + @@index([expiresAt]) + @@map("approval_requests") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Idempotency Record Entity +// Ensures high-risk actions execute exactly once +// ============================================================================ + +model IdempotencyRecord { + id String @id @default(cuid()) + idempotencyKey String @unique @map("idempotency_key") // "{nodeRunId}:{actionHash}" + actionHash String @map("action_hash") + + // Execution state + status String @default("PROCESSING") // PROCESSING, COMPLETED, FAILED + + // Result (cached for idempotent returns) + result Json? + errorMessage String? @map("error_message") + + // TTL + expiresAt DateTime @map("expires_at") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + completedAt DateTime? @map("completed_at") + + @@index([actionHash]) + @@index([expiresAt]) + @@map("idempotency_records") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Workflow Template Entity (Optional - for reusable workflows) +// ============================================================================ + +model WorkflowTemplate { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Template definition + name String + description String? + version String @default("1.0.0") + nodes Json // Array of node definitions + + // Feature flags and settings + settings Json @default("{}") + + // Audit + createdBy String? @map("created_by") + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@unique([tenantId, name, version]) + @@index([tenantId]) + @@map("workflow_templates") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Outbox Event Entity (for transactional event publishing) +// ============================================================================ + +model OutboxEvent { + id String @id @default(cuid()) + aggregateId String @map("aggregate_id") + eventType String @map("event_type") + payload Json + + // Processing state + processedAt DateTime? @map("processed_at") + retryCount Int @default(0) @map("retry_count") + error String? + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + @@index([processedAt]) + @@index([eventType]) + @@index([aggregateId]) + @@map("outbox_events") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Outbox Entity (Stark Fix) +// Reliable, idempotent notifications with a durable dedupeKey +// ============================================================================ + +model Outbox { + id String @id @default(cuid()) + + // Monotonic event sequence (for cursor-based replay) + // NOTE: nullable for backward compatibility with existing rows. + eventSequence BigInt? @map("event_sequence") + + // Idempotency + dedupeKey String @unique @map("dedupe_key") + + // Event classification + aggregateId String? @map("aggregate_id") + eventType String @map("event_type") + payload Json + + // Processing state + processedAt DateTime? @map("processed_at") + nextAttemptAt DateTime @default(now()) @map("next_attempt_at") + retryCount Int @default(0) @map("retry_count") + error String? + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + @@index([processedAt]) + @@index([nextAttemptAt]) + @@index([eventType]) + @@index([aggregateId]) + @@index([eventSequence]) + @@map("outbox") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Feature Flag Entity (for progressive rollout) +// ============================================================================ + +model FeatureFlag { + id String @id @default(cuid()) + name String @unique + enabled Boolean @default(false) + + // Targeting + tenantIds String[] @default([]) @map("tenant_ids") // Empty = all tenants + percentage Int @default(100) // 0-100 for gradual rollout + + // Metadata + description String? + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@map("feature_flags") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Post-M5: Webhook Configuration Entity +// For notification webhooks when approvals are requested/decided +// ============================================================================ + +model WebhookConfig { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Webhook endpoint + url String + secret String // HMAC secret for signature + + // Events to send (array of WebhookEventType) + events String[] // ["approval.requested", "approval.approved", ...] + + // Status + enabled Boolean @default(true) + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + deliveries WebhookDelivery[] + + @@index([tenantId]) + @@index([tenantId, enabled]) + @@map("webhook_configs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Post-M5: Webhook Delivery Record Entity +// Tracks webhook delivery attempts for debugging and monitoring +// ============================================================================ + +model WebhookDelivery { + id String @id @default(cuid()) + webhookId String @map("webhook_id") + eventId String @map("event_id") // Unique event ID for idempotency + + // Delivery result + success Boolean + statusCode Int? @map("status_code") + error String? + attempts Int @default(1) + + // Timestamps + deliveredAt DateTime? @map("delivered_at") + createdAt DateTime @default(now()) @map("created_at") + + // Relations + webhook WebhookConfig @relation(fields: [webhookId], references: [id], onDelete: Cascade) + + @@index([webhookId]) + @@index([eventId]) + @@index([createdAt]) + @@map("webhook_deliveries") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Post-M5: Audit Log Entity +// Immutable compliance audit trail for approval actions +// ============================================================================ + +model AuditLog { + id String @id @default(cuid()) + timestamp DateTime @default(now()) + + // Event classification + eventType String @map("event_type") + + // WHO - Actor information + actorType String @map("actor_type") // 'user', 'system', 'agent' + actorId String? @map("actor_id") + actorEmail String? @map("actor_email") + actorName String? @map("actor_name") + actorIpAddress String? @map("actor_ip_address") + actorUserAgent String? @map("actor_user_agent") + + // WHAT - Resource being acted upon + resourceType String @map("resource_type") // 'approval', 'webhook', 'workflow', 'node' + resourceId String @map("resource_id") + resourceName String? @map("resource_name") + + // WHERE - Context + tenantId String @map("tenant_id") + workspaceId String? @map("workspace_id") + workflowRunId String? @map("workflow_run_id") + nodeRunId String? @map("node_run_id") + requestId String? @map("request_id") + + // WHY - Action details + actionType String @map("action_type") + actionReason String? @map("action_reason") + previousState String? @map("previous_state") + newState String? @map("new_state") + + // Additional metadata (JSON for flexibility) + metadata Json? + + // Retention (for cleanup) + expiresAt DateTime @map("expires_at") + + // Immutable - no updatedAt field + @@index([tenantId]) + @@index([tenantId, timestamp]) + @@index([resourceType, resourceId]) + @@index([eventType]) + @@index([actorId]) + @@index([expiresAt]) + @@map("audit_logs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 7: Multi-Agent Orchestration +// ============================================================================ + +// ============================================================================ +// Agent Registry Entity +// Tracks available agent instances for task dispatch +// Implements the Supervisor/Hierarchical orchestration pattern +// ============================================================================ + +model Agent { + id String @id @default(cuid()) + name String // Human-readable agent name + endpoint String // Base URL (e.g., "http://bytebot-agent.bytebot.svc.cluster.local:8080") + + // Agent identification + podName String? @map("pod_name") // K8s pod name for correlation + nodeIp String? @map("node_ip") // K8s node IP for locality-aware routing + namespace String @default("bytebot") // K8s namespace + + // Status tracking + status String @default("STARTING") // STARTING, HEALTHY, UNHEALTHY, DRAINING, OFFLINE + lastHeartbeatAt DateTime @default(now()) @map("last_heartbeat_at") + + // Capacity management + maxConcurrentTasks Int @default(3) @map("max_concurrent_tasks") + currentTaskCount Int @default(0) @map("current_task_count") + + // Weight for load balancing (higher = more tasks) + weight Int @default(100) + + // Agent version for compatibility checks + version String @default("1.0.0") + + // Metadata (tags, labels, custom properties) + metadata Json @default("{}") + + // Timestamps + registeredAt DateTime @default(now()) @map("registered_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + capabilities AgentCapability[] + healthChecks AgentHealthCheck[] + taskAssignments TaskAssignment[] + + @@unique([endpoint]) + @@unique([podName, namespace]) + @@index([status]) + @@index([lastHeartbeatAt]) + @@index([namespace]) + @@map("agents") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Agent Capability Entity +// Defines what tools/skills an agent can perform +// Used by AgentRouterService for capability-based task routing +// ============================================================================ + +model AgentCapability { + id String @id @default(cuid()) + agentId String @map("agent_id") + + // Capability definition + name String // e.g., "desktop_automation", "web_search", "code_execution" + toolPattern String @map("tool_pattern") // Glob pattern: "desktop_*", "search_*", "*" + + // Priority (higher = preferred for this capability) + priority Int @default(100) + + // Cost multiplier for this capability (for cost-aware routing) + costMultiplier Float @default(1.0) @map("cost_multiplier") + + // Whether this capability requires exclusive workspace access + requiresExclusiveWorkspace Boolean @default(false) @map("requires_exclusive_workspace") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + // Relations + agent Agent @relation(fields: [agentId], references: [id], onDelete: Cascade) + + @@unique([agentId, name]) + @@index([agentId]) + @@index([name]) + @@index([toolPattern]) + @@map("agent_capabilities") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Agent Health Check Entity +// Historical health check records for monitoring and alerting +// ============================================================================ + +model AgentHealthCheck { + id String @id @default(cuid()) + agentId String @map("agent_id") + + // Check result + success Boolean + statusCode Int? @map("status_code") + latencyMs Int @map("latency_ms") + error String? + + // Endpoint checked + endpoint String // Usually "/health/live" or "/health/ready" + + // Timestamp + checkedAt DateTime @default(now()) @map("checked_at") + + // Relations + agent Agent @relation(fields: [agentId], references: [id], onDelete: Cascade) + + @@index([agentId]) + @@index([agentId, checkedAt]) + @@index([checkedAt]) + @@map("agent_health_checks") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Task Assignment Entity +// Tracks which agent is assigned to execute which node run +// Implements distributed work tracking for multi-agent coordination +// ============================================================================ + +model TaskAssignment { + id String @id @default(cuid()) + nodeRunId String @map("node_run_id") + agentId String @map("agent_id") + + // Assignment state + status String @default("ASSIGNED") // ASSIGNED, RUNNING, COMPLETED, FAILED, REASSIGNED + + // Routing info (why this agent was selected) + routingReason String? @map("routing_reason") // e.g., "capability_match", "load_balance", "affinity" + + // Execution tracking + dispatchedAt DateTime? @map("dispatched_at") + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + + // Result caching + result Json? + error String? + + // Retry tracking (if reassigned) + previousAssignmentId String? @map("previous_assignment_id") + attempt Int @default(1) + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + agent Agent @relation(fields: [agentId], references: [id]) + + @@unique([nodeRunId, attempt]) + @@index([agentId]) + @@index([nodeRunId]) + @@index([status]) + @@index([dispatchedAt]) + @@map("task_assignments") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Agent Pool Entity (Optional) +// Groups agents for tenant isolation or workload separation +// ============================================================================ + +model AgentPool { + id String @id @default(cuid()) + name String @unique + tenantId String? @map("tenant_id") // null = shared pool + + // Pool settings + minAgents Int @default(1) @map("min_agents") + maxAgents Int @default(10) @map("max_agents") + scaleUpThreshold Int @default(80) @map("scale_up_threshold") // % utilization + scaleDownThreshold Int @default(20) @map("scale_down_threshold") // % utilization + + // Agent selector (label selector for K8s) + selector Json @default("{}") + + // Status + enabled Boolean @default(true) + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@index([tenantId]) + @@map("agent_pools") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 8: Advanced Analytics Dashboard +// ============================================================================ + +// ============================================================================ +// Workflow Execution Metric Entity +// Captures execution-level metrics for analytics and dashboards +// ============================================================================ + +model WorkflowExecutionMetric { + id String @id @default(cuid()) + workflowRunId String @map("workflow_run_id") + tenantId String @map("tenant_id") + + // Workflow identification + workflowName String @map("workflow_name") + templateId String? @map("template_id") + + // Execution status + status String // STARTED, COMPLETED, FAILED, CANCELLED + + // Timing metrics + startedAt DateTime @map("started_at") + completedAt DateTime? @map("completed_at") + durationMs Int? @map("duration_ms") + + // Execution counts + nodeCount Int @default(0) @map("node_count") + completedNodeCount Int @default(0) @map("completed_node_count") + failedNodeCount Int @default(0) @map("failed_node_count") + retriedNodeCount Int @default(0) @map("retried_node_count") + + // Agent metrics (Phase 7 integration) + agentId String? @map("agent_id") + agentName String? @map("agent_name") + reassignments Int @default(0) + + // Error tracking + errorType String? @map("error_type") + errorMessage String? @map("error_message") + + // Resource usage (optional - from agent reports) + peakMemoryMb Float? @map("peak_memory_mb") + avgCpuPercent Float? @map("avg_cpu_percent") + + // Tags for dimensional analysis + tags Json @default("{}") + + // Timestamp for time-series indexing + timestamp DateTime @default(now()) + + @@index([tenantId]) + @@index([tenantId, timestamp]) + @@index([workflowRunId]) + @@index([status]) + @@index([timestamp]) + @@index([templateId]) + @@index([agentId]) + @@map("workflow_execution_metrics") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Workflow Step Metric Entity +// Captures step-level metrics for detailed performance analysis +// ============================================================================ + +model WorkflowStepMetric { + id String @id @default(cuid()) + nodeId String @map("node_id") + nodeRunId String @map("node_run_id") + workflowRunId String @map("workflow_run_id") + tenantId String @map("tenant_id") + + // Step identification + stepName String @map("step_name") + stepType String @map("step_type") // TASK, DECISION, PARALLEL, WAIT + + // Execution status + status String // STARTED, COMPLETED, FAILED, SKIPPED + + // Timing metrics + startedAt DateTime @map("started_at") + completedAt DateTime? @map("completed_at") + durationMs Int? @map("duration_ms") + queueTimeMs Int? @map("queue_time_ms") // Time from READY to RUNNING + + // Attempt tracking + attempt Int @default(1) + retryCount Int @default(0) @map("retry_count") + + // Tool metrics + toolsUsed String[] @default([]) @map("tools_used") + highRiskTools String[] @default([]) @map("high_risk_tools") + + // Agent that executed this step + agentId String? @map("agent_id") + agentName String? @map("agent_name") + + // Resource usage + memoryMb Float? @map("memory_mb") + cpuPercent Float? @map("cpu_percent") + + // Error tracking + errorType String? @map("error_type") + errorMessage String? @map("error_message") + + // Timestamp for time-series indexing + timestamp DateTime @default(now()) + + @@index([tenantId]) + @@index([tenantId, timestamp]) + @@index([workflowRunId]) + @@index([nodeId]) + @@index([nodeRunId]) + @@index([stepType]) + @@index([status]) + @@index([timestamp]) + @@index([agentId]) + @@map("workflow_step_metrics") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Metrics Snapshot Entity +// Pre-aggregated metrics for efficient dashboard queries +// Aggregated at 1m, 5m, 1h, 1d intervals +// ============================================================================ + +model MetricsSnapshot { + id String @id @default(cuid()) + + // Dimensions + tenantId String @map("tenant_id") + metricName String @map("metric_name") // e.g., "workflow_executions", "success_rate", "avg_duration" + workflowId String @default("_all") @map("workflow_id") // "_all" = aggregate across all workflows + agentId String @default("_all") @map("agent_id") // "_all" = aggregate across all agents + + // Time bucket + period String @map("period") // 1m, 5m, 15m, 1h, 1d + bucketStart DateTime @map("bucket_start") + bucketEnd DateTime @map("bucket_end") + + // Aggregate values + count Int @default(0) + sum Float @default(0) + min Float @default(0) + max Float @default(0) + avg Float @default(0) + percentile50 Float @default(0) @map("percentile_50") + percentile95 Float @default(0) @map("percentile_95") + percentile99 Float @default(0) @map("percentile_99") + + // Status breakdowns (for execution metrics) + successCount Int @default(0) @map("success_count") + failureCount Int @default(0) @map("failure_count") + + // Additional dimensions as JSON + tags Json @default("{}") + + // Timestamp + createdAt DateTime @default(now()) @map("created_at") + + @@unique([tenantId, metricName, period, bucketStart, workflowId, agentId]) + @@index([tenantId]) + @@index([tenantId, metricName]) + @@index([tenantId, bucketStart]) + @@index([metricName, period]) + @@index([bucketStart]) + @@map("metrics_snapshots") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Dashboard Widget Entity +// Stores user-configured dashboard widgets +// ============================================================================ + +model DashboardWidget { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + userId String? @map("user_id") // null = shared dashboard + + // Widget configuration + name String + description String? + type String // line_chart, bar_chart, gauge, kpi_card, table, heatmap + metrics String[] // Metrics to display + filters Json @default("{}") // Filter configuration + visualization Json @default("{}") // Visualization settings (colors, thresholds, etc.) + + // Layout + position Json @default("{}") // { x, y, w, h } + order Int @default(0) + + // Dashboard grouping + dashboardId String? @map("dashboard_id") + + // Status + enabled Boolean @default(true) + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@index([tenantId]) + @@index([tenantId, userId]) + @@index([dashboardId]) + @@map("dashboard_widgets") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Alert Rule Entity +// Configurable alerts based on metric thresholds +// ============================================================================ + +model AlertRule { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Rule configuration + name String + description String? + metricName String @map("metric_name") + condition String // gt, gte, lt, lte, eq + threshold Float + + // Evaluation settings + evaluationPeriod String @default("5m") @map("evaluation_period") // Time window to evaluate + aggregation String @default("avg") // sum, avg, min, max, count + + // Filters + workflowId String? @map("workflow_id") + agentId String? @map("agent_id") + filters Json @default("{}") + + // Notification settings + severity String @default("warning") // info, warning, error, critical + notificationUrls String[] @default([]) @map("notification_urls") + + // Rate limiting + cooldownMinutes Int @default(15) @map("cooldown_minutes") + + // Status + enabled Boolean @default(true) + lastTriggered DateTime? @map("last_triggered") + lastChecked DateTime? @map("last_checked") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + alerts AlertHistory[] + + @@index([tenantId]) + @@index([tenantId, metricName]) + @@index([enabled]) + @@map("alert_rules") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Alert History Entity +// Records triggered alerts for audit trail +// ============================================================================ + +model AlertHistory { + id String @id @default(cuid()) + ruleId String @map("rule_id") + + // Alert details + metricValue Float @map("metric_value") + threshold Float + message String + + // Resolution + resolved Boolean @default(false) + resolvedAt DateTime? @map("resolved_at") + resolvedBy String? @map("resolved_by") + + // Notification status + notified Boolean @default(false) + notificationError String? @map("notification_error") + + // Timestamp + triggeredAt DateTime @default(now()) @map("triggered_at") + + // Relations + rule AlertRule @relation(fields: [ruleId], references: [id], onDelete: Cascade) + + @@index([ruleId]) + @@index([triggeredAt]) + @@index([resolved]) + @@map("alert_history") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 9: Self-Healing & Auto-Recovery +// ============================================================================ + +// ============================================================================ +// Workflow Checkpoint Entity +// Stores durable execution state for crash recovery +// Implements the DBOS durable execution pattern +// ============================================================================ + +model WorkflowCheckpoint { + id String @id @default(cuid()) + workflowRunId String @map("workflow_run_id") + tenantId String @map("tenant_id") + + // Checkpoint identification + checkpointKey String @map("checkpoint_key") // e.g., "node:node_id:attempt:1" + version Int @default(1) // For optimistic locking + + // Checkpoint data + state Json // Serialized workflow/node state + metadata Json @default("{}") + + // Recovery info + recoverable Boolean @default(true) + recoveryHint String? @map("recovery_hint") // Instructions for recovery + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + expiresAt DateTime @map("expires_at") // For cleanup + + @@unique([workflowRunId, checkpointKey]) + @@index([workflowRunId]) + @@index([tenantId]) + @@index([expiresAt]) + @@map("workflow_checkpoints") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Dead Letter Entry Entity +// Captures permanently failed tasks for manual intervention +// ============================================================================ + +model DeadLetterEntry { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + workflowRunId String @map("workflow_run_id") + nodeRunId String @map("node_run_id") + + // Original task info + taskType String @map("task_type") // WORKFLOW_NODE, AGENT_TASK, etc. + originalPayload Json @map("original_payload") + + // Failure info + failureReason String @map("failure_reason") + failureCount Int @default(1) @map("failure_count") + lastFailedAt DateTime @map("last_failed_at") + errorDetails Json? @map("error_details") // Stack traces, etc. + + // Classification + failureCategory String @map("failure_category") // RETRYABLE, PERMANENT, UNKNOWN + severity String @default("high") // low, medium, high, critical + + // Resolution + status String @default("PENDING") // PENDING, RETRYING, RESOLVED, DISCARDED + resolvedAt DateTime? @map("resolved_at") + resolvedBy String? @map("resolved_by") + resolutionNote String? @map("resolution_note") + + // Retry tracking + maxRetries Int @default(3) @map("max_retries") + retryCount Int @default(0) @map("retry_count") + nextRetryAt DateTime? @map("next_retry_at") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@index([tenantId]) + @@index([tenantId, status]) + @@index([workflowRunId]) + @@index([nodeRunId]) + @@index([status]) + @@index([severity]) + @@index([failureCategory]) + @@index([nextRetryAt]) + @@map("dead_letter_entries") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Circuit Breaker State Entity +// Persistent circuit breaker state per external service +// ============================================================================ + +model CircuitBreakerState { + id String @id @default(cuid()) + serviceName String @unique @map("service_name") // e.g., "task-controller", "agent-pool-1" + + // Circuit state + state String @default("CLOSED") // CLOSED, OPEN, HALF_OPEN + failureCount Int @default(0) @map("failure_count") + successCount Int @default(0) @map("success_count") + + // Thresholds (configurable per service) + failureThreshold Int @default(5) @map("failure_threshold") + successThreshold Int @default(3) @map("success_threshold") // For half-open -> closed + resetTimeoutMs Int @default(30000) @map("reset_timeout_ms") // Time to try half-open + + // Timing + lastFailureAt DateTime? @map("last_failure_at") + lastSuccessAt DateTime? @map("last_success_at") + openedAt DateTime? @map("opened_at") + halfOpenAt DateTime? @map("half_open_at") + + // Statistics + totalRequests Int @default(0) @map("total_requests") + totalFailures Int @default(0) @map("total_failures") + totalSuccesses Int @default(0) @map("total_successes") + totalTimeouts Int @default(0) @map("total_timeouts") + + // Metadata + metadata Json @default("{}") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@index([state]) + @@map("circuit_breaker_states") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Recovery Log Entity +// Audit trail of all recovery actions +// ============================================================================ + +model RecoveryLog { + id String @id @default(cuid()) + tenantId String? @map("tenant_id") + + // Recovery action + actionType String @map("action_type") // TASK_REASSIGNED, WORKFLOW_RESUMED, CIRCUIT_OPENED, DLQ_RETRY, etc. + targetType String @map("target_type") // WORKFLOW, NODE, AGENT, CIRCUIT_BREAKER + targetId String @map("target_id") + + // Context + previousState String? @map("previous_state") + newState String? @map("new_state") + reason String + details Json @default("{}") + + // Actor (who/what triggered recovery) + actorType String @map("actor_type") // SYSTEM, USER, SCHEDULER + actorId String? @map("actor_id") + + // Result + success Boolean + errorMessage String? @map("error_message") + + // Timestamp + createdAt DateTime @default(now()) @map("created_at") + + @@index([tenantId]) + @@index([actionType]) + @@index([targetType, targetId]) + @@index([createdAt]) + @@map("recovery_logs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Stale Task Entity +// Tracks tasks that have been detected as stale for recovery +// ============================================================================ + +model StaleTask { + id String @id @default(cuid()) + nodeRunId String @unique @map("node_run_id") + workflowRunId String @map("workflow_run_id") + tenantId String @map("tenant_id") + + // Detection info + detectedAt DateTime @default(now()) @map("detected_at") + staleReason String @map("stale_reason") // TIMEOUT, AGENT_OFFLINE, HEARTBEAT_MISSING + + // Original assignment + originalAgentId String? @map("original_agent_id") + assignedAt DateTime? @map("assigned_at") + lastHeartbeatAt DateTime? @map("last_heartbeat_at") + + // Recovery status + status String @default("DETECTED") // DETECTED, RECOVERING, RECOVERED, FAILED + recoveryAttempts Int @default(0) @map("recovery_attempts") + newAgentId String? @map("new_agent_id") + recoveredAt DateTime? @map("recovered_at") + + // Error tracking + errorMessage String? @map("error_message") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@index([tenantId]) + @@index([workflowRunId]) + @@index([status]) + @@index([detectedAt]) + @@map("stale_tasks") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Manus-Style Goal-First Orchestration +// Phase 10: Transform to goal-first, agent-owned orchestration model +// ============================================================================ + +// ============================================================================ +// Goal Run Phase Enum +// Represents the current phase of the orchestrator loop +// ============================================================================ + +enum GoalRunPhase { + INITIALIZING + PLANNING + EXECUTING + CONTROLLING_DESKTOP + WAITING_USER_INPUT + WAITING_APPROVAL + WAITING_PROVIDER + WAITING_CAPACITY + VERIFYING + REPLANNING + COMPLETED + FAILED + PAUSED + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run Wait Reason Enum +// Canonical “why are we waiting?” category (engine-agnostic) +// ============================================================================ + +enum GoalRunWaitReason { + USER_INPUT + APPROVAL + PROVIDER + CAPACITY + POLICY + UNKNOWN + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run Status Enum +// High-level status of a goal run +// ============================================================================ + +enum GoalRunStatus { + PENDING + RUNNING + COMPLETED + FAILED + CANCELLED + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run Execution Engine Enum +// Immutable per-run execution engine selection +// ============================================================================ + +enum GoalRunExecutionEngine { + LEGACY_DB_LOOP + TEMPORAL_WORKFLOW + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Checklist Item Status Enum +// Status of individual plan steps +// ============================================================================ + +enum ChecklistItemStatus { + PENDING + IN_PROGRESS + COMPLETED + SKIPPED + FAILED + BLOCKED + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Step Type Enum +// Classifies whether a checklist item is executable or requires user input +// ============================================================================ + +enum StepType { + EXECUTE + USER_INPUT_REQUIRED + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Execution Surface Enum +// Explicitly declares the execution surface to enforce text-only vs desktop +// ============================================================================ + +enum ExecutionSurface { + TEXT_ONLY + DESKTOP + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// User Prompt Status Enum +// Tracks durable prompt lifecycle for WAITING_USER_INPUT runs +// ============================================================================ + +enum UserPromptStatus { + OPEN + RESOLVED + CANCELLED + EXPIRED + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// User Prompt Kind Enum +// Distinguishes clarification prompts from desktop takeover prompts +// ============================================================================ + +enum UserPromptKind { + TEXT_CLARIFICATION + DESKTOP_TAKEOVER + GOAL_INTAKE + APPROVAL + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// User Prompt Scope Enum +// Explicitly defines what the prompt is scoped to (no null-based inference) +// ============================================================================ + +enum UserPromptScope { + RUN + STEP + APPROVAL + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// User Prompt Cancel Reason Enum +// Captures why an OPEN prompt was cancelled +// ============================================================================ + +enum UserPromptCancelReason { + SUPERSEDED + USER_CANCELLED + TIMEOUT + POLICY_DENY + RUN_ENDED + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Actor Type Enum +// Captures who resolved a prompt (required) +// ============================================================================ + +enum ActorType { + HUMAN + AGENT + SYSTEM + PARENT_AGENT + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// GoalSpec Status Enum +// Used for goal intake gating prior to planning +// ============================================================================ + +enum GoalSpecStatus { + INCOMPLETE + COMPLETE + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Desktop Lease Enums +// Orchestrator-owned lease for desktop retention while awaiting input +// ============================================================================ + +enum DesktopLeaseMode { + EPHEMERAL + WORKSPACE + + @@schema("workflow_orchestrator") +} + +enum DesktopLeaseStatus { + ACTIVE + RELEASED + EXPIRED + CANCELLED + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Steering Message Type Enum +// Types of user intervention commands +// ============================================================================ + +enum SteeringMessageType { + PAUSE + RESUME + CANCEL + MODIFY_PLAN + APPROVE + REJECT + INSTRUCTION + + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run Entity +// Represents a goal-first execution instance +// User states a goal, system plans and executes autonomously +// ============================================================================ + +model GoalRun { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Goal definition + goal String // Natural language goal statement + constraints Json @default("{}") // { workspaceMode, allowedTools, riskPolicy, deadlineMinutes } + + // Orchestrator state + phase GoalRunPhase @default(INITIALIZING) + status GoalRunStatus @default(PENDING) + executionEngine GoalRunExecutionEngine @default(LEGACY_DB_LOOP) @map("execution_engine") + + // Canonical wait state (engine-agnostic). Phase is still the durable “what,” + // but these fields capture the durable “why” and timing hints. + waitReason GoalRunWaitReason? @map("wait_reason") + waitDetail Json? @map("wait_detail") + waitStartedAt DateTime? @map("wait_started_at") + waitUntil DateTime? @map("wait_until") + + // Link to execution (GoalRun creates a WorkflowRun for execution) + workflowRunId String? @unique @map("workflow_run_id") + workflowRun WorkflowRun? @relation(fields: [workflowRunId], references: [id]) + + // Temporal engine audit fields (nullable for LEGACY_DB_LOOP runs) + temporalWorkflowId String? @map("temporal_workflow_id") + temporalRunId String? @map("temporal_run_id") + temporalStartedAt DateTime? @map("temporal_started_at") + + // Current plan tracking + currentPlanVersion Int @default(0) @map("current_plan_version") + + // Error tracking + error String? + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + + // Relations + planVersions PlanVersion[] + steeringMessages SteeringMessage[] + activityEvents ActivityEvent[] + userPrompts UserPrompt[] + goalSpec GoalSpec? + desktopLeases DesktopLease[] + + @@index([tenantId]) + @@index([tenantId, status]) + @@index([status]) + @@index([phase]) + @@index([waitReason]) + @@index([createdAt]) + @@map("goal_runs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Spec Entity +// Typed goal intake surface (gate before planning) +// ============================================================================ + +model GoalSpec { + id String @id @default(cuid()) + goalRunId String @unique @map("goal_run_id") + tenantId String @map("tenant_id") + + status GoalSpecStatus @default(INCOMPLETE) + + schemaId String @map("schema_id") + schemaVersion Int @default(1) @map("schema_version") + jsonSchema Json @map("json_schema") + uiSchema Json? @map("ui_schema") + values Json @default("{}") + + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + completedAt DateTime? @map("completed_at") + + // Relations + goalRun GoalRun @relation(fields: [goalRunId], references: [id], onDelete: Cascade) + userPrompts UserPrompt[] + + @@index([tenantId]) + @@index([status]) + @@map("goal_specs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Desktop Lease Entity +// First-class, orchestrator-owned retention lease for desktops/workspaces +// ============================================================================ + +model DesktopLease { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + goalRunId String @map("goal_run_id") + + // Optional external identifiers + workspaceId String? @map("workspace_id") + taskId String? @map("task_id") + + mode DesktopLeaseMode @default(WORKSPACE) + status DesktopLeaseStatus @default(ACTIVE) + + // Retention controls + keepaliveUntil DateTime? @map("keepalive_until") + expiresAt DateTime? @map("expires_at") + releasedAt DateTime? @map("released_at") + + // Why we’re holding it (e.g., DESKTOP_TAKEOVER) + reason String? @map("reason") + + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + goalRun GoalRun @relation(fields: [goalRunId], references: [id], onDelete: Cascade) + userPrompts UserPrompt[] + + @@index([tenantId]) + @@index([goalRunId]) + @@index([status]) + @@index([keepaliveUntil]) + @@map("desktop_leases") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Plan Version Entity +// Represents a version of the plan (checklist) for a goal run +// New version created on each replan +// ============================================================================ + +model PlanVersion { + id String @id @default(cuid()) + goalRunId String @map("goal_run_id") + + // Version tracking + version Int // 1, 2, 3, etc. + + // Plan metadata + summary String? // Brief description of this plan version + previousVersionId String? @map("previous_version_id") + replanReason String? @map("replan_reason") // Why was a replan needed? + + // LLM generation metadata + llmModel String? @map("llm_model") // Model used for planning + llmTokens Int? @map("llm_tokens") // Tokens used + confidence Float? @map("confidence") // Planning confidence score (0-1) + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + // Relations + goalRun GoalRun @relation(fields: [goalRunId], references: [id], onDelete: Cascade) + checklistItems ChecklistItem[] + + @@unique([goalRunId, version]) + @@index([goalRunId]) + @@index([goalRunId, version]) + @@map("plan_versions") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Checklist Item Entity +// Individual step in a plan version +// Maps to the "todo.md" / checklist concept from Manus +// ============================================================================ + +model ChecklistItem { + id String @id @default(cuid()) + planVersionId String @map("plan_version_id") + + // Item definition + order Int // Execution order (1, 2, 3, etc.) + description String // What needs to be done + type StepType @default(EXECUTE) @map("step_type") + + // Status + status ChecklistItemStatus @default(PENDING) + + // Durable blocked metadata (P0: restart-safe NEEDS_HELP / INPUT_REQUIRED handling) + blockedByPromptId String? @map("blocked_by_prompt_id") + blockedReason String? @map("blocked_reason") + blockedAt DateTime? @map("blocked_at") + + // Durable retry gating (restart-safe, multi-replica safe). + // Prevents tight-loop retries when a step is reset to PENDING with a backoff delay. + infraRetryCount Int @default(0) @map("infra_retry_count") + infraRetryAfter DateTime? @map("infra_retry_after") + heartbeatRetryCount Int @default(0) @map("heartbeat_retry_count") + heartbeatRetryAfter DateTime? @map("heartbeat_retry_after") + + // Link to execution (item may create a WorkflowNode for execution) + workflowNodeId String? @map("workflow_node_id") + + // Verification criteria + expectedOutcome String? @map("expected_outcome") // What should happen if successful + actualOutcome String? @map("actual_outcome") // What actually happened + + // Dependencies (item IDs that must complete first) + dependencies String[] @default([]) + + // Tool configuration + suggestedTools String[] @default([]) @map("suggested_tools") + requiresDesktop Boolean @default(false) @map("requires_desktop") + executionSurface ExecutionSurface @default(TEXT_ONLY) @map("execution_surface") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + + // Relations + planVersion PlanVersion @relation(fields: [planVersionId], references: [id], onDelete: Cascade) + verificationResults VerificationResult[] + userPrompts UserPrompt[] + + @@index([planVersionId]) + @@index([planVersionId, order]) + @@index([status]) + @@index([infraRetryAfter]) + @@index([heartbeatRetryAfter]) + @@map("checklist_items") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// User Prompt Entity +// Durable user-interaction surface for USER_INPUT_REQUIRED steps +// ============================================================================ + +model UserPrompt { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + goalRunId String @map("goal_run_id") + checklistItemId String? @map("checklist_item_id") + goalSpecId String? @map("goal_spec_id") + approvalRequestId String? @map("approval_request_id") + desktopLeaseId String? @map("desktop_lease_id") + + // Prompt classification + kind UserPromptKind + scope UserPromptScope @default(RUN) + status UserPromptStatus @default(OPEN) + + // Idempotency + // Recommended format: prompt:${runId}:${stepId}:${kind} + dedupeKey String @unique @map("dedupe_key") + + // Content + schemaId String? @map("schema_id") + schemaVersion Int? @map("schema_version") + jsonSchema Json? @map("json_schema") + uiSchema Json? @map("ui_schema") + validatorVersion String? @map("validator_version") + + payload Json @default("{}") + // Deprecated (kept for compatibility): prefer UserPromptResolution.answers + answers Json? + + // Revisioning / supersede semantics (no history overwrites) + rootPromptId String? @map("root_prompt_id") + supersedesPromptId String? @map("supersedes_prompt_id") + supersededByPromptId String? @map("superseded_by_prompt_id") + revision Int @default(1) + + // Cancellation / expiry + cancelReason UserPromptCancelReason? @map("cancel_reason") + cancelledAt DateTime? @map("cancelled_at") + expiresAt DateTime? @map("expires_at") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + resolvedAt DateTime? @map("resolved_at") + + // Relations + goalRun GoalRun @relation(fields: [goalRunId], references: [id], onDelete: Cascade) + checklistItem ChecklistItem? @relation(fields: [checklistItemId], references: [id], onDelete: Cascade) + goalSpec GoalSpec? @relation(fields: [goalSpecId], references: [id], onDelete: Cascade) + approvalRequest ApprovalRequest? @relation(fields: [approvalRequestId], references: [id], onDelete: Cascade) + desktopLease DesktopLease? @relation(fields: [desktopLeaseId], references: [id], onDelete: Cascade) + resolutions UserPromptResolution[] + attempts UserPromptAttempt[] + + @@index([tenantId]) + @@index([goalRunId]) + @@index([checklistItemId]) + @@index([goalSpecId]) + @@index([approvalRequestId]) + @@index([desktopLeaseId]) + @@index([scope]) + @@index([status]) + @@map("user_prompts") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// User Prompt Resolution Entity +// Immutable resolution record with actor identity + auth context +// ============================================================================ + +model UserPromptResolution { + id String @id @default(cuid()) + promptId String @unique @map("prompt_id") + + tenantId String @map("tenant_id") + goalRunId String @map("goal_run_id") + + actorType ActorType @map("actor_type") + actorId String? @map("actor_id") + actorEmail String? @map("actor_email") + actorName String? @map("actor_name") + actorIpAddress String? @map("actor_ip_address") + actorUserAgent String? @map("actor_user_agent") + + requestId String? @map("request_id") + authContext Json? @map("auth_context") + clientRequestId String? @map("client_request_id") + idempotencyKey String? @map("idempotency_key") + + // Audit-grade authorization decision snapshot (ALLOW by default; denies do not create a resolution row) + authzDecision String @default("ALLOW") @map("authz_decision") + authzPolicy String? @map("authz_policy") + authzRuleId String? @map("authz_rule_id") + authzReason String? @map("authz_reason") + + answers Json @default("{}") + + // Resume acknowledgement (set by outbox resumer after successful Temporal Update) + resumeAcknowledgedAt DateTime? @map("resume_acknowledged_at") + resumeAck Json? @map("resume_ack") + + createdAt DateTime @default(now()) @map("created_at") + + // Relations + prompt UserPrompt @relation(fields: [promptId], references: [id], onDelete: Cascade) + + @@index([tenantId]) + @@index([goalRunId]) + @@index([actorId]) + @@index([clientRequestId]) + @@index([idempotencyKey]) + @@index([resumeAcknowledgedAt]) + @@index([createdAt]) + @@map("user_prompt_resolutions") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// User Prompt Attempt Entity +// Append-only submission log (records invalid attempts without mutating prompt history) +// ============================================================================ + +model UserPromptAttempt { + id String @id @default(cuid()) + promptId String @map("prompt_id") + + tenantId String @map("tenant_id") + goalRunId String @map("goal_run_id") + + actorType ActorType @map("actor_type") + actorId String? @map("actor_id") + actorEmail String? @map("actor_email") + actorName String? @map("actor_name") + actorIpAddress String? @map("actor_ip_address") + actorUserAgent String? @map("actor_user_agent") + + requestId String? @map("request_id") + authContext Json? @map("auth_context") + clientRequestId String? @map("client_request_id") + idempotencyKey String? @map("idempotency_key") + + // Authorization snapshot (ALLOW/DENY) for the attempt. + authzDecision String @default("ALLOW") @map("authz_decision") + authzPolicy String? @map("authz_policy") + authzRuleId String? @map("authz_rule_id") + authzReason String? @map("authz_reason") + + answers Json @default("{}") + + // Validation outcome (schema-driven where possible) + isValid Boolean @default(true) @map("is_valid") + validationResult Json? @map("validation_result") + errorCode String? @map("error_code") + errorMessage String? @map("error_message") + + createdAt DateTime @default(now()) @map("created_at") + + // Relations + prompt UserPrompt @relation(fields: [promptId], references: [id], onDelete: Cascade) + + @@index([tenantId]) + @@index([goalRunId]) + @@index([promptId]) + @@index([actorId]) + @@index([clientRequestId]) + @@index([idempotencyKey]) + @@index([createdAt]) + @@map("user_prompt_attempts") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Steering Message Entity +// User intervention commands during goal execution +// Enables human-in-the-loop control +// ============================================================================ + +model SteeringMessage { + id String @id @default(cuid()) + goalRunId String @map("goal_run_id") + + // Message content + type SteeringMessageType + content String? // Additional instructions or reason + + // Target (for MODIFY_PLAN, APPROVE, REJECT) + targetItemId String? @map("target_item_id") + + // Processing state + acknowledged Boolean @default(false) + acknowledgedAt DateTime? @map("acknowledged_at") + processedAt DateTime? @map("processed_at") + + // Actor info + userId String? @map("user_id") + userEmail String? @map("user_email") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + // Relations + goalRun GoalRun @relation(fields: [goalRunId], references: [id], onDelete: Cascade) + + @@index([goalRunId]) + @@index([goalRunId, acknowledged]) + @@index([type]) + @@index([createdAt]) + @@map("steering_messages") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Verification Result Entity +// Records outcome verification for checklist items +// Enables intelligent replanning based on actual results +// ============================================================================ + +model VerificationResult { + id String @id @default(cuid()) + checklistItemId String @map("checklist_item_id") + + // Verification outcome + passed Boolean + confidence Float? // Verification confidence (0-1) + + // Evidence + evidence Json? // Screenshots, tool outputs, etc. + failureReason String? @map("failure_reason") + + // LLM verification metadata + llmModel String? @map("llm_model") + llmTokens Int? @map("llm_tokens") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + // Relations + checklistItem ChecklistItem @relation(fields: [checklistItemId], references: [id], onDelete: Cascade) + + @@index([checklistItemId]) + @@index([passed]) + @@index([createdAt]) + @@map("verification_results") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Activity Event Entity +// Event stream for the activity feed +// Provides narrated autonomy - visibility into agent operations +// ============================================================================ + +model ActivityEvent { + id String @id @default(cuid()) + goalRunId String @map("goal_run_id") + + // Event classification + eventType String @map("event_type") // GOAL_CREATED, PLANNING_STARTED, STEP_COMPLETED, etc. + severity String @default("info") // info, warning, error + + // Event content + title String + description String? + details Json? // Additional structured data + + // Related entities + planVersionId String? @map("plan_version_id") + checklistItemId String? @map("checklist_item_id") + workflowNodeId String? @map("workflow_node_id") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + // Relations + goalRun GoalRun @relation(fields: [goalRunId], references: [id], onDelete: Cascade) + + @@index([goalRunId]) + @@index([goalRunId, createdAt]) + @@index([eventType]) + @@index([createdAt]) + @@map("activity_events") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 7: Enhanced Features +// Goal Templates, Batch Execution, Analytics Insights +// ============================================================================ + +// ============================================================================ +// Goal Template Entity +// Reusable templates for common goal patterns +// ============================================================================ + +model GoalTemplate { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Template identification + name String + description String? + category String? // e.g., "data-processing", "web-automation", "reporting" + tags String[] @default([]) + icon String? // Icon identifier for UI display + + // Template content + goalPattern String @map("goal_pattern") // Template with {{variables}} + defaultConstraints Json @default("{}") @map("default_constraints") + + // Variable definitions + variables Json @default("[]") // Array of { name, type, required, default, description } + + // Checklist template (pre-defined plan steps) + checklistTemplate Json @default("[]") @map("checklist_template") // Array of step templates + + // Version control + version String @default("1.0.0") + isLatest Boolean @default(true) @map("is_latest") + previousVersionId String? @map("previous_version_id") + + // Publishing status + isPublished Boolean @default(false) @map("is_published") + isBuiltIn Boolean @default(false) @map("is_built_in") // System-provided templates + + // Usage tracking + usageCount Int @default(0) @map("usage_count") + lastUsedAt DateTime? @map("last_used_at") + + // Audit + createdBy String? @map("created_by") + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + goalRuns GoalRunFromTemplate[] + + @@unique([tenantId, name, version]) + @@index([tenantId]) + @@index([tenantId, isPublished]) + @@index([category]) + @@index([isBuiltIn]) + @@index([usageCount]) + @@map("goal_templates") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run From Template Junction +// Tracks which goal runs were created from templates +// ============================================================================ + +model GoalRunFromTemplate { + id String @id @default(cuid()) + goalRunId String @unique @map("goal_run_id") + templateId String @map("template_id") + + // Variables used when instantiating + variableValues Json @default("{}") @map("variable_values") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + // Relations + template GoalTemplate @relation(fields: [templateId], references: [id]) + + @@index([templateId]) + @@index([goalRunId]) + @@map("goal_runs_from_template") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run Batch Entity +// Groups multiple goal runs for batch execution +// ============================================================================ + +model GoalRunBatch { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Batch identification + name String + description String? + + // Batch configuration + executionMode String @default("PARALLEL") @map("execution_mode") // PARALLEL, SEQUENTIAL + maxConcurrency Int @default(5) @map("max_concurrency") // Max parallel executions + stopOnFailure Boolean @default(false) @map("stop_on_failure") // Stop batch on first failure + + // Batch status + status String @default("PENDING") // PENDING, RUNNING, COMPLETED, PARTIALLY_COMPLETED, FAILED, CANCELLED + + // Progress tracking + totalGoals Int @default(0) @map("total_goals") + completedGoals Int @default(0) @map("completed_goals") + failedGoals Int @default(0) @map("failed_goals") + cancelledGoals Int @default(0) @map("cancelled_goals") + + // Error tracking + error String? + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + + // Relations + items GoalRunBatchItem[] + + @@index([tenantId]) + @@index([tenantId, status]) + @@index([status]) + @@index([createdAt]) + @@map("goal_run_batches") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run Batch Item Entity +// Individual goal run within a batch +// ============================================================================ + +model GoalRunBatchItem { + id String @id @default(cuid()) + batchId String @map("batch_id") + + // Goal definition + goal String + constraints Json @default("{}") + + // Template reference (optional) + templateId String? @map("template_id") + variableValues Json @default("{}") @map("variable_values") + + // Execution order (for SEQUENTIAL mode) + order Int @default(0) + + // Status tracking + status String @default("PENDING") // PENDING, QUEUED, RUNNING, COMPLETED, FAILED, CANCELLED, SKIPPED + goalRunId String? @map("goal_run_id") // Linked after goal run is created + + // Error tracking + error String? + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + startedAt DateTime? @map("started_at") + completedAt DateTime? @map("completed_at") + + // Relations + batch GoalRunBatch @relation(fields: [batchId], references: [id], onDelete: Cascade) + + @@index([batchId]) + @@index([batchId, order]) + @@index([batchId, status]) + @@index([goalRunId]) + @@map("goal_run_batch_items") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Goal Run Analytics Snapshot +// Pre-aggregated goal run metrics for analytics dashboard +// ============================================================================ + +model GoalRunAnalyticsSnapshot { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Time bucket + period String @map("period") // 1h, 1d, 7d, 30d + bucketStart DateTime @map("bucket_start") + bucketEnd DateTime @map("bucket_end") + + // Goal Run Metrics + totalGoalRuns Int @default(0) @map("total_goal_runs") + completedGoalRuns Int @default(0) @map("completed_goal_runs") + failedGoalRuns Int @default(0) @map("failed_goal_runs") + cancelledGoalRuns Int @default(0) @map("cancelled_goal_runs") + + // Duration metrics (in milliseconds) + avgDurationMs Float @default(0) @map("avg_duration_ms") + minDurationMs Float @default(0) @map("min_duration_ms") + maxDurationMs Float @default(0) @map("max_duration_ms") + p50DurationMs Float @default(0) @map("p50_duration_ms") + p95DurationMs Float @default(0) @map("p95_duration_ms") + p99DurationMs Float @default(0) @map("p99_duration_ms") + + // Step metrics + avgStepsPerGoal Float @default(0) @map("avg_steps_per_goal") + avgReplanCount Float @default(0) @map("avg_replan_count") + totalStepsExecuted Int @default(0) @map("total_steps_executed") + + // Template usage + templateUsageCount Int @default(0) @map("template_usage_count") + topTemplateId String? @map("top_template_id") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + @@unique([tenantId, period, bucketStart]) + @@index([tenantId]) + @@index([tenantId, period]) + @@index([bucketStart]) + @@map("goal_run_analytics_snapshots") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 8: External Integrations - Notification Channels +// Unified notification system for Slack, Teams, and other channels +// ============================================================================ + +model NotificationChannel { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Channel type: SLACK, TEAMS, EMAIL, CUSTOM_WEBHOOK + type String + + // Human-readable name + name String + description String? + + // Configuration (JSON) - varies by channel type + // Slack: { webhookUrl, channel?, username?, iconEmoji? } + // Teams: { webhookUrl } + // Email: { smtpConfig, fromAddress } + config Json @default("{}") + + // Event subscriptions (array of event types) + // goal.started, goal.completed, goal.failed, batch.started, batch.completed, + // approval.requested, approval.approved, approval.rejected + events String[] @default([]) + + // Filters for event targeting (JSON) + // { categories: [], templateIds: [], minRiskLevel: 'HIGH' } + filters Json @default("{}") + + // Status + enabled Boolean @default(true) + verified Boolean @default(false) @map("verified") // Verified via test message + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + deliveries NotificationDelivery[] + + @@index([tenantId]) + @@index([tenantId, enabled]) + @@index([tenantId, type]) + @@map("notification_channels") + @@schema("workflow_orchestrator") +} + +model NotificationDelivery { + id String @id @default(cuid()) + channelId String @map("channel_id") + eventId String @map("event_id") // Unique event ID for idempotency + eventType String @map("event_type") + + // Delivery result + success Boolean + statusCode Int? @map("status_code") + error String? + attempts Int @default(1) + + // Payload sent (for debugging) + payload Json? @default("{}") + + // Timestamps + deliveredAt DateTime? @map("delivered_at") + createdAt DateTime @default(now()) @map("created_at") + + // Relations + channel NotificationChannel @relation(fields: [channelId], references: [id], onDelete: Cascade) + + @@index([channelId]) + @@index([eventId]) + @@index([eventType]) + @@index([createdAt]) + @@map("notification_deliveries") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 8: External Integrations - Git Integration +// GitHub/GitLab repository connections for triggering workflows +// ============================================================================ + +model GitIntegration { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Provider: GITHUB, GITLAB + provider String + + // Human-readable name + name String + description String? + + // Repository info + owner String // GitHub org/user or GitLab namespace + repository String // Repository name + branch String? @default("main") // Default branch for triggers + + // Authentication (encrypted) + // GitHub: { appId, installationId, privateKey } or { personalAccessToken } + // GitLab: { accessToken, projectId } + credentials Json @default("{}") + + // Webhook configuration + webhookId String? @map("webhook_id") // External webhook ID from Git provider + webhookSecret String? @map("webhook_secret") // For verifying incoming webhooks + + // Event subscriptions from Git provider + // push, pull_request, issues, release, etc. + subscribedEvents String[] @default([]) @map("subscribed_events") + + // Trigger configuration + // Which ByteBot events should update Git (status checks, comments) + triggerConfig Json @default("{}") @map("trigger_config") + + // Status + enabled Boolean @default(true) + lastSyncAt DateTime? @map("last_sync_at") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + events GitIntegrationEvent[] + + @@unique([tenantId, provider, owner, repository]) + @@index([tenantId]) + @@index([tenantId, provider]) + @@index([webhookId]) + @@map("git_integrations") + @@schema("workflow_orchestrator") +} + +model GitIntegrationEvent { + id String @id @default(cuid()) + integrationId String @map("integration_id") + + // Event from Git provider + eventType String @map("event_type") // push, pull_request, etc. + eventAction String? @map("event_action") // opened, closed, merged, etc. + + // Reference info + ref String? // refs/heads/main, refs/tags/v1.0.0 + commitSha String? @map("commit_sha") + prNumber Int? @map("pr_number") + issueNumber Int? @map("issue_number") + + // Payload (full event data from Git provider) + payload Json @default("{}") + + // Processing status + processed Boolean @default(false) + goalRunId String? @map("goal_run_id") // If triggered a goal run + + // Error info + error String? + + // Timestamps + receivedAt DateTime @default(now()) @map("received_at") + processedAt DateTime? @map("processed_at") + + // Relations + integration GitIntegration @relation(fields: [integrationId], references: [id], onDelete: Cascade) + + @@index([integrationId]) + @@index([eventType]) + @@index([commitSha]) + @@index([processed]) + @@index([receivedAt]) + @@map("git_integration_events") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 9: Advanced AI Features +// Goal Refinement, Template Generation, Failure Analysis +// ============================================================================ + +// ============================================================================ +// Goal Refinement Suggestion Entity +// Stores AI-generated goal refinement suggestions +// ============================================================================ + +model GoalRefinementSuggestion { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Original goal + originalGoal String @map("original_goal") + + // Analysis results + analysisScore Float @map("analysis_score") // Overall quality score (0-1) + suggestionsCount Int @map("suggestions_count") // Number of suggestions generated + topSuggestion String @map("top_suggestion") // Best refined goal suggestion + complexity String // simple, moderate, complex, very_complex + + // Issues identified + issues String[] @default([]) + + // Processing info + tokensUsed Int? @map("tokens_used") + llmModel String? @map("llm_model") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + @@index([tenantId]) + @@index([tenantId, createdAt]) + @@index([analysisScore]) + @@index([createdAt]) + @@map("goal_refinement_suggestions") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Failure Analysis Result Entity +// Stores AI-generated failure analysis for goal runs +// ============================================================================ + +model FailureAnalysisResult { + id String @id @default(cuid()) + goalRunId String @map("goal_run_id") + + // Primary classification + primaryCategory String @map("primary_category") // configuration, network, etc. + severity String // critical, high, medium, low + + // Analysis results (JSON arrays) + rootCauses Json @map("root_causes") // Array of root cause objects + remediations Json @map("remediations") // Array of remediation objects + + // Confidence score + analysisConfidence Float @map("analysis_confidence") + + // Processing info + tokensUsed Int? @map("tokens_used") + llmModel String? @map("llm_model") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + + @@unique([goalRunId]) + @@index([goalRunId]) + @@index([primaryCategory]) + @@index([severity]) + @@index([createdAt]) + @@map("failure_analysis_results") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Phase 10: Enterprise Features +// Multi-Tenant Administration, Compliance, SSO, Custom LLM Providers +// ============================================================================ + +// ============================================================================ +// Tenant Entity +// Core tenant/organization management +// ============================================================================ + +model Tenant { + id String @id @default(cuid()) + name String + slug String @unique // URL-friendly identifier + + // Contact information + adminEmail String @map("admin_email") + adminName String? @map("admin_name") + companyName String? @map("company_name") + + // Subscription/billing + plan String @default("free") // free, starter, professional, enterprise + billingEmail String? @map("billing_email") + stripeCustomerId String? @map("stripe_customer_id") + + // Status + status String @default("active") // active, suspended, pending, cancelled + trialEnds DateTime? @map("trial_ends") + + // Metadata + metadata Json @default("{}") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + settings TenantSettings? + quotas TenantQuota? + ssoConfig SSOConfiguration? + llmProviders LLMProviderConfig[] + complianceReports ComplianceReport[] + dataProcessingRecords DataProcessingRecord[] + + @@index([slug]) + @@index([status]) + @@index([plan]) + @@index([createdAt]) + @@map("tenants") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Tenant Settings Entity +// Configurable settings per tenant +// ============================================================================ + +model TenantSettings { + id String @id @default(cuid()) + tenantId String @unique @map("tenant_id") + + // General settings + timezone String @default("UTC") + dateFormat String @default("YYYY-MM-DD") @map("date_format") + defaultWorkspaceMode String @default("SHARED") @map("default_workspace_mode") + + // Security settings + requireMfa Boolean @default(false) @map("require_mfa") + sessionTimeout Int @default(3600) @map("session_timeout") // seconds + ipAllowlist String[] @default([]) @map("ip_allowlist") + allowedDomains String[] @default([]) @map("allowed_domains") + + // Workflow settings + maxConcurrentGoals Int @default(5) @map("max_concurrent_goals") + defaultApprovalTimeout Int @default(3600) @map("default_approval_timeout") // seconds + autoReplanEnabled Boolean @default(true) @map("auto_replan_enabled") + maxReplanAttempts Int @default(3) @map("max_replan_attempts") + + // Notification settings + notificationEmail String? @map("notification_email") + slackWebhookUrl String? @map("slack_webhook_url") + teamsWebhookUrl String? @map("teams_webhook_url") + webhookSecretKey String? @map("webhook_secret_key") + + // Data retention + auditLogRetentionDays Int @default(365) @map("audit_log_retention_days") + goalRunRetentionDays Int @default(90) @map("goal_run_retention_days") + + // Feature flags + features Json @default("{}") // Feature flag overrides + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade) + + @@map("tenant_settings") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Tenant Quota Entity +// Usage quotas and limits per tenant +// ============================================================================ + +model TenantQuota { + id String @id @default(cuid()) + tenantId String @unique @map("tenant_id") + + // Goal run limits + monthlyGoalRuns Int @default(1000) @map("monthly_goal_runs") + monthlyGoalRunsUsed Int @default(0) @map("monthly_goal_runs_used") + + // LLM token limits + monthlyTokens Int @default(1000000) @map("monthly_tokens") + monthlyTokensUsed Int @default(0) @map("monthly_tokens_used") + + // Storage limits (bytes) + storageLimit BigInt @default(10737418240) @map("storage_limit") // 10GB + storageUsed BigInt @default(0) @map("storage_used") + + // Concurrent limits + maxConcurrentWorkspaces Int @default(10) @map("max_concurrent_workspaces") + maxUsersPerTenant Int @default(50) @map("max_users_per_tenant") + maxTemplates Int @default(100) @map("max_templates") + maxBatchSize Int @default(50) @map("max_batch_size") + + // API limits + apiRateLimitPerMinute Int @default(100) @map("api_rate_limit_per_minute") + + // Reset tracking + quotaPeriodStart DateTime @default(now()) @map("quota_period_start") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade) + + @@map("tenant_quotas") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// SSO Configuration Entity +// SAML/SSO settings per tenant +// ============================================================================ + +model SSOConfiguration { + id String @id @default(cuid()) + tenantId String @unique @map("tenant_id") + + // SSO type + provider String @default("saml") // saml, oidc + + // SAML configuration + entityId String? @map("entity_id") + ssoUrl String? @map("sso_url") + sloUrl String? @map("slo_url") + certificate String? // IdP public certificate (PEM) + signatureAlgorithm String @default("sha256") @map("signature_algorithm") + + // Attribute mapping + attributeMapping Json @default("{}") @map("attribute_mapping") + // Example: { "email": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" } + + // Just-in-time provisioning + jitProvisioning Boolean @default(true) @map("jit_provisioning") + defaultRole String @default("member") @map("default_role") + autoUpdateAttributes Boolean @default(true) @map("auto_update_attributes") + + // Domain validation + enforcedDomains String[] @default([]) @map("enforced_domains") + allowBypassSSO Boolean @default(false) @map("allow_bypass_sso") + + // Status + enabled Boolean @default(false) + verified Boolean @default(false) + + // Metadata + idpMetadataUrl String? @map("idp_metadata_url") + spMetadataUrl String? @map("sp_metadata_url") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade) + + @@map("sso_configurations") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// LLM Provider Configuration Entity +// Custom LLM provider settings per tenant +// ============================================================================ + +model LLMProviderConfig { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Provider info + provider String // anthropic, openai, azure_openai, google_vertex, aws_bedrock + name String // Display name + isDefault Boolean @default(false) @map("is_default") + + // Configuration + apiKey String? @map("api_key") // Encrypted + apiEndpoint String? @map("api_endpoint") + model String? // Model ID to use + region String? // For cloud providers + + // Provider-specific settings + config Json @default("{}") // Additional provider-specific config + + // Usage tracking + totalTokensUsed BigInt @default(0) @map("total_tokens_used") + totalRequestsCount Int @default(0) @map("total_requests_count") + lastUsedAt DateTime? @map("last_used_at") + + // Fallback configuration + priority Int @default(0) // Lower = higher priority for fallback + isEnabled Boolean @default(true) @map("is_enabled") + isFallback Boolean @default(false) @map("is_fallback") + + // Rate limiting + maxRequestsPerMinute Int? @map("max_requests_per_minute") + maxTokensPerRequest Int? @map("max_tokens_per_request") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade) + + @@unique([tenantId, provider, name]) + @@index([tenantId]) + @@index([tenantId, isDefault]) + @@index([provider]) + @@map("llm_provider_configs") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Compliance Report Entity +// Generated compliance reports (SOC2, GDPR) +// ============================================================================ + +model ComplianceReport { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Report info + reportType String @map("report_type") // soc2, gdpr_article30, dsar, data_retention + reportName String @map("report_name") + reportPeriod String @map("report_period") // e.g., "2025-Q1", "2025-01" + + // Date range + startDate DateTime @map("start_date") + endDate DateTime @map("end_date") + + // Report content + summary String? // Executive summary + findings Json @default("[]") // Array of findings/items + metrics Json @default("{}") // Key metrics + + // Status + status String @default("generating") // generating, completed, failed + generatedAt DateTime? @map("generated_at") + expiresAt DateTime? @map("expires_at") + + // Export info + exportFormat String? @map("export_format") // pdf, csv, json + exportUrl String? @map("export_url") + + // Audit + generatedBy String? @map("generated_by") // User ID or "system" + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade) + + @@index([tenantId]) + @@index([tenantId, reportType]) + @@index([reportType]) + @@index([createdAt]) + @@map("compliance_reports") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// Data Processing Record Entity +// GDPR Article 30 - Records of processing activities +// ============================================================================ + +model DataProcessingRecord { + id String @id @default(cuid()) + tenantId String @map("tenant_id") + + // Processing activity info + activityName String @map("activity_name") + activityDescription String? @map("activity_description") + + // Data subjects + dataSubjectCategories String[] @default([]) @map("data_subject_categories") + // e.g., ["employees", "customers", "prospects"] + + // Personal data categories + personalDataCategories String[] @default([]) @map("personal_data_categories") + // e.g., ["name", "email", "ip_address", "usage_data"] + + // Legal basis + legalBasis String @map("legal_basis") // consent, contract, legal_obligation, vital_interests, public_task, legitimate_interests + legalBasisDetails String? @map("legal_basis_details") + + // Purpose + processingPurposes String[] @default([]) @map("processing_purposes") + + // Recipients + recipientCategories String[] @default([]) @map("recipient_categories") + thirdCountryTransfers String[] @default([]) @map("third_country_transfers") + transferSafeguards String? @map("transfer_safeguards") + + // Retention + retentionPeriod String? @map("retention_period") // e.g., "2 years", "until consent withdrawn" + retentionCriteria String? @map("retention_criteria") + + // Security measures + technicalMeasures String[] @default([]) @map("technical_measures") + organizationalMeasures String[] @default([]) @map("organizational_measures") + + // Status + status String @default("active") // active, archived, deleted + reviewDate DateTime? @map("review_date") + reviewedBy String? @map("reviewed_by") + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + // Relations + tenant Tenant @relation(fields: [tenantId], references: [id], onDelete: Cascade) + + @@index([tenantId]) + @@index([activityName]) + @@index([legalBasis]) + @@index([status]) + @@map("data_processing_records") + @@schema("workflow_orchestrator") +} + +// ============================================================================ +// System Configuration +// v1.0.0: Phase E - Maintenance mode handling +// Key-value store for system-level configuration +// ============================================================================ + +model SystemConfig { + id String @id @default(cuid()) + key String @unique + value String @db.Text + + // Timestamps + createdAt DateTime @default(now()) @map("created_at") + updatedAt DateTime @updatedAt @map("updated_at") + + @@map("system_config") + @@schema("workflow_orchestrator") +} diff --git a/packages/bytebot-workflow-orchestrator/scripts/unstick-desktop-not-allowed-prompts.ts b/packages/bytebot-workflow-orchestrator/scripts/unstick-desktop-not-allowed-prompts.ts new file mode 100644 index 000000000..b36d44c4c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/scripts/unstick-desktop-not-allowed-prompts.ts @@ -0,0 +1,321 @@ +import { + PrismaClient, + Prisma, + UserPromptKind, + UserPromptStatus, + GoalRunPhase, + ChecklistItemStatus, + ExecutionSurface, +} from '@prisma/client'; +import { createId } from '@paralleldrive/cuid2'; + +type Args = { + apply: boolean; + dryRun: boolean; + limit: number; + tenantId?: string; + goalRunId?: string; +}; + +function parseArgs(argv: string[]): Args { + const args: Args = { + apply: false, + dryRun: true, + limit: 200, + }; + + for (let i = 0; i < argv.length; i++) { + const token = argv[i]; + if (token === '--apply') { + args.apply = true; + args.dryRun = false; + continue; + } + if (token === '--dry-run') { + args.apply = false; + args.dryRun = true; + continue; + } + if (token === '--limit') { + const raw = argv[i + 1]; + i++; + const parsed = Number(raw); + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`Invalid --limit: ${raw}`); + } + args.limit = Math.floor(parsed); + continue; + } + if (token === '--tenant-id') { + args.tenantId = argv[i + 1]; + i++; + continue; + } + if (token === '--goal-run-id') { + args.goalRunId = argv[i + 1]; + i++; + continue; + } + if (token === '--help' || token === '-h') { + printHelpAndExit(0); + } + } + + return args; +} + +function printHelpAndExit(code: number): never { + // eslint-disable-next-line no-console + console.log(` +Unstick feasibility-misroute prompts (DESKTOP_NOT_ALLOWED) by auto-resolving them, upgrading the step to DESKTOP, and resuming runs. + +This is a one-time maintenance tool to clean up prompts created before the feasibility gate prevented TEXT_ONLY desktop misroutes. + +Usage: + ts-node scripts/unstick-desktop-not-allowed-prompts.ts [--dry-run] [--apply] [--limit N] [--tenant-id T] [--goal-run-id GR] + +Defaults: + --dry-run (prints planned changes, makes no DB writes) + --limit 200 + +Notes: + - Targets OPEN TEXT_CLARIFICATION prompts where payload.result.errorCode == "DESKTOP_NOT_ALLOWED" + - Resolves the prompt as actorType=SYSTEM (policy prohibits feasibility prompts; this is an internal auto-repair) + - Unblocks checklist item if blocked by that prompt and upgrades it: + requiresDesktop=true, executionSurface=DESKTOP + - Resumes goal run from WAITING_USER_INPUT to EXECUTING only if no other OPEN prompts remain + - Enqueues outbox events per prompt (deduped): + user_prompt.resolved: + user_prompt.resume: +`.trim()); + process.exit(code); +} + +type PromptRow = { + id: string; + tenantId: string; + goalRunId: string; + checklistItemId: string | null; + kind: string; + dedupeKey: string; + payload: any; +}; + +async function main(): Promise { + const args = parseArgs(process.argv.slice(2)); + const prisma = new PrismaClient(); + + try { + const prompts = (await prisma.userPrompt.findMany({ + where: { + status: UserPromptStatus.OPEN, + kind: UserPromptKind.TEXT_CLARIFICATION, + ...(args.tenantId ? { tenantId: args.tenantId } : {}), + ...(args.goalRunId ? { goalRunId: args.goalRunId } : {}), + payload: { + path: ['result', 'errorCode'], + equals: 'DESKTOP_NOT_ALLOWED', + }, + }, + orderBy: { createdAt: 'asc' }, + take: args.limit, + select: { + id: true, + tenantId: true, + goalRunId: true, + checklistItemId: true, + kind: true, + dedupeKey: true, + payload: true, + }, + })) as unknown as PromptRow[]; + + // eslint-disable-next-line no-console + console.log( + `[unstick-desktop-not-allowed-prompts] found ${prompts.length} prompts (dryRun=${args.dryRun}, limit=${args.limit})`, + ); + + if (prompts.length === 0) return; + + let cancelled = 0; + let unblocked = 0; + let upgraded = 0; + let resumed = 0; + let outboxEnqueued = 0; + + for (const prompt of prompts) { + const now = new Date(); + const resolvedDedupeKey = `user_prompt.resolved:${prompt.id}`; + const resumeDedupeKey = `user_prompt.resume:${prompt.id}`; + const stepTitle = + typeof prompt.payload?.title === 'string' + ? prompt.payload.title + : typeof prompt.payload?.reason === 'string' + ? prompt.payload.reason + : null; + + if (args.dryRun) { + // eslint-disable-next-line no-console + console.log( + `[dry-run] resolve prompt ${prompt.id} (goalRun=${prompt.goalRunId}, checklistItem=${prompt.checklistItemId ?? 'none'})`, + ); + continue; + } + + await prisma.$transaction(async (tx) => { + const answers = { + systemAutoRepair: { + code: 'DESKTOP_NOT_ALLOWED', + action: 'UPGRADE_TO_DESKTOP', + message: + 'Auto-repaired a feasibility misroute. This prompt should not have been created; the system upgraded execution to DESKTOP.', + }, + }; + + // Immutable resolution record (unique per promptId) + try { + await tx.userPromptResolution.create({ + data: { + id: createId(), + promptId: prompt.id, + tenantId: prompt.tenantId, + goalRunId: prompt.goalRunId, + actorType: 'SYSTEM', + actorId: 'maintenance:unstick-desktop-not-allowed-prompts', + answers, + authzDecision: 'ALLOW', + authzPolicy: 'maintenance', + authzRuleId: 'maintenance:unstick-desktop-not-allowed-prompts', + authzReason: 'Auto-repair feasibility misroute (DESKTOP_NOT_ALLOWED)', + idempotencyKey: resolvedDedupeKey, + }, + }); + } catch (error: any) { + if (error?.code !== 'P2002') throw error; + } + + const updatedPrompt = await tx.userPrompt.updateMany({ + where: { id: prompt.id, status: UserPromptStatus.OPEN }, + data: { + status: UserPromptStatus.RESOLVED, + answers, + resolvedAt: now, + }, + }); + + if (updatedPrompt.count > 0) { + cancelled++; + } + + if (prompt.checklistItemId) { + const updatedItem = await tx.checklistItem.updateMany({ + where: { + id: prompt.checklistItemId, + status: ChecklistItemStatus.BLOCKED, + blockedByPromptId: prompt.id, + }, + data: { + status: ChecklistItemStatus.PENDING, + blockedByPromptId: null, + blockedReason: null, + blockedAt: null, + requiresDesktop: true, + executionSurface: ExecutionSurface.DESKTOP, + }, + }); + + if (updatedItem.count > 0) { + unblocked++; + upgraded++; + } + } + + const remainingOpen = await tx.userPrompt.count({ + where: { goalRunId: prompt.goalRunId, status: UserPromptStatus.OPEN }, + }); + + if (remainingOpen === 0) { + const updatedRun = await tx.goalRun.updateMany({ + where: { id: prompt.goalRunId, phase: GoalRunPhase.WAITING_USER_INPUT }, + data: { + phase: GoalRunPhase.EXECUTING, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + if (updatedRun.count > 0) resumed++; + } + + try { + await tx.outbox.create({ + data: { + id: createId(), + dedupeKey: resolvedDedupeKey, + aggregateId: prompt.goalRunId, + eventType: 'user_prompt.resolved', + payload: { + promptId: prompt.id, + tenantId: prompt.tenantId, + goalRunId: prompt.goalRunId, + checklistItemId: prompt.checklistItemId, + kind: prompt.kind, + stepDescription: stepTitle, + resolvedAt: now.toISOString(), + reason: { + code: 'AUTO_REPAIR_FEASIBILITY_MISROUTE', + message: + 'Resolved a DESKTOP_NOT_ALLOWED prompt via internal auto-repair (feasibility gate).', + }, + repair: prompt.checklistItemId + ? { + upgradedChecklistItemId: prompt.checklistItemId, + executionSurface: ExecutionSurface.DESKTOP, + } + : null, + }, + }, + }); + outboxEnqueued++; + } catch (error: any) { + // Idempotency: ignore unique dedupe constraint. + if (error?.code !== 'P2002') throw error; + } + + try { + await tx.outbox.create({ + data: { + id: createId(), + dedupeKey: resumeDedupeKey, + aggregateId: prompt.goalRunId, + eventType: 'user_prompt.resume', + payload: { + promptId: prompt.id, + goalRunId: prompt.goalRunId, + tenantId: prompt.tenantId, + updateId: resumeDedupeKey, + }, + }, + }); + outboxEnqueued++; + } catch (error: any) { + if (error?.code !== 'P2002') throw error; + } + }); + } + + // eslint-disable-next-line no-console + console.log( + `[unstick-desktop-not-allowed-prompts] resolved=${cancelled} unblocked=${unblocked} upgraded=${upgraded} resumed=${resumed} outbox=${outboxEnqueued}`, + ); + } finally { + await prisma.$disconnect(); + } +} + +main().catch((error) => { + // eslint-disable-next-line no-console + console.error(`[unstick-desktop-not-allowed-prompts] fatal: ${error?.message || error}`); + process.exit(1); +}); diff --git a/packages/bytebot-workflow-orchestrator/scripts/unstick-strategy-prompts.ts b/packages/bytebot-workflow-orchestrator/scripts/unstick-strategy-prompts.ts new file mode 100644 index 000000000..0a4f13c5c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/scripts/unstick-strategy-prompts.ts @@ -0,0 +1,257 @@ +import { + PrismaClient, + Prisma, + UserPromptCancelReason, + UserPromptKind, + UserPromptStatus, + GoalRunPhase, + ChecklistItemStatus, +} from '@prisma/client'; +import { createId } from '@paralleldrive/cuid2'; + +type Args = { + apply: boolean; + dryRun: boolean; + limit: number; + tenantId?: string; + goalRunId?: string; +}; + +function parseArgs(argv: string[]): Args { + const args: Args = { + apply: false, + dryRun: true, + limit: 200, + }; + + for (let i = 0; i < argv.length; i++) { + const token = argv[i]; + if (token === '--apply') { + args.apply = true; + args.dryRun = false; + continue; + } + if (token === '--dry-run') { + args.apply = false; + args.dryRun = true; + continue; + } + if (token === '--limit') { + const raw = argv[i + 1]; + i++; + const parsed = Number(raw); + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`Invalid --limit: ${raw}`); + } + args.limit = Math.floor(parsed); + continue; + } + if (token === '--tenant-id') { + args.tenantId = argv[i + 1]; + i++; + continue; + } + if (token === '--goal-run-id') { + args.goalRunId = argv[i + 1]; + i++; + continue; + } + if (token === '--help' || token === '-h') { + printHelpAndExit(0); + } + } + + return args; +} + +function printHelpAndExit(code: number): never { + // eslint-disable-next-line no-console + console.log(` +Unstick strategy-derived prompts (AGENT_REQUESTED_HELP) by canceling them and resuming runs. + +This is a one-time maintenance tool to clean up prompts created before the "no strategy prompts" fix. + +Usage: + ts-node scripts/unstick-strategy-prompts.ts [--dry-run] [--apply] [--limit N] [--tenant-id T] [--goal-run-id GR] + +Defaults: + --dry-run (prints planned changes, makes no DB writes) + --limit 200 + +Notes: + - Targets OPEN TEXT_CLARIFICATION prompts where payload.result.errorCode == "AGENT_REQUESTED_HELP" + - Cancels prompt with cancelReason=POLICY_DENY + - Unblocks checklist item if blocked by that prompt + - Resumes goal run from WAITING_USER_INPUT to EXECUTING only if no other OPEN prompts remain + - Enqueues one outbox event per cancelled prompt (dedupeKey=user_prompt.cancelled:) +`.trim()); + process.exit(code); +} + +type PromptRow = { + id: string; + tenantId: string; + goalRunId: string; + checklistItemId: string | null; + kind: string; + dedupeKey: string; + payload: any; +}; + +async function main(): Promise { + const args = parseArgs(process.argv.slice(2)); + const prisma = new PrismaClient(); + + try { + const prompts = (await prisma.userPrompt.findMany({ + where: { + status: UserPromptStatus.OPEN, + kind: UserPromptKind.TEXT_CLARIFICATION, + ...(args.tenantId ? { tenantId: args.tenantId } : {}), + ...(args.goalRunId ? { goalRunId: args.goalRunId } : {}), + payload: { + path: ['result', 'errorCode'], + equals: 'AGENT_REQUESTED_HELP', + }, + }, + orderBy: { createdAt: 'asc' }, + take: args.limit, + select: { + id: true, + tenantId: true, + goalRunId: true, + checklistItemId: true, + kind: true, + dedupeKey: true, + payload: true, + }, + })) as unknown as PromptRow[]; + + // eslint-disable-next-line no-console + console.log( + `[unstick-strategy-prompts] found ${prompts.length} prompts (dryRun=${args.dryRun}, limit=${args.limit})`, + ); + + if (prompts.length === 0) return; + + let cancelled = 0; + let unblocked = 0; + let resumed = 0; + let outboxEnqueued = 0; + + for (const prompt of prompts) { + const now = new Date(); + const outboxDedupeKey = `user_prompt.cancelled:${prompt.id}`; + const stepTitle = + typeof prompt.payload?.title === 'string' + ? prompt.payload.title + : typeof prompt.payload?.reason === 'string' + ? prompt.payload.reason + : null; + + if (args.dryRun) { + // eslint-disable-next-line no-console + console.log( + `[dry-run] cancel prompt ${prompt.id} (goalRun=${prompt.goalRunId}, checklistItem=${prompt.checklistItemId ?? 'none'})`, + ); + continue; + } + + await prisma.$transaction(async (tx) => { + const updatedPrompt = await tx.userPrompt.updateMany({ + where: { id: prompt.id, status: UserPromptStatus.OPEN }, + data: { + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.POLICY_DENY, + cancelledAt: now, + }, + }); + + if (updatedPrompt.count > 0) { + cancelled++; + } + + if (prompt.checklistItemId) { + const updatedItem = await tx.checklistItem.updateMany({ + where: { + id: prompt.checklistItemId, + status: ChecklistItemStatus.BLOCKED, + blockedByPromptId: prompt.id, + }, + data: { + status: ChecklistItemStatus.PENDING, + blockedByPromptId: null, + blockedReason: null, + blockedAt: null, + }, + }); + + if (updatedItem.count > 0) { + unblocked++; + } + } + + const remainingOpen = await tx.userPrompt.count({ + where: { goalRunId: prompt.goalRunId, status: UserPromptStatus.OPEN }, + }); + + if (remainingOpen === 0) { + const updatedRun = await tx.goalRun.updateMany({ + where: { id: prompt.goalRunId, phase: GoalRunPhase.WAITING_USER_INPUT }, + data: { + phase: GoalRunPhase.EXECUTING, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + if (updatedRun.count > 0) resumed++; + } + + try { + await tx.outbox.create({ + data: { + id: createId(), + dedupeKey: outboxDedupeKey, + aggregateId: prompt.goalRunId, + eventType: 'user_prompt.cancelled', + payload: { + promptId: prompt.id, + tenantId: prompt.tenantId, + goalRunId: prompt.goalRunId, + checklistItemId: prompt.checklistItemId, + kind: prompt.kind, + stepDescription: stepTitle, + cancelReason: 'POLICY_DENY', + cancelledAt: now.toISOString(), + reason: { + code: 'DEPRECATED_STRATEGY_PROMPT', + message: + 'Canceled a strategy-derived prompt created before the no-strategy-prompts contract fix.', + }, + }, + }, + }); + outboxEnqueued++; + } catch (error: any) { + // Idempotency: ignore unique dedupe constraint. + if (error?.code !== 'P2002') throw error; + } + }); + } + + // eslint-disable-next-line no-console + console.log( + `[unstick-strategy-prompts] cancelled=${cancelled} unblocked=${unblocked} resumed=${resumed} outbox=${outboxEnqueued}`, + ); + } finally { + await prisma.$disconnect(); + } +} + +main().catch((error) => { + // eslint-disable-next-line no-console + console.error(`[unstick-strategy-prompts] fatal: ${error?.message || error}`); + process.exit(1); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/app.module.ts b/packages/bytebot-workflow-orchestrator/src/app.module.ts new file mode 100644 index 000000000..d2c1dcce3 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/app.module.ts @@ -0,0 +1,340 @@ +/** + * App Module + * v5.11.0: Advanced Enhancements V2 - Dashboard, Multi-Tenant Knowledge, + * Entity Resolution, Failure Prediction, Cross-Goal Learning + * v5.10.0: Advanced Enhancements - Context Summarization, Knowledge Extraction, + * Background Mode, Checkpoint Persistence + * v5.5.7: Added WorkspaceDbReconcilerService for K8s/DB drift reconciliation + * v5.6.1: Added OrphanPodGCService for workspace cleanup + * v5.6.0: Phase 4-5 Live Desktop Control APIs & Real-Time Event System + * Root module for the Workflow Orchestrator + */ + +import { Module } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { ScheduleModule } from '@nestjs/schedule'; +import { EventEmitterModule } from '@nestjs/event-emitter'; +import { TerminusModule } from '@nestjs/terminus'; +import { ThrottlerModule, ThrottlerGuard } from '@nestjs/throttler'; +import { JwtModule } from '@nestjs/jwt'; +import { APP_GUARD } from '@nestjs/core'; + +// Services +import { PrismaService } from './services/prisma.service'; +import { WorkflowService } from './services/workflow.service'; +import { WorkspaceService } from './services/workspace.service'; +import { SchedulerService } from './services/scheduler.service'; +import { NodeExecutorService } from './services/node-executor.service'; +// v1.0.3: Leader election for scheduler single-runner guarantee +import { LeaderElectionService } from './services/leader-election.service'; +// v1.0.0 M5: High-risk gating and idempotency services +import { HighRiskService } from './services/high-risk.service'; +import { ApprovalService } from './services/approval.service'; +import { IdempotencyService } from './services/idempotency.service'; +import { CleanupService } from './services/cleanup.service'; +// v5.6.1: Orphan pod garbage collection +import { OrphanPodGCService } from './services/orphan-pod-gc.service'; +// v5.5.7: Workspace DB reconciler for K8s/DB drift detection +import { WorkspaceDbReconcilerService } from './services/workspace-db-reconciler.service'; +// Post-M5: Webhook notifications and audit logging +import { WebhookService } from './services/webhook.service'; +import { AuditService } from './services/audit.service'; +// Phase 7: Multi-Agent Orchestration services +import { AgentRegistryService } from './services/agent-registry.service'; +import { AgentRouterService } from './services/agent-router.service'; +import { AgentHealthService } from './services/agent-health.service'; +// Phase 8: Advanced Analytics Dashboard services +import { MetricsCollectorService } from './services/metrics-collector.service'; +import { MetricsAggregationService } from './services/metrics-aggregation.service'; +import { AnalyticsQueryService } from './services/analytics-query.service'; +// Phase 9: Self-Healing & Auto-Recovery services +import { CircuitBreakerService } from './services/circuit-breaker.service'; +import { TaskRecoveryService } from './services/task-recovery.service'; +import { WorkflowCheckpointService } from './services/workflow-checkpoint.service'; +import { DeadLetterQueueService } from './services/dead-letter-queue.service'; +// Phase 10: Manus-Style Goal-First Orchestration services + import { GoalRunService } from './services/goal-run.service'; + import { PlannerService } from './services/planner.service'; + import { GoalIntakeService } from './services/goal-intake.service'; + import { OrchestratorLoopService } from './services/orchestrator-loop.service'; + import { UserPromptService } from './services/user-prompt.service'; + import { JsonSchemaValidatorService } from './services/json-schema-validator.service'; + import { OutboxService } from './services/outbox.service'; + import { OutboxPublisherService } from './services/outbox-publisher.service'; + import { PromptResumeReconcilerService } from './services/prompt-resume-reconciler.service'; + import { InteractionSliMetricsService } from './services/interaction-sli-metrics.service'; + import { UserPromptResolutionService } from './services/user-prompt-resolution.service'; +// Phase 7 (v5.2.0): Enhanced Features - Templates, Batch Execution +import { GoalTemplateService } from './services/goal-template.service'; +import { BatchService } from './services/batch.service'; +import { TemplateSeedService } from './services/template-seed.service'; +// Phase 8 (v5.3.0): External Integrations - Slack, Teams, GitHub/GitLab +import { SlackNotificationService } from './services/slack-notification.service'; +import { TeamsNotificationService } from './services/teams-notification.service'; +import { GitIntegrationService } from './services/git-integration.service'; +import { SlackBridgeService } from './services/slack-bridge.service'; +// Phase 9 (v5.4.0): Advanced AI Features - Goal Refinement, Template Generation, Failure Analysis +import { GoalRefinementService } from './services/goal-refinement.service'; +import { TemplateGenerationService } from './services/template-generation.service'; +import { FailureAnalysisService } from './services/failure-analysis.service'; +// Phase 10 (v5.5.0): Enterprise Features - Multi-Tenant Admin, Audit Export, Compliance, SSO, LLM Providers +import { TenantAdminService } from './services/tenant-admin.service'; +import { AuditExportService } from './services/audit-export.service'; +import { ComplianceService } from './services/compliance.service'; +import { SSOService } from './services/sso.service'; +import { LLMProviderService } from './services/llm-provider.service'; +// Phase 4 (v5.6.0): Live Desktop Control APIs +import { DesktopControlService } from './services/desktop-control.service'; +// v5.6.9: Poll-based task dispatch adapter +import { TaskDispatchService } from './services/task-dispatch.service'; +// v5.5.15: DB transient error resilience +import { DbTransientService } from './services/db-transient.service'; +// v5.5.18 Phase E: Maintenance mode handling +import { MaintenanceModeService } from './services/maintenance-mode.service'; +// v5.8.0: Option C Industry Standard - Failure classification for intelligent retry/replan +import { FailureClassificationService } from './services/failure-classification.service'; +// v5.9.0: Context-Preserving Replanning - Manus-style checkpoint service +import { GoalCheckpointService } from './services/goal-checkpoint.service'; +// v5.10.0: Advanced Enhancements - Context summarization, knowledge extraction, background mode +import { ContextSummarizationService } from './services/context-summarization.service'; +import { KnowledgeExtractionService } from './services/knowledge-extraction.service'; +import { BackgroundModeService } from './services/background-mode.service'; +import { CheckpointPersistenceService } from './services/checkpoint-persistence.service'; +// v5.11.0: Advanced Enhancements V2 - Dashboard, Multi-Tenant Knowledge, Entity Resolution, Failure Prediction, Cross-Goal Learning +import { DashboardService } from './services/dashboard.service'; +import { TenantKnowledgeService } from './services/tenant-knowledge.service'; +import { EntityResolutionService } from './services/entity-resolution.service'; +import { FailurePredictionService } from './services/failure-prediction.service'; +import { CrossGoalLearningService } from './services/cross-goal-learning.service'; +// v5.12.0: Temporal Workflow Integration +import { TemporalModule } from './temporal/temporal.module'; + +// Gateways +// Phase 5 (v5.6.0): Real-Time Event System +import { RunEventsGateway } from './gateways/run-events.gateway'; + +// Controllers +import { HealthController } from './controllers/health.controller'; +import { WorkflowController } from './controllers/workflow.controller'; +import { WorkspaceProxyController } from './controllers/workspace-proxy.controller'; +import { ApprovalController } from './controllers/approval.controller'; +// Post-M5: Webhook and audit controllers +import { WebhookController } from './controllers/webhook.controller'; +import { AuditController } from './controllers/audit.controller'; +// Phase 7: Multi-Agent Orchestration controller +import { AgentController } from './controllers/agent.controller'; +// Phase 8: Analytics controller +import { AnalyticsController } from './controllers/analytics.controller'; +// Phase 9: Self-Healing controller +import { SelfHealingController } from './controllers/self-healing.controller'; +// Phase 10: Manus-Style Goal-First Orchestration controller +import { GoalRunController } from './controllers/goal-run.controller'; +// Phase 7 (v5.2.0): Enhanced Features controllers +import { TemplateController } from './controllers/template.controller'; +import { BatchController } from './controllers/batch.controller'; +// Phase 8 (v5.3.0): External Integrations controllers +import { NotificationController } from './controllers/notification.controller'; +import { GitIntegrationController } from './controllers/git-integration.controller'; +import { SlackBridgeController } from './controllers/slack-bridge.controller'; +// Phase 9 (v5.4.0): Advanced AI Features controller +import { AiFeaturesController } from './controllers/ai-features.controller'; +// Phase 10 (v5.5.0): Enterprise Features controller +import { EnterpriseController } from './controllers/enterprise.controller'; +// Phase 4 (v5.6.0): Live Desktop Control APIs controller +import { DesktopControlController } from './controllers/desktop-control.controller'; +// v5.10.0: Checkpoint Visualization API +import { CheckpointController } from './controllers/checkpoint.controller'; +// v5.11.0: Dashboard Visualization API +import { DashboardController } from './controllers/dashboard.controller'; +// v5.13.0 Phase 11.3: Internal API for Temporal worker integration +import { InternalController } from './controllers/internal.controller'; +import { UserPromptController } from './controllers/user-prompt.controller'; +import { EventsController } from './controllers/events.controller'; + +// Modules +import { MetricsModule } from './modules/metrics.module'; + +@Module({ + imports: [ + // Configuration from environment + ConfigModule.forRoot({ + isGlobal: true, + envFilePath: ['.env.local', '.env'], + }), + + // Scheduling for orchestration loop + ScheduleModule.forRoot(), + + // Event emitter for workflow events + EventEmitterModule.forRoot(), + + // Health checks + TerminusModule, + + // Metrics + MetricsModule, + + // Phase 5 (v5.6.0): JWT for WebSocket authentication + JwtModule.register({ + secret: process.env.JWT_SECRET || 'change-me-in-production', + signOptions: { expiresIn: '24h' }, + }), + + // Phase 6: Rate limiting for API protection + ThrottlerModule.forRoot([ + { + name: 'short', + ttl: 1000, // 1 second + limit: 10, // 10 requests per second + }, + { + name: 'medium', + ttl: 10000, // 10 seconds + limit: 50, // 50 requests per 10 seconds + }, + { + name: 'long', + ttl: 60000, // 1 minute + limit: 100, // 100 requests per minute + }, + ]), + + // v5.12.0: Temporal Workflow Integration (conditionally connects based on TEMPORAL_WORKFLOW_ENABLED) + TemporalModule, + ], + controllers: [ + HealthController, + WorkflowController, + WorkspaceProxyController, + ApprovalController, // v1.0.0 M5: Approval management + // Post-M5: Webhook and audit endpoints + WebhookController, + AuditController, + // Phase 7: Multi-Agent Orchestration + AgentController, + // Phase 8: Analytics Dashboard + AnalyticsController, + // Phase 9: Self-Healing & Auto-Recovery + SelfHealingController, + // Phase 10: Manus-Style Goal-First Orchestration + GoalRunController, + // Phase 7 (v5.2.0): Enhanced Features + TemplateController, + BatchController, + // Phase 8 (v5.3.0): External Integrations + NotificationController, + SlackBridgeController, + GitIntegrationController, + // Phase 9 (v5.4.0): Advanced AI Features + AiFeaturesController, + // Phase 10 (v5.5.0): Enterprise Features + EnterpriseController, + // Phase 4 (v5.6.0): Live Desktop Control APIs + DesktopControlController, + // v5.10.0: Checkpoint Visualization API + CheckpointController, + // v5.11.0: Dashboard Visualization API + DashboardController, + // v5.13.0 Phase 11.3: Internal API for Temporal worker integration + InternalController, + UserPromptController, + EventsController, + ], + providers: [ + PrismaService, + // v5.5.15: DB transient error resilience (must be early, others depend on it) + DbTransientService, + WorkflowService, + WorkspaceService, + // v1.0.3: Leader election must be initialized before scheduler + LeaderElectionService, + // Phase 7: Multi-Agent Orchestration (must be before NodeExecutor) + AgentRegistryService, + AgentRouterService, + AgentHealthService, + // Phase 8: Analytics Dashboard (event-driven collection) + MetricsCollectorService, + MetricsAggregationService, + AnalyticsQueryService, + // Scheduler and executor + SchedulerService, + NodeExecutorService, + // v1.0.0 M5: High-risk gating and idempotency + HighRiskService, + ApprovalService, + IdempotencyService, + CleanupService, + // v5.6.1: Orphan pod garbage collection + OrphanPodGCService, + // v5.5.7: Workspace DB reconciler for K8s/DB drift detection + WorkspaceDbReconcilerService, + // Post-M5: Webhook notifications and audit logging + WebhookService, + AuditService, + // Phase 9: Self-Healing & Auto-Recovery + CircuitBreakerService, + TaskRecoveryService, + WorkflowCheckpointService, + DeadLetterQueueService, + // Phase 10: Manus-Style Goal-First Orchestration + GoalRunService, + PlannerService, + GoalIntakeService, + UserPromptService, + JsonSchemaValidatorService, + OutboxService, + OutboxPublisherService, + PromptResumeReconcilerService, + InteractionSliMetricsService, + UserPromptResolutionService, + // v5.5.18 Phase E: Maintenance mode (must be before orchestrator loop) + MaintenanceModeService, + // v5.8.0: Option C Industry Standard - Failure classification (must be before orchestrator loop) + FailureClassificationService, + // v5.9.0: Context-Preserving Replanning - Manus-style checkpoint (must be before orchestrator loop) + GoalCheckpointService, + // v5.10.0: Advanced Enhancements (must be before orchestrator loop) + ContextSummarizationService, + KnowledgeExtractionService, + BackgroundModeService, + CheckpointPersistenceService, + // v5.11.0: Advanced Enhancements V2 (must be before orchestrator loop) + DashboardService, + TenantKnowledgeService, + EntityResolutionService, + FailurePredictionService, + CrossGoalLearningService, + OrchestratorLoopService, + // Phase 7 (v5.2.0): Enhanced Features - Templates, Batch Execution + GoalTemplateService, + BatchService, + TemplateSeedService, // Seeds built-in templates on startup + // Phase 8 (v5.3.0): External Integrations - Slack, Teams, GitHub/GitLab + SlackNotificationService, + TeamsNotificationService, + GitIntegrationService, + SlackBridgeService, + // Phase 9 (v5.4.0): Advanced AI Features - Goal Refinement, Template Generation, Failure Analysis + GoalRefinementService, + TemplateGenerationService, + FailureAnalysisService, + // Phase 10 (v5.5.0): Enterprise Features - Multi-Tenant Admin, Audit Export, Compliance, SSO, LLM Providers + TenantAdminService, + AuditExportService, + ComplianceService, + SSOService, + LLMProviderService, + // Phase 4 (v5.6.0): Live Desktop Control APIs + DesktopControlService, + // v5.6.9: Poll-based task dispatch adapter + TaskDispatchService, + // Phase 5 (v5.6.0): Real-Time Event System Gateway + RunEventsGateway, + // Phase 6: Global rate limiting guard + { + provide: APP_GUARD, + useClass: ThrottlerGuard, + }, + ], +}) +export class AppModule {} diff --git a/packages/bytebot-workflow-orchestrator/src/contracts/goal-feasibility.spec.ts b/packages/bytebot-workflow-orchestrator/src/contracts/goal-feasibility.spec.ts new file mode 100644 index 000000000..0a016bd06 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/contracts/goal-feasibility.spec.ts @@ -0,0 +1,22 @@ +import { ExecutionSurface } from '@prisma/client'; +import { inferGoalFeasibility } from './goal-feasibility'; + +describe('inferGoalFeasibility', () => { + it('does not classify common programming "return" text as travel-shopping', () => { + expect(inferGoalFeasibility('Return the hex digest of the file')).toBeNull(); + expect(inferGoalFeasibility('Return 0 if the value is missing')).toBeNull(); + }); + + it('does not misclassify "department" as a travel "depart" signal', () => { + expect(inferGoalFeasibility('Department budget review for Q1')).toBeNull(); + expect(inferGoalFeasibility('departments and returns report')).toBeNull(); + }); + + it('classifies depart+return+date as travel-shopping', () => { + expect(inferGoalFeasibility('Depart 2/1 and return 2/7')).toEqual({ + requiredSurface: ExecutionSurface.DESKTOP, + reason: 'travel-shopping(keyword:depart+return+date)', + }); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/contracts/goal-feasibility.ts b/packages/bytebot-workflow-orchestrator/src/contracts/goal-feasibility.ts new file mode 100644 index 000000000..81f578ee9 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/contracts/goal-feasibility.ts @@ -0,0 +1,63 @@ +import { ExecutionSurface } from '@prisma/client'; + +export type GoalFeasibilityInference = { + requiredSurface: ExecutionSurface; + reason: string; +}; + +/** + * Infer the minimum required execution surface for a goal. + * + * This is a *feasibility* gate, not a strategy selector. It should: + * - Prefer deterministic signals when available + * - Be conservative (upgrade to DESKTOP when in doubt for web-shopping goals) + * - Avoid creating user prompts (strategy must never block execution) + * + * NOTE: Until GoalSpec schemas are domain-specific, we use a small, auditable + * keyword/regex gate for travel-shopping to prevent misroutes to TEXT_ONLY. + */ +export function inferGoalFeasibility(goal: string | null | undefined): GoalFeasibilityInference | null { + const text = String(goal ?? '').trim().toLowerCase(); + if (!text) return null; + + // Travel-shopping: requires interacting with live web UIs (aggregators, booking sites). + // Examples seen in incidents: "price a trip with flight and hotel", "search for round-trip flights". + const travelPatterns: Array<{ pattern: RegExp; reason: string }> = [ + { pattern: /\bflight(s)?\b/, reason: 'keyword:flight' }, + { pattern: /\bairfare\b/, reason: 'keyword:airfare' }, + { pattern: /\bround[- ]?trip\b/, reason: 'keyword:round-trip' }, + { pattern: /\bhotel(s)?\b/, reason: 'keyword:hotel' }, + { pattern: /\bcar rental\b/, reason: 'keyword:car-rental' }, + { pattern: /\bbooking\.com\b/, reason: 'site:booking.com' }, + { pattern: /\bexpedia\b/, reason: 'site:expedia' }, + { pattern: /\bkayak\b/, reason: 'site:kayak' }, + { pattern: /\bgoogle flights\b/, reason: 'site:google-flights' }, + ]; + + for (const entry of travelPatterns) { + if (entry.pattern.test(text)) { + return { requiredSurface: ExecutionSurface.DESKTOP, reason: `travel-shopping(${entry.reason})` }; + } + } + + // Weak travel cues must never trigger on their own (avoid false positives for common words like "return"). + // Only classify travel when the combined intent is very likely travel-shopping. + const hasDepartWord = /\bdepart(?:ing|ure)?\b/.test(text); + const hasReturnWord = /\breturn(?:ing)?\b/.test(text); + + // Minimal "date-like" matcher: common short numeric dates and month names. + // We purposely keep this small and auditable to avoid stringy NL heuristics. + const hasDateLikeToken = + /\b(?:\d{1,2}\/\d{1,2}(?:\/\d{2,4})?|\d{4}-\d{2}-\d{2}|jan(?:uary)?|feb(?:ruary)?|mar(?:ch)?|apr(?:il)?|may|jun(?:e)?|jul(?:y)?|aug(?:ust)?|sep(?:t(?:ember)?)?|oct(?:ober)?|nov(?:ember)?|dec(?:ember)?)\b/.test( + text, + ); + + if (hasDepartWord && hasReturnWord && hasDateLikeToken) { + return { + requiredSurface: ExecutionSurface.DESKTOP, + reason: 'travel-shopping(keyword:depart+return+date)', + }; + } + + return null; +} diff --git a/packages/bytebot-workflow-orchestrator/src/contracts/planner-tools.spec.ts b/packages/bytebot-workflow-orchestrator/src/contracts/planner-tools.spec.ts new file mode 100644 index 000000000..8b5acb48c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/contracts/planner-tools.spec.ts @@ -0,0 +1,15 @@ +import { normalizeSuggestedToolsOrThrow } from './planner-tools'; + +describe('planner-tools', () => { + describe('normalizeSuggestedToolsOrThrow', () => { + it('normalizes execution tool alias web_browser -> browser', () => { + expect( + normalizeSuggestedToolsOrThrow({ + suggestedTools: ['web_browser'], + allowedTools: null, + }), + ).toEqual(['browser']); + }); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/contracts/planner-tools.ts b/packages/bytebot-workflow-orchestrator/src/contracts/planner-tools.ts new file mode 100644 index 000000000..6ed9a8c3b --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/contracts/planner-tools.ts @@ -0,0 +1,232 @@ +export type CanonicalInteractionToolToken = 'ASK_USER' | 'APPROVAL'; + +export const CANONICAL_INTERACTION_TOOL_TOKENS: ReadonlyArray = [ + 'ASK_USER', + 'APPROVAL', +]; + +export type PlannerOutputContractViolationCode = + | 'UNKNOWN_SUGGESTED_TOOL_TOKEN' + | 'EXECUTE_STEP_HAS_INTERACTION_TOOL'; + +export class PlannerOutputContractViolationError extends Error { + readonly code: PlannerOutputContractViolationCode; + readonly details?: Record; + + constructor(params: { + code: PlannerOutputContractViolationCode; + message: string; + details?: Record; + }) { + super(params.message); + this.name = 'PlannerOutputContractViolationError'; + this.code = params.code; + this.details = params.details; + } +} + +export class UnknownSuggestedToolTokenError extends PlannerOutputContractViolationError { + readonly toolToken: string; + + constructor(params: { toolToken: string; allowedTools: string[] }) { + super({ + code: 'UNKNOWN_SUGGESTED_TOOL_TOKEN', + message: `Planner output contains an unknown suggestedTools token: ${JSON.stringify(params.toolToken)}`, + details: { toolToken: params.toolToken, allowedTools: params.allowedTools }, + }); + this.name = 'UnknownSuggestedToolTokenError'; + this.toolToken = params.toolToken; + } +} + +export class ExecuteStepHasInteractionToolError extends PlannerOutputContractViolationError { + constructor(params: { stepIndex: number; stepDescription: string; interactionTool: string }) { + super({ + code: 'EXECUTE_STEP_HAS_INTERACTION_TOOL', + message: + `Planner output misclassified an interaction step as EXECUTE (stepIndex=${params.stepIndex}, ` + + `interactionTool=${JSON.stringify(params.interactionTool)}): ${params.stepDescription}`, + details: { + stepIndex: params.stepIndex, + stepDescription: params.stepDescription, + interactionTool: params.interactionTool, + }, + }); + this.name = 'ExecuteStepHasInteractionToolError'; + } +} + +const INTERACTION_TOOL_ALIASES: Record = { + // Canonical + ask_user: 'ASK_USER', + approval: 'APPROVAL', + + // Backwards-compatible aliases (planner drift) + chat: 'ASK_USER', + prompt_user: 'ASK_USER', + confirm_user: 'ASK_USER', + prompt_the_user: 'ASK_USER', + ask_the_user: 'ASK_USER', + user_input: 'ASK_USER', + + // Common synonyms + approve: 'APPROVAL', + request_approval: 'APPROVAL', + ask_approval: 'APPROVAL', +}; + +// Planner/tooling drift aliases for execution tools. +// Keep this list intentionally small and auditable: these map to existing canonical tool tokens. +const EXECUTION_TOOL_ALIASES: Record = { + // Common LLM/planner synonym for desktop browsing. + web_browser: 'browser', + webbrowser: 'browser', +}; + +// When GoalRun.constraints.allowedTools is missing, we still enforce a contract-level +// allowlist to fail-closed against "invented tools" and schema drift. +const DEFAULT_EXECUTION_TOOL_TOKENS: ReadonlyArray = [ + // Desktop / computer-use tools (agent-side) + 'computer', + 'click', + 'type', + 'key', + 'scroll', + 'screenshot', + 'move', + 'drag', + 'cursor_position', + + // Common planner hints used in ByteBot plans/templates + 'browser', + 'file_download', + 'email', + 'Shell', + + // Butler gateway tool names (agent-side) + 'search_web_search', + 'search_news', + 'weather_get_current', + 'weather_get_forecast', + 'communications_send_email', + 'communications_send_sms', + 'calendar_list_events', + 'calendar_create_event', + 'calendar_delete_event', + 'notes_create', + 'notes_list', + 'notes_delete', + 'document_parse', + 'document_summarize', + 'data_extract', + 'data_transform', + 'file_read', + 'file_write', + 'file_list', + 'integration_webhook', + 'integration_api_call', +]; + +function normalizeToolTokenKey(token: string): string { + return token.trim().toLowerCase(); +} + +export function toCanonicalInteractionToolToken(token: string): CanonicalInteractionToolToken | null { + return INTERACTION_TOOL_ALIASES[normalizeToolTokenKey(token)] ?? null; +} + +function toCanonicalExecutionToolKey(token: string): string { + const key = normalizeToolTokenKey(token); + return EXECUTION_TOOL_ALIASES[key] ?? key; +} + +export function hasUserInteractionTool(suggestedTools?: string[] | null): boolean { + const tools = Array.isArray(suggestedTools) ? suggestedTools : []; + for (const raw of tools) { + if (typeof raw !== 'string') continue; + if (toCanonicalInteractionToolToken(raw)) return true; + } + return false; +} + +const DESKTOP_EXECUTION_TOOL_KEYS = new Set( + [ + // Planner hints (bytebot templates) + 'browser', + 'file_download', + + // Desktop / computer-use tools (agent-side) + 'computer', + 'click', + 'type', + 'key', + 'scroll', + 'screenshot', + 'move', + 'drag', + 'cursor_position', + ].map(normalizeToolTokenKey), +); + +export function hasDesktopExecutionTool(suggestedTools?: string[] | null): boolean { + const tools = Array.isArray(suggestedTools) ? suggestedTools : []; + for (const raw of tools) { + if (typeof raw !== 'string') continue; + if (DESKTOP_EXECUTION_TOOL_KEYS.has(normalizeToolTokenKey(raw))) return true; + } + return false; +} + +export function normalizeSuggestedToolsOrThrow(params: { + suggestedTools?: string[] | null; + allowedTools?: string[] | null; +}): string[] { + const rawTools = Array.isArray(params.suggestedTools) ? params.suggestedTools : []; + + const allowedTools = Array.isArray(params.allowedTools) ? params.allowedTools : []; + const allowedByKey = new Map(); + for (const t of allowedTools) { + if (typeof t !== 'string') continue; + const trimmed = t.trim(); + if (!trimmed) continue; + allowedByKey.set(normalizeToolTokenKey(trimmed), trimmed); + } + + const defaultByKey = new Map(); + for (const t of DEFAULT_EXECUTION_TOOL_TOKENS) { + defaultByKey.set(normalizeToolTokenKey(t), t); + } + + const normalized: string[] = []; + const seen = new Set(); + + for (const raw of rawTools) { + if (typeof raw !== 'string') { + throw new UnknownSuggestedToolTokenError({ + toolToken: String(raw), + allowedTools, + }); + } + + const trimmed = raw.trim(); + if (!trimmed) continue; + + const interaction = toCanonicalInteractionToolToken(trimmed); + const key = toCanonicalExecutionToolKey(trimmed); + const canonical = interaction ?? allowedByKey.get(key) ?? defaultByKey.get(key); + + if (!canonical) { + throw new UnknownSuggestedToolTokenError({ + toolToken: trimmed, + allowedTools, + }); + } + + if (!seen.has(canonical)) { + normalized.push(canonical); + seen.add(canonical); + } + } + + return normalized; +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/agent.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/agent.controller.ts new file mode 100644 index 000000000..08f76309c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/agent.controller.ts @@ -0,0 +1,381 @@ +/** + * Agent Controller + * v1.0.0: Phase 7 Multi-Agent Orchestration + * + * REST API endpoints for managing agents in the multi-agent orchestration system. + * Provides endpoints for: + * - Agent registration and deregistration + * - Agent health and status + * - Agent discovery and listing + * - Task assignment tracking + * + * Security: These endpoints should be protected by internal auth in production. + */ + +import { + Controller, + Get, + Post, + Delete, + Body, + Param, + Query, + HttpCode, + HttpStatus, + Logger, +} from '@nestjs/common'; +import { + AgentRegistryService, + AgentInfo, + AgentStatus, + RegisterAgentInput, +} from '../services/agent-registry.service'; +import { AgentRouterService } from '../services/agent-router.service'; +import { AgentHealthService } from '../services/agent-health.service'; + +// DTOs +interface RegisterAgentDto { + name: string; + endpoint: string; + podName?: string; + nodeIp?: string; + namespace?: string; + maxConcurrentTasks?: number; + weight?: number; + version?: string; + metadata?: Record; + capabilities?: Array<{ + name: string; + toolPattern: string; + priority?: number; + costMultiplier?: number; + requiresExclusiveWorkspace?: boolean; + }>; +} + +interface HeartbeatDto { + currentTaskCount?: number; +} + +@Controller('agents') +export class AgentController { + private readonly logger = new Logger(AgentController.name); + + constructor( + private readonly agentRegistry: AgentRegistryService, + private readonly agentRouter: AgentRouterService, + private readonly agentHealth: AgentHealthService, + ) {} + + /** + * Register a new agent or update existing + * POST /api/v1/agents + */ + @Post() + @HttpCode(HttpStatus.CREATED) + async registerAgent(@Body() dto: RegisterAgentDto): Promise<{ + success: boolean; + agent: AgentInfo; + }> { + this.logger.log(`Registering agent: ${dto.name} at ${dto.endpoint}`); + + const agent = await this.agentRegistry.registerAgent(dto as RegisterAgentInput); + + return { + success: true, + agent, + }; + } + + /** + * List all agents with optional filtering + * GET /api/v1/agents + */ + @Get() + async listAgents( + @Query('status') status?: string, + @Query('namespace') namespace?: string, + @Query('hasCapacity') hasCapacity?: string, + ): Promise<{ + success: boolean; + agents: AgentInfo[]; + count: number; + }> { + const filters: { + status?: AgentStatus | AgentStatus[]; + namespace?: string; + hasCapacity?: boolean; + } = {}; + + if (status) { + if (status.includes(',')) { + filters.status = status.split(',') as AgentStatus[]; + } else { + filters.status = status as AgentStatus; + } + } + + if (namespace) { + filters.namespace = namespace; + } + + if (hasCapacity === 'true') { + filters.hasCapacity = true; + } + + const agents = await this.agentRegistry.getAgents(filters); + + return { + success: true, + agents, + count: agents.length, + }; + } + + /** + * Get available agents (healthy with capacity) + * GET /api/v1/agents/available + */ + @Get('available') + async getAvailableAgents(): Promise<{ + success: boolean; + agents: AgentInfo[]; + count: number; + }> { + const agents = await this.agentRegistry.getAvailableAgents(); + + return { + success: true, + agents, + count: agents.length, + }; + } + + /** + * Get agent registry statistics + * GET /api/v1/agents/stats + */ + @Get('stats') + async getStats(): Promise<{ + success: boolean; + stats: { + total: number; + healthy: number; + unhealthy: number; + draining: number; + offline: number; + totalCapacity: number; + usedCapacity: number; + utilizationPercent: number; + }; + }> { + const stats = await this.agentRegistry.getStats(); + + return { + success: true, + stats, + }; + } + + /** + * Get routing statistics + * GET /api/v1/agents/routing/stats + */ + @Get('routing/stats') + async getRoutingStats(): Promise<{ + success: boolean; + stats: { + totalAssignments: number; + completedAssignments: number; + failedAssignments: number; + reassignments: number; + avgCompletionTimeMs: number; + routingReasons: Record; + }; + }> { + const stats = await this.agentRouter.getStats(); + + return { + success: true, + stats, + }; + } + + /** + * Get overall health summary + * GET /api/v1/agents/health/summary + */ + @Get('health/summary') + async getHealthSummary(): Promise<{ + success: boolean; + summary: { + totalAgents: number; + healthyAgents: number; + unhealthyAgents: number; + avgSuccessRate: number; + avgLatencyMs: number; + agentsSummary: Array<{ + agentId: string; + name: string; + status: string; + successRate: number; + avgLatencyMs: number; + }>; + }; + }> { + const summary = await this.agentHealth.getOverallHealthSummary(); + + return { + success: true, + summary, + }; + } + + /** + * Get a specific agent by ID + * GET /api/v1/agents/:agentId + */ + @Get(':agentId') + async getAgent(@Param('agentId') agentId: string): Promise<{ + success: boolean; + agent: AgentInfo | null; + }> { + const agent = await this.agentRegistry.getAgent(agentId); + + return { + success: agent !== null, + agent, + }; + } + + /** + * Update agent heartbeat + * POST /api/v1/agents/:agentId/heartbeat + */ + @Post(':agentId/heartbeat') + @HttpCode(HttpStatus.OK) + async heartbeat( + @Param('agentId') agentId: string, + @Body() dto: HeartbeatDto, + ): Promise<{ + success: boolean; + }> { + await this.agentRegistry.updateHeartbeat(agentId, dto.currentTaskCount); + + return { + success: true, + }; + } + + /** + * Get agent health statistics + * GET /api/v1/agents/:agentId/health + */ + @Get(':agentId/health') + async getAgentHealth(@Param('agentId') agentId: string): Promise<{ + success: boolean; + health: { + successRate: number; + avgLatencyMs: number; + p95LatencyMs: number; + checksInLastHour: number; + failuresInLastHour: number; + consecutiveSuccesses: number; + consecutiveFailures: number; + }; + }> { + const health = await this.agentHealth.getHealthStats(agentId); + + return { + success: true, + health, + }; + } + + /** + * Get agent health check history + * GET /api/v1/agents/:agentId/health/history + */ + @Get(':agentId/health/history') + async getAgentHealthHistory( + @Param('agentId') agentId: string, + @Query('limit') limit?: string, + ): Promise<{ + success: boolean; + history: Array<{ + agentId: string; + endpoint: string; + success: boolean; + statusCode?: number; + latencyMs: number; + error?: string; + timestamp: Date; + }>; + }> { + const parsedLimit = limit ? parseInt(limit, 10) : 100; + const history = await this.agentHealth.getHealthHistory(agentId, parsedLimit); + + return { + success: true, + history, + }; + } + + /** + * Force a health check on a specific agent + * POST /api/v1/agents/:agentId/health/check + */ + @Post(':agentId/health/check') + @HttpCode(HttpStatus.OK) + async forceHealthCheck(@Param('agentId') agentId: string): Promise<{ + success: boolean; + result: { + agentId: string; + endpoint: string; + success: boolean; + statusCode?: number; + latencyMs: number; + error?: string; + timestamp: Date; + }; + }> { + const result = await this.agentHealth.forceHealthCheck(agentId); + + return { + success: true, + result, + }; + } + + /** + * Mark agent as draining (no new tasks) + * POST /api/v1/agents/:agentId/drain + */ + @Post(':agentId/drain') + @HttpCode(HttpStatus.OK) + async drainAgent(@Param('agentId') agentId: string): Promise<{ + success: boolean; + }> { + await this.agentRegistry.drainAgent(agentId); + + return { + success: true, + }; + } + + /** + * Deregister an agent + * DELETE /api/v1/agents/:agentId + */ + @Delete(':agentId') + @HttpCode(HttpStatus.OK) + async deregisterAgent(@Param('agentId') agentId: string): Promise<{ + success: boolean; + }> { + await this.agentRegistry.deregisterAgent(agentId); + + return { + success: true, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/ai-features.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/ai-features.controller.ts new file mode 100644 index 000000000..8a97f8611 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/ai-features.controller.ts @@ -0,0 +1,630 @@ +/** + * AI Features Controller + * Phase 9 (v5.4.0): Advanced AI Features + * + * REST API endpoints for AI-powered features: + * - Goal refinement and analysis + * - Automatic template generation + * - Failure pattern analysis and predictions + */ + +import { + Controller, + Get, + Post, + Body, + Param, + Query, + HttpCode, + HttpStatus, + Headers, + BadRequestException, +} from '@nestjs/common'; +import { Throttle } from '@nestjs/throttler'; +import { + ApiTags, + ApiOperation, + ApiResponse, + ApiParam, + ApiQuery, + ApiHeader, + ApiProperty, +} from '@nestjs/swagger'; +import { + GoalRefinementService, + RefinementRequest, + RefinementResult, + QuickAnalysisResult, +} from '../services/goal-refinement.service'; +import { + TemplateGenerationService, + TemplateGenerationRequest, + TemplateGenerationResult, + TemplateCandidate, +} from '../services/template-generation.service'; +import { + FailureAnalysisService, + FailureCategory, + FailureAnalysisResult, + FailurePattern, + FailureTrend, + PredictiveAnalysis, +} from '../services/failure-analysis.service'; + +// ============================================================================ +// Goal Refinement DTOs +// ============================================================================ + +class RefineGoalContextDto { + @ApiProperty({ required: false, type: [String], description: 'Previous goals by the user' }) + previousGoals?: string[]; + + @ApiProperty({ required: false, description: 'User preferences' }) + userPreferences?: Record; + + @ApiProperty({ required: false, description: 'Domain context (e.g., web-automation, data-processing)' }) + domain?: string; +} + +class RefineGoalOptionsDto { + @ApiProperty({ required: false, description: 'Maximum number of suggestions to generate', default: 3 }) + maxSuggestions?: number; + + @ApiProperty({ required: false, description: 'Include clarifying questions', default: true }) + includeQuestions?: boolean; + + @ApiProperty({ required: false, description: 'Include goal decomposition', default: true }) + includeDecomposition?: boolean; + + @ApiProperty({ required: false, enum: ['concise', 'detailed', 'technical'], description: 'Response style' }) + style?: 'concise' | 'detailed' | 'technical'; +} + +class RefineGoalDto { + @ApiProperty({ description: 'The goal text to refine', minLength: 5 }) + goal: string; + + @ApiProperty({ required: false, type: RefineGoalContextDto }) + context?: RefineGoalContextDto; + + @ApiProperty({ required: false, type: RefineGoalOptionsDto }) + options?: RefineGoalOptionsDto; +} + +class QuickAnalyzeDto { + @ApiProperty({ description: 'The goal text to quickly analyze' }) + goal: string; +} + +class DecomposeGoalDto { + @ApiProperty({ description: 'The goal to decompose into sub-goals' }) + goal: string; + + @ApiProperty({ required: false, description: 'Maximum number of sub-goals', default: 5 }) + maxSubGoals?: number; +} + +class HistorySuggestionsDto { + @ApiProperty({ description: 'Partial goal text to find similar goals' }) + partialGoal: string; + + @ApiProperty({ required: false, description: 'Maximum suggestions', default: 5 }) + limit?: number; +} + +// ============================================================================ +// Template Generation DTOs +// ============================================================================ + +class GenerateTemplatesOptionsDto { + @ApiProperty({ required: false, description: 'Minimum goals required for pattern detection', default: 3 }) + minGoalsForPattern?: number; + + @ApiProperty({ required: false, description: 'Similarity threshold (0-1)', default: 0.6 }) + similarityThreshold?: number; + + @ApiProperty({ required: false, description: 'Maximum templates to generate', default: 10 }) + maxTemplates?: number; + + @ApiProperty({ required: false, description: 'Include checklist templates', default: true }) + includeChecklist?: boolean; +} + +class SuggestVariablesDto { + @ApiProperty({ description: 'The goal text to analyze for variables' }) + goal: string; +} + +// ============================================================================ +// Failure Analysis DTOs +// ============================================================================ + +class AnalyzeFailureDto { + @ApiProperty({ description: 'Goal run ID to analyze' }) + goalRunId: string; + + @ApiProperty({ required: false, description: 'Include historical context', default: true }) + includeHistory?: boolean; +} + +class PredictFailureDto { + @ApiProperty({ description: 'The goal to predict failure risk for' }) + goal: string; + + @ApiProperty({ required: false, description: 'Constraints for the goal execution' }) + constraints?: Record; +} + +class FailurePatternsQueryDto { + @ApiProperty({ required: false, description: 'Number of days to analyze', default: '30' }) + days?: string; + + @ApiProperty({ required: false, description: 'Maximum patterns to return', default: '10' }) + limit?: string; +} + +class FailureTrendsQueryDto { + @ApiProperty({ required: false, description: 'Number of days to analyze', default: '30' }) + days?: string; + + @ApiProperty({ required: false, enum: ['day', 'week', 'month'], description: 'Trend granularity' }) + granularity?: 'day' | 'week' | 'month'; +} + +// ============================================================================ +// AI Features Controller +// ============================================================================ + +@ApiTags('ai-features') +@Controller('ai') +export class AiFeaturesController { + constructor( + private goalRefinementService: GoalRefinementService, + private templateGenerationService: TemplateGenerationService, + private failureAnalysisService: FailureAnalysisService, + ) {} + + // ========================================================================== + // Goal Refinement Endpoints + // ========================================================================== + + /** + * POST /api/v1/ai/refinement/refine + * Analyze and refine a goal using AI + * Rate limited: 10 per minute (LLM operation) + */ + @Post('refinement/refine') + @HttpCode(HttpStatus.OK) + @Throttle({ default: { limit: 10, ttl: 60000 } }) + @ApiOperation({ + summary: 'Refine a goal', + description: 'Uses AI to analyze a goal and generate refined SMART goal suggestions with clarifying questions.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Goal refined successfully' }) + @ApiResponse({ status: 400, description: 'Invalid input' }) + @ApiResponse({ status: 429, description: 'Rate limit exceeded' }) + async refineGoal( + @Body() dto: RefineGoalDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: RefinementResult }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.goal || dto.goal.trim().length < 5) { + throw new BadRequestException('Goal must be at least 5 characters'); + } + + const request: RefinementRequest = { + tenantId, + goal: dto.goal.trim(), + context: dto.context, + options: dto.options, + }; + + const result = await this.goalRefinementService.refineGoal(request); + + return { + success: true, + data: result, + }; + } + + /** + * POST /api/v1/ai/refinement/quick-analyze + * Quick heuristic analysis without LLM call + */ + @Post('refinement/quick-analyze') + @HttpCode(HttpStatus.OK) + @ApiOperation({ + summary: 'Quick goal analysis', + description: 'Fast heuristic-based analysis without calling the LLM. Returns clarity, specificity, and actionability scores.', + }) + @ApiResponse({ status: 200, description: 'Analysis completed' }) + async quickAnalyze( + @Body() dto: QuickAnalyzeDto, + ): Promise<{ success: boolean; data: QuickAnalysisResult }> { + if (!dto.goal || dto.goal.trim().length < 3) { + throw new BadRequestException('Goal must be at least 3 characters'); + } + + const result = await this.goalRefinementService.quickAnalyze(dto.goal.trim()); + + return { + success: true, + data: result, + }; + } + + /** + * POST /api/v1/ai/refinement/decompose + * Decompose a complex goal into sub-goals + */ + @Post('refinement/decompose') + @HttpCode(HttpStatus.OK) + @Throttle({ default: { limit: 10, ttl: 60000 } }) + @ApiOperation({ + summary: 'Decompose goal into sub-goals', + description: 'Breaks down a complex goal into smaller, actionable sub-goals using AI.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Goal decomposed successfully' }) + @ApiResponse({ status: 429, description: 'Rate limit exceeded' }) + async decomposeGoal( + @Body() dto: DecomposeGoalDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: Array<{ subGoal: string; order: number; estimatedDuration?: string }> }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.goal || dto.goal.trim().length < 5) { + throw new BadRequestException('Goal must be at least 5 characters'); + } + + const result = await this.goalRefinementService.decomposeGoal( + tenantId, + dto.goal.trim(), + dto.maxSubGoals || 5, + ); + + return { + success: true, + data: result, + }; + } + + /** + * POST /api/v1/ai/refinement/history-suggestions + * Get suggestions from historical successful goals + */ + @Post('refinement/history-suggestions') + @HttpCode(HttpStatus.OK) + @ApiOperation({ + summary: 'Get suggestions from history', + description: 'Returns similar successful goals from history based on partial input.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Suggestions retrieved' }) + async getHistorySuggestions( + @Body() dto: HistorySuggestionsDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: Array<{ goal: string; similarity: number; usageCount: number }> }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.partialGoal || dto.partialGoal.trim().length < 3) { + throw new BadRequestException('Partial goal must be at least 3 characters'); + } + + const result = await this.goalRefinementService.getSuggestionsFromHistory( + tenantId, + dto.partialGoal.trim(), + dto.limit || 5, + ); + + return { + success: true, + data: result, + }; + } + + // ========================================================================== + // Template Generation Endpoints + // ========================================================================== + + /** + * POST /api/v1/ai/templates/generate + * Generate templates from historical goals + * Rate limited: 5 per minute (expensive operation) + */ + @Post('templates/generate') + @HttpCode(HttpStatus.OK) + @Throttle({ default: { limit: 5, ttl: 60000 } }) + @ApiOperation({ + summary: 'Generate templates from history', + description: 'Analyzes completed goals to automatically generate reusable templates. Identifies patterns and extracts variables.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Templates generated successfully' }) + @ApiResponse({ status: 429, description: 'Rate limit exceeded' }) + async generateTemplates( + @Body() dto: GenerateTemplatesOptionsDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: TemplateGenerationResult }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const request: TemplateGenerationRequest = { + tenantId, + options: { + minGoalsForPattern: dto.minGoalsForPattern, + similarityThreshold: dto.similarityThreshold, + maxTemplates: dto.maxTemplates, + includeChecklist: dto.includeChecklist, + }, + }; + + const result = await this.templateGenerationService.generateTemplatesFromHistory(request); + + return { + success: true, + data: result, + }; + } + + /** + * POST /api/v1/ai/templates/from-goal/:goalRunId + * Generate a template from a specific goal run + */ + @Post('templates/from-goal/:goalRunId') + @HttpCode(HttpStatus.OK) + @Throttle({ default: { limit: 10, ttl: 60000 } }) + @ApiOperation({ + summary: 'Generate template from goal run', + description: 'Creates a reusable template from a specific completed goal run.', + }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Template generated successfully' }) + @ApiResponse({ status: 404, description: 'Goal run not found' }) + async generateTemplateFromGoal( + @Param('goalRunId') goalRunId: string, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: TemplateCandidate | null }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const result = await this.templateGenerationService.generateTemplateFromGoal(tenantId, goalRunId); + + return { + success: true, + data: result, + }; + } + + /** + * POST /api/v1/ai/templates/suggest-variables + * Suggest variables for a goal pattern + */ + @Post('templates/suggest-variables') + @HttpCode(HttpStatus.OK) + @ApiOperation({ + summary: 'Suggest variables for goal', + description: 'Analyzes a goal and suggests variables that could be extracted for templating.', + }) + @ApiResponse({ status: 200, description: 'Variables suggested' }) + async suggestVariables( + @Body() dto: SuggestVariablesDto, + ): Promise<{ success: boolean; data: Array<{ name: string; value: string; type: string; confidence: number }> }> { + if (!dto.goal || dto.goal.trim().length < 5) { + throw new BadRequestException('Goal must be at least 5 characters'); + } + + const result = await this.templateGenerationService.suggestVariables(dto.goal.trim()); + + return { + success: true, + data: result, + }; + } + + // ========================================================================== + // Failure Analysis Endpoints + // ========================================================================== + + /** + * POST /api/v1/ai/failures/analyze + * Analyze a specific failed goal run + * Rate limited: 10 per minute (LLM operation) + */ + @Post('failures/analyze') + @HttpCode(HttpStatus.OK) + @Throttle({ default: { limit: 10, ttl: 60000 } }) + @ApiOperation({ + summary: 'Analyze failure', + description: 'Uses AI to analyze a failed goal run, identify root causes, and suggest remediations.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Failure analyzed successfully' }) + @ApiResponse({ status: 404, description: 'Goal run not found' }) + @ApiResponse({ status: 429, description: 'Rate limit exceeded' }) + async analyzeFailure( + @Body() dto: AnalyzeFailureDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: FailureAnalysisResult }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.goalRunId) { + throw new BadRequestException('goalRunId is required'); + } + + const result = await this.failureAnalysisService.analyzeFailure({ + tenantId, + goalRunId: dto.goalRunId, + includeHistory: dto.includeHistory, + }); + + return { + success: true, + data: result, + }; + } + + /** + * GET /api/v1/ai/failures/patterns + * Get failure patterns for the tenant + */ + @Get('failures/patterns') + @ApiOperation({ + summary: 'Get failure patterns', + description: 'Returns clustered failure patterns detected across goal runs.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiQuery({ name: 'days', required: false, description: 'Number of days to analyze' }) + @ApiQuery({ name: 'limit', required: false, description: 'Maximum patterns to return' }) + @ApiResponse({ status: 200, description: 'Patterns retrieved' }) + async getFailurePatterns( + @Query() query: FailurePatternsQueryDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: FailurePattern[] }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const result = await this.failureAnalysisService.getFailurePatterns(tenantId, { + days: query.days ? parseInt(query.days, 10) : 30, + limit: query.limit ? parseInt(query.limit, 10) : 10, + }); + + return { + success: true, + data: result, + }; + } + + /** + * GET /api/v1/ai/failures/trends + * Get failure trends over time + */ + @Get('failures/trends') + @ApiOperation({ + summary: 'Get failure trends', + description: 'Returns failure trends aggregated by time period.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiQuery({ name: 'days', required: false, description: 'Number of days to analyze' }) + @ApiQuery({ name: 'granularity', required: false, enum: ['day', 'week', 'month'] }) + @ApiResponse({ status: 200, description: 'Trends retrieved' }) + async getFailureTrends( + @Query() query: FailureTrendsQueryDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: FailureTrend[] }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const result = await this.failureAnalysisService.getFailureTrends(tenantId, { + days: query.days ? parseInt(query.days, 10) : 30, + granularity: query.granularity, + }); + + return { + success: true, + data: result, + }; + } + + /** + * POST /api/v1/ai/failures/predict + * Predict failure risk for a goal + */ + @Post('failures/predict') + @HttpCode(HttpStatus.OK) + @ApiOperation({ + summary: 'Predict failure risk', + description: 'Analyzes historical data to predict potential failure points for a goal before execution.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Prediction completed' }) + async predictFailure( + @Body() dto: PredictFailureDto, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ success: boolean; data: PredictiveAnalysis }> { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.goal || dto.goal.trim().length < 5) { + throw new BadRequestException('Goal must be at least 5 characters'); + } + + const result = await this.failureAnalysisService.predictFailureRisk( + tenantId, + dto.goal.trim(), + dto.constraints, + ); + + return { + success: true, + data: result, + }; + } + + /** + * GET /api/v1/ai/failures/remediations/:category + * Get remediation suggestions for a failure category + */ + @Get('failures/remediations/:category') + @ApiOperation({ + summary: 'Get remediation suggestions', + description: 'Returns common remediation actions for a specific failure category.', + }) + @ApiParam({ + name: 'category', + enum: Object.values(FailureCategory), + description: 'Failure category', + }) + @ApiResponse({ status: 200, description: 'Remediations retrieved' }) + async getRemediations( + @Param('category') category: string, + ): Promise<{ success: boolean; data: Array<{ action: string; successRate: number }> }> { + if (!Object.values(FailureCategory).includes(category as FailureCategory)) { + throw new BadRequestException(`Invalid category. Must be one of: ${Object.values(FailureCategory).join(', ')}`); + } + + const result = await this.failureAnalysisService.getRemediationSuggestions(category as FailureCategory); + + return { + success: true, + data: result, + }; + } + + // ========================================================================== + // Utility Endpoints + // ========================================================================== + + /** + * GET /api/v1/ai/categories + * Get available failure categories + */ + @Get('categories') + @ApiOperation({ + summary: 'Get failure categories', + description: 'Returns all available failure categories for classification.', + }) + @ApiResponse({ status: 200, description: 'Categories retrieved' }) + getFailureCategories(): { success: boolean; data: string[] } { + return { + success: true, + data: Object.values(FailureCategory), + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/analytics.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/analytics.controller.ts new file mode 100644 index 000000000..04d237793 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/analytics.controller.ts @@ -0,0 +1,666 @@ +/** + * Analytics Controller + * v2.0.0: Phase 7 Enhanced Features - Execution Insights + * + * REST API endpoints for analytics and dashboard data. + * Provides access to KPIs, time-series data, and comparative analysis. + * Phase 7: Goal run analytics, template analytics, batch analytics. + */ + +import { + Controller, + Get, + Query, + Param, + HttpCode, + HttpStatus, + Logger, + BadRequestException, +} from '@nestjs/common'; +import { + AnalyticsQueryService, + TimeRange, + KPISummary, + TimeSeriesPoint, + DashboardSummary, +} from '../services/analytics-query.service'; +import { MetricsCollectorService } from '../services/metrics-collector.service'; +import { AggregationPeriod } from '../services/metrics-aggregation.service'; + +// Query DTOs +interface TimeRangeQuery { + range?: string; + start?: string; + end?: string; +} + +interface TimeSeriesQuery extends TimeRangeQuery { + period?: string; + metric?: string; +} + +@Controller('analytics') +export class AnalyticsController { + private readonly logger = new Logger(AnalyticsController.name); + + constructor( + private readonly analyticsQuery: AnalyticsQueryService, + private readonly metricsCollector: MetricsCollectorService, + ) {} + + /** + * Get complete dashboard summary + * GET /api/v1/analytics/dashboard/:tenantId + */ + @Get('dashboard/:tenantId') + @HttpCode(HttpStatus.OK) + async getDashboard( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery, + ): Promise<{ + success: boolean; + data: DashboardSummary; + timeRange: { start: Date; end: Date; range: string }; + }> { + const { range, start, end } = this.parseTimeRange(query); + + const data = await this.analyticsQuery.getDashboardSummary( + tenantId, + range, + start, + end, + ); + + return { + success: true, + data, + timeRange: { + start: start ?? new Date(Date.now() - 24 * 60 * 60 * 1000), + end: end ?? new Date(), + range: query.range ?? '24h', + }, + }; + } + + /** + * Get KPI summary + * GET /api/v1/analytics/kpis/:tenantId + */ + @Get('kpis/:tenantId') + @HttpCode(HttpStatus.OK) + async getKPIs( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery, + ): Promise<{ + success: boolean; + kpis: KPISummary; + }> { + const { range, start, end } = this.parseTimeRange(query); + + const kpis = await this.analyticsQuery.getKPISummary( + tenantId, + range, + start, + end, + ); + + return { + success: true, + kpis, + }; + } + + /** + * Get time-series data for a specific metric + * GET /api/v1/analytics/timeseries/:tenantId + */ + @Get('timeseries/:tenantId') + @HttpCode(HttpStatus.OK) + async getTimeSeries( + @Param('tenantId') tenantId: string, + @Query() query: TimeSeriesQuery, + ): Promise<{ + success: boolean; + metric: string; + period: string; + data: TimeSeriesPoint[]; + }> { + const { range, start, end } = this.parseTimeRange(query); + const metric = query.metric ?? 'workflow_execution_duration'; + const period = this.parsePeriod(query.period ?? '5m'); + + const data = await this.analyticsQuery.getTimeSeries( + tenantId, + metric, + range, + period, + start, + end, + ); + + return { + success: true, + metric, + period, + data, + }; + } + + /** + * Get execution trends over time + * GET /api/v1/analytics/executions/:tenantId + */ + @Get('executions/:tenantId') + @HttpCode(HttpStatus.OK) + async getExecutions( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery & { period?: string }, + ): Promise<{ + success: boolean; + data: TimeSeriesPoint[]; + }> { + const { range, start, end } = this.parseTimeRange(query); + const period = this.parsePeriod(query.period ?? '1h'); + + const data = await this.analyticsQuery.getExecutionTrends( + tenantId, + range, + period, + start, + end, + ); + + return { + success: true, + data, + }; + } + + /** + * Get top workflows by execution count + * GET /api/v1/analytics/top-workflows/:tenantId + */ + @Get('top-workflows/:tenantId') + @HttpCode(HttpStatus.OK) + async getTopWorkflows( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery & { limit?: string }, + ): Promise<{ + success: boolean; + workflows: Array<{ + workflowName: string; + templateId: string | null; + count: number; + successRate: number; + avgDurationMs: number; + }>; + }> { + const { range, start, end } = this.parseTimeRange(query); + const limit = parseInt(query.limit ?? '10', 10); + + const workflows = await this.analyticsQuery.getTopWorkflows( + tenantId, + range, + limit, + start, + end, + ); + + return { + success: true, + workflows, + }; + } + + /** + * Get top agents by task completion + * GET /api/v1/analytics/top-agents/:tenantId + */ + @Get('top-agents/:tenantId') + @HttpCode(HttpStatus.OK) + async getTopAgents( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery & { limit?: string }, + ): Promise<{ + success: boolean; + agents: Array<{ + agentId: string; + agentName: string; + tasksCompleted: number; + tasksFailed: number; + avgDurationMs: number; + }>; + }> { + const { range, start, end } = this.parseTimeRange(query); + const limit = parseInt(query.limit ?? '10', 10); + + const agents = await this.analyticsQuery.getTopAgents( + tenantId, + range, + limit, + start, + end, + ); + + return { + success: true, + agents, + }; + } + + /** + * Get recent errors + * GET /api/v1/analytics/errors/:tenantId + */ + @Get('errors/:tenantId') + @HttpCode(HttpStatus.OK) + async getRecentErrors( + @Param('tenantId') tenantId: string, + @Query('limit') limitStr?: string, + ): Promise<{ + success: boolean; + errors: Array<{ + workflowRunId: string; + workflowName: string; + errorType: string | null; + errorMessage: string | null; + timestamp: Date; + }>; + }> { + const limit = parseInt(limitStr ?? '10', 10); + + const errors = await this.analyticsQuery.getRecentErrors(tenantId, limit); + + return { + success: true, + errors, + }; + } + + /** + * Get period-over-period comparison + * GET /api/v1/analytics/comparison/:tenantId + */ + @Get('comparison/:tenantId') + @HttpCode(HttpStatus.OK) + async getComparison( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery, + ): Promise<{ + success: boolean; + current: KPISummary; + previous: KPISummary; + changes: { + executionsChange: number; + successRateChange: number; + durationChange: number; + }; + }> { + const { range, start, end } = this.parseTimeRange(query); + + const comparison = await this.analyticsQuery.getComparison( + tenantId, + range, + start, + end, + ); + + return { + success: true, + ...comparison, + }; + } + + /** + * Get real-time metrics summary + * GET /api/v1/analytics/realtime/:tenantId + */ + @Get('realtime/:tenantId') + @HttpCode(HttpStatus.OK) + async getRealtimeMetrics( + @Param('tenantId') tenantId: string, + @Query('minutes') minutesStr?: string, + ): Promise<{ + success: boolean; + data: { + workflowsStarted: number; + workflowsCompleted: number; + workflowsFailed: number; + avgDurationMs: number; + stepsCompleted: number; + stepsFailed: number; + }; + timestamp: Date; + }> { + const minutes = parseInt(minutesStr ?? '5', 10); + + const data = await this.metricsCollector.getRealtimeSummary( + tenantId, + minutes, + ); + + return { + success: true, + data, + timestamp: new Date(), + }; + } + + /** + * Get workflow execution history + * GET /api/v1/analytics/history/:tenantId + */ + @Get('history/:tenantId') + @HttpCode(HttpStatus.OK) + async getExecutionHistory( + @Param('tenantId') tenantId: string, + @Query() + query: TimeRangeQuery & { + status?: string; + workflowName?: string; + limit?: string; + offset?: string; + }, + ): Promise<{ + success: boolean; + executions: any[]; + total: number; + pagination: { + limit: number; + offset: number; + hasMore: boolean; + }; + }> { + const { start, end } = this.parseTimeRange(query); + const limit = parseInt(query.limit ?? '50', 10); + const offset = parseInt(query.offset ?? '0', 10); + + const result = await this.metricsCollector.getWorkflowHistory(tenantId, { + limit, + offset, + status: query.status, + workflowName: query.workflowName, + since: start, + until: end, + }); + + return { + success: true, + executions: result.executions, + total: result.total, + pagination: { + limit, + offset, + hasMore: offset + result.executions.length < result.total, + }, + }; + } + + /** + * Get available metrics + * GET /api/v1/analytics/metrics + */ + @Get('metrics') + @HttpCode(HttpStatus.OK) + getAvailableMetrics(): { + success: boolean; + metrics: Array<{ + name: string; + description: string; + type: string; + unit?: string; + }>; + } { + return { + success: true, + metrics: [ + { + name: 'workflow_execution_duration', + description: 'Workflow execution duration in milliseconds', + type: 'gauge', + unit: 'ms', + }, + { + name: 'workflow_success_rate', + description: 'Percentage of successful workflow executions', + type: 'gauge', + unit: '%', + }, + { + name: 'step_execution_duration', + description: 'Step execution duration in milliseconds', + type: 'gauge', + unit: 'ms', + }, + { + name: 'workflow_executions_total', + description: 'Total number of workflow executions', + type: 'counter', + }, + { + name: 'step_executions_total', + description: 'Total number of step executions', + type: 'counter', + }, + ], + }; + } + + // ========================================================================= + // Phase 7: Goal Run Analytics & Execution Insights + // ========================================================================= + + /** + * Get goal run KPIs + * GET /api/v1/analytics/goal-runs/:tenantId + */ + @Get('goal-runs/:tenantId') + @HttpCode(HttpStatus.OK) + async getGoalRunKPIs( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery, + ) { + const { range, start, end } = this.parseTimeRange(query); + + const kpis = await this.analyticsQuery.getGoalRunKPIs( + tenantId, + range, + start, + end, + ); + + return { + success: true, + data: kpis, + }; + } + + /** + * Get goal run trends + * GET /api/v1/analytics/goal-runs/:tenantId/trends + */ + @Get('goal-runs/:tenantId/trends') + @HttpCode(HttpStatus.OK) + async getGoalRunTrends( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery, + ) { + const { range, start, end } = this.parseTimeRange(query); + + const trends = await this.analyticsQuery.getGoalRunTrends( + tenantId, + range, + start, + end, + ); + + return { + success: true, + data: trends, + }; + } + + /** + * Get top templates by usage + * GET /api/v1/analytics/templates/:tenantId + */ + @Get('templates/:tenantId') + @HttpCode(HttpStatus.OK) + async getTopTemplates( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery & { limit?: string }, + ) { + const { range, start, end } = this.parseTimeRange(query); + const limit = parseInt(query.limit ?? '10', 10); + + const templates = await this.analyticsQuery.getTopTemplates( + tenantId, + range, + limit, + start, + end, + ); + + return { + success: true, + data: templates, + }; + } + + /** + * Get batch execution statistics + * GET /api/v1/analytics/batches/:tenantId + */ + @Get('batches/:tenantId') + @HttpCode(HttpStatus.OK) + async getBatchStats( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery, + ) { + const { range, start, end } = this.parseTimeRange(query); + + const stats = await this.analyticsQuery.getBatchStats( + tenantId, + range, + start, + end, + ); + + return { + success: true, + data: stats, + }; + } + + /** + * Get goal run phase distribution (active runs only) + * GET /api/v1/analytics/phases/:tenantId + */ + @Get('phases/:tenantId') + @HttpCode(HttpStatus.OK) + async getPhaseDistribution(@Param('tenantId') tenantId: string) { + const distribution = await this.analyticsQuery.getPhaseDistribution(tenantId); + + return { + success: true, + data: distribution, + }; + } + + /** + * Get complete execution insights dashboard + * GET /api/v1/analytics/insights/:tenantId + */ + @Get('insights/:tenantId') + @HttpCode(HttpStatus.OK) + async getExecutionInsights( + @Param('tenantId') tenantId: string, + @Query() query: TimeRangeQuery, + ) { + const { range, start, end } = this.parseTimeRange(query); + + const insights = await this.analyticsQuery.getExecutionInsights( + tenantId, + range, + start, + end, + ); + + return { + success: true, + data: insights, + timeRange: { + start: start ?? new Date(Date.now() - 24 * 60 * 60 * 1000), + end: end ?? new Date(), + range: query.range ?? '24h', + }, + }; + } + + // ========================================================================= + // Utility Methods + // ========================================================================= + + private parseTimeRange(query: TimeRangeQuery): { + range: TimeRange; + start?: Date; + end?: Date; + } { + let range: TimeRange; + + switch (query.range) { + case '1h': + range = TimeRange.LAST_HOUR; + break; + case '6h': + range = TimeRange.LAST_6_HOURS; + break; + case '24h': + range = TimeRange.LAST_24_HOURS; + break; + case '7d': + range = TimeRange.LAST_7_DAYS; + break; + case '30d': + range = TimeRange.LAST_30_DAYS; + break; + case 'custom': + range = TimeRange.CUSTOM; + break; + default: + range = TimeRange.LAST_24_HOURS; + } + + let start: Date | undefined; + let end: Date | undefined; + + if (query.start) { + start = new Date(query.start); + if (isNaN(start.getTime())) { + throw new BadRequestException('Invalid start date'); + } + } + + if (query.end) { + end = new Date(query.end); + if (isNaN(end.getTime())) { + throw new BadRequestException('Invalid end date'); + } + } + + return { range, start, end }; + } + + private parsePeriod(period: string): AggregationPeriod { + switch (period) { + case '1m': + case '5m': + case '15m': + case '1h': + case '1d': + return period as AggregationPeriod; + default: + return '5m'; + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/approval.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/approval.controller.ts new file mode 100644 index 000000000..b77e354a6 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/approval.controller.ts @@ -0,0 +1,411 @@ +/** + * Approval Controller + * v1.0.1 M5: API endpoints for managing high-risk action approvals + * + * Endpoints: + * - GET /api/v1/approvals List pending approvals + * - GET /api/v1/approvals/count Get count of approvals by status + * - GET /api/v1/approvals/:id Get approval details + * - POST /api/v1/approvals/:id/approve Approve action + * - POST /api/v1/approvals/:id/reject Reject action + * - GET /api/v1/approvals/stats Get approval statistics + */ + +import { + Controller, + Get, + Post, + Param, + Body, + Query, + HttpException, + HttpStatus, + Logger, + Headers, +} from '@nestjs/common'; +import { ApprovalService, ApprovalStatus } from '../services/approval.service'; +import { IdempotencyService } from '../services/idempotency.service'; +import { HighRiskService } from '../services/high-risk.service'; + +/** + * DTOs for approval endpoints + */ +interface ApproveActionDto { + reason?: string; +} + +interface RejectActionDto { + reason: string; +} + +interface RequestApprovalDto { + nodeRunId: string; + workspaceId: string; + tenantId: string; + toolName: string; + toolParams: Record; + currentUrl?: string; + aiReasoning?: string; +} + +interface ListApprovalsQuery { + status?: 'PENDING' | 'APPROVED' | 'REJECTED' | 'EXPIRED'; + limit?: string; + offset?: string; +} + +@Controller('approvals') +export class ApprovalController { + private readonly logger = new Logger(ApprovalController.name); + + constructor( + private readonly approvalService: ApprovalService, + private readonly idempotencyService: IdempotencyService, + private readonly highRiskService: HighRiskService, + ) {} + + /** + * POST /api/v1/approvals/request + * Request approval for a high-risk action (called by agent) + */ + @Post('request') + async requestApproval(@Body() body: RequestApprovalDto) { + if (!body.nodeRunId || !body.toolName) { + throw new HttpException( + 'nodeRunId and toolName are required', + HttpStatus.BAD_REQUEST, + ); + } + + // Classify the action + const classification = this.highRiskService.classifyAction({ + toolName: body.toolName, + toolParams: body.toolParams, + currentUrl: body.currentUrl, + nodeRunId: body.nodeRunId, + workspaceId: body.workspaceId, + tenantId: body.tenantId, + }); + + // Check if approval is actually required + if (!classification.requiresApproval) { + return { + success: true, + requiresApproval: false, + message: 'Action does not require approval', + riskLevel: classification.riskLevel, + }; + } + + try { + // Create approval request + const approval = await this.approvalService.createApprovalRequest({ + nodeRunId: body.nodeRunId, + actionContext: { + toolName: body.toolName, + toolParams: body.toolParams, + currentUrl: body.currentUrl, + nodeRunId: body.nodeRunId, + workspaceId: body.workspaceId, + tenantId: body.tenantId, + }, + classification, + aiReasoning: body.aiReasoning, + }); + + this.logger.log( + `Created approval request ${approval.id} for ${body.toolName} (risk: ${classification.riskLevel})`, + ); + + return { + success: true, + requiresApproval: true, + approval: this.formatApproval(approval), + message: 'Action requires human approval before execution', + }; + } catch (error: any) { + this.logger.error(`Failed to create approval request: ${error.message}`); + throw new HttpException( + `Failed to create approval request: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/approvals + * List pending approvals for a tenant + */ + @Get() + async listApprovals( + @Query() query: ListApprovalsQuery, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const limit = parseInt(query.limit || '20', 10); + const offset = parseInt(query.offset || '0', 10); + + const { approvals, total } = await this.approvalService.getPendingApprovals(tenantId, { + limit, + offset, + }); + + return { + success: true, + approvals: approvals.map((a) => this.formatApproval(a)), + pagination: { + total, + limit, + offset, + hasMore: offset + approvals.length < total, + }, + }; + } + + /** + * GET /api/v1/approvals/count + * Get count of approvals by status (defaults to PENDING) + * v1.0.1: Added to support frontend badge display + */ + @Get('count') + async getApprovalsCount( + @Query('status') status?: 'PENDING' | 'APPROVED' | 'REJECTED' | 'EXPIRED', + @Headers('X-Tenant-Id') tenantId?: string, + ) { + // Default to PENDING status if not specified + const filterStatus = status || 'PENDING'; + + // Note: For count endpoint, we allow requests without tenant ID + // to support global badge counts. If tenant ID is provided, + // we filter by tenant. + const count = await this.approvalService.getApprovalCount( + filterStatus, + tenantId, + ); + + return { + success: true, + count, + }; + } + + /** + * GET /api/v1/approvals/:id + * Get approval details by ID + */ + @Get(':id') + async getApproval(@Param('id') id: string) { + const approval = await this.approvalService.getApprovalById(id); + + if (!approval) { + throw new HttpException('Approval not found', HttpStatus.NOT_FOUND); + } + + return { + success: true, + approval: this.formatApprovalDetailed(approval), + }; + } + + /** + * POST /api/v1/approvals/:id/approve + * Approve a high-risk action + */ + @Post(':id/approve') + async approveAction( + @Param('id') id: string, + @Body() body: ApproveActionDto, + @Headers('X-User-Id') userId?: string, + ) { + if (!userId) { + throw new HttpException('X-User-Id header required for approval', HttpStatus.BAD_REQUEST); + } + + try { + const approval = await this.approvalService.processDecision({ + approvalId: id, + approved: true, + reviewerId: userId, + reason: body.reason, + }); + + this.logger.log(`Approval ${id} approved by ${userId}`); + + return { + success: true, + approval: this.formatApproval(approval), + message: 'Action approved successfully. The workflow will resume execution.', + }; + } catch (error: any) { + this.logger.error(`Failed to approve ${id}: ${error.message}`); + + if (error.message.includes('not pending')) { + throw new HttpException(error.message, HttpStatus.CONFLICT); + } + if (error.message.includes('expired')) { + throw new HttpException(error.message, HttpStatus.GONE); + } + if (error.message.includes('not found')) { + throw new HttpException(error.message, HttpStatus.NOT_FOUND); + } + + throw new HttpException( + `Failed to approve action: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/approvals/:id/reject + * Reject a high-risk action + */ + @Post(':id/reject') + async rejectAction( + @Param('id') id: string, + @Body() body: RejectActionDto, + @Headers('X-User-Id') userId?: string, + ) { + if (!userId) { + throw new HttpException('X-User-Id header required for rejection', HttpStatus.BAD_REQUEST); + } + + if (!body.reason) { + throw new HttpException('Rejection reason is required', HttpStatus.BAD_REQUEST); + } + + try { + const approval = await this.approvalService.processDecision({ + approvalId: id, + approved: false, + reviewerId: userId, + reason: body.reason, + }); + + this.logger.log(`Approval ${id} rejected by ${userId}: ${body.reason}`); + + return { + success: true, + approval: this.formatApproval(approval), + message: 'Action rejected. The workflow node will be marked as failed.', + }; + } catch (error: any) { + this.logger.error(`Failed to reject ${id}: ${error.message}`); + + if (error.message.includes('not pending')) { + throw new HttpException(error.message, HttpStatus.CONFLICT); + } + if (error.message.includes('expired')) { + throw new HttpException(error.message, HttpStatus.GONE); + } + if (error.message.includes('not found')) { + throw new HttpException(error.message, HttpStatus.NOT_FOUND); + } + + throw new HttpException( + `Failed to reject action: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/approvals/stats + * Get approval and idempotency statistics + */ + @Get('stats') + async getStats(@Headers('X-Tenant-Id') tenantId?: string) { + // Expire old approvals first + const expiredCount = await this.approvalService.expireOldApprovals(); + + // Get idempotency stats + const idempotencyStats = await this.idempotencyService.getStats(); + + // Get high-risk tool list + const highRiskTools = this.highRiskService.getHighRiskToolNames(); + + return { + success: true, + stats: { + approvals: { + expiredThisCheck: expiredCount, + }, + idempotency: idempotencyStats, + configuration: { + highRiskTools, + }, + }, + }; + } + + /** + * POST /api/v1/approvals/cleanup + * Clean up expired records (admin endpoint) + */ + @Post('cleanup') + async cleanup() { + const expiredApprovals = await this.approvalService.expireOldApprovals(); + const expiredIdempotency = await this.idempotencyService.cleanupExpired(); + + return { + success: true, + cleanup: { + expiredApprovals, + expiredIdempotencyRecords: expiredIdempotency, + }, + }; + } + + /** + * Format approval for list response + */ + private formatApproval(approval: any) { + const preview = approval.previewData || {}; + + return { + id: approval.id, + status: approval.status, + toolName: approval.toolName, + summary: preview.summary || `Execute ${approval.toolName}`, + category: preview.category || 'Other', + riskLevel: preview.riskLevel || 'UNKNOWN', + recipient: preview.recipient, + subject: preview.subject, + expiresAt: approval.expiresAt, + createdAt: approval.createdAt, + decision: approval.status !== ApprovalStatus.PENDING + ? { + by: approval.approvedBy || approval.rejectedBy, + at: approval.approvedAt || approval.rejectedAt, + reason: approval.reason, + } + : undefined, + }; + } + + /** + * Format approval for detailed response + */ + private formatApprovalDetailed(approval: any) { + const preview = approval.previewData || {}; + + return { + ...this.formatApproval(approval), + nodeRunId: approval.nodeRunId, + actionHash: approval.actionHash, + toolParams: approval.toolParams, + preview: { + bodyPreview: preview.bodyPreview, + context: preview.context, + aiReasoning: preview.aiReasoning, + confidenceScore: preview.confidenceScore, + riskReason: preview.riskReason, + workspaceId: preview.workspaceId, + currentUrl: preview.currentUrl, + }, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/audit.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/audit.controller.ts new file mode 100644 index 000000000..0fef72edc --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/audit.controller.ts @@ -0,0 +1,406 @@ +/** + * Audit Controller + * Post-M5 Enhancement: API endpoints for querying audit logs + * + * Endpoints: + * - GET /api/v1/audit List audit logs with filtering + * - GET /api/v1/audit/approvals/:id Get audit trail for specific approval + * - GET /api/v1/audit/export Export audit logs (CSV/JSON) + * - GET /api/v1/audit/stats Get audit log statistics + */ + +import { + Controller, + Get, + Param, + Query, + HttpException, + HttpStatus, + Logger, + Headers, + Res, +} from '@nestjs/common'; +import { Response } from 'express'; +import { AuditService, AuditEventType } from '../services/audit.service'; + +/** + * Query parameters for listing audit logs + */ +interface ListAuditLogsQuery { + eventTypes?: string; + resourceType?: string; + resourceId?: string; + actorId?: string; + startDate?: string; + endDate?: string; + limit?: string; + offset?: string; +} + +/** + * Query parameters for export + */ +interface ExportQuery extends ListAuditLogsQuery { + format?: 'json' | 'csv'; +} + +/** + * Query parameters for stats + */ +interface StatsQuery { + startDate?: string; + endDate?: string; +} + +@Controller('audit') +export class AuditController { + private readonly logger = new Logger(AuditController.name); + + constructor(private readonly auditService: AuditService) {} + + /** + * GET /api/v1/audit + * List audit logs with filtering + */ + @Get() + async listAuditLogs( + @Query() query: ListAuditLogsQuery, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const limit = parseInt(query.limit || '50', 10); + const offset = parseInt(query.offset || '0', 10); + + // Parse event types if provided + let eventTypes: AuditEventType[] | undefined; + if (query.eventTypes) { + eventTypes = query.eventTypes.split(',') as AuditEventType[]; + // Validate event types + const validTypes = Object.values(AuditEventType); + const invalidTypes = eventTypes.filter((t) => !validTypes.includes(t)); + if (invalidTypes.length > 0) { + throw new HttpException( + `Invalid event types: ${invalidTypes.join(', ')}`, + HttpStatus.BAD_REQUEST, + ); + } + } + + // Parse dates if provided + let startDate: Date | undefined; + let endDate: Date | undefined; + + if (query.startDate) { + startDate = new Date(query.startDate); + if (isNaN(startDate.getTime())) { + throw new HttpException('Invalid startDate format', HttpStatus.BAD_REQUEST); + } + } + + if (query.endDate) { + endDate = new Date(query.endDate); + if (isNaN(endDate.getTime())) { + throw new HttpException('Invalid endDate format', HttpStatus.BAD_REQUEST); + } + } + + try { + const { logs, total } = await this.auditService.query({ + tenantId, + eventTypes, + resourceType: query.resourceType, + resourceId: query.resourceId, + actorId: query.actorId, + startDate, + endDate, + limit, + offset, + }); + + return { + success: true, + logs: logs.map((log) => this.formatAuditLog(log)), + pagination: { + total, + limit, + offset, + hasMore: offset + logs.length < total, + }, + }; + } catch (error: any) { + this.logger.error(`Failed to query audit logs: ${error.message}`); + throw new HttpException( + `Failed to query audit logs: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/audit/approvals/:id + * Get audit trail for a specific approval + */ + @Get('approvals/:id') + async getApprovalAuditTrail( + @Param('id') approvalId: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + try { + const logs = await this.auditService.getApprovalAuditTrail(approvalId, tenantId); + + return { + success: true, + approvalId, + auditTrail: logs.map((log) => this.formatAuditLog(log)), + summary: this.generateAuditSummary(logs), + }; + } catch (error: any) { + this.logger.error(`Failed to get approval audit trail: ${error.message}`); + throw new HttpException( + `Failed to get audit trail: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/audit/export + * Export audit logs in CSV or JSON format + */ + @Get('export') + async exportAuditLogs( + @Query() query: ExportQuery, + @Headers('X-Tenant-Id') tenantId?: string, + @Res() res?: Response, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const format = query.format || 'json'; + + // Parse event types if provided + let eventTypes: AuditEventType[] | undefined; + if (query.eventTypes) { + eventTypes = query.eventTypes.split(',') as AuditEventType[]; + } + + // Parse dates if provided + let startDate: Date | undefined; + let endDate: Date | undefined; + + if (query.startDate) { + startDate = new Date(query.startDate); + } + + if (query.endDate) { + endDate = new Date(query.endDate); + } + + try { + const data = await this.auditService.exportLogs({ + tenantId, + eventTypes, + resourceType: query.resourceType, + resourceId: query.resourceId, + actorId: query.actorId, + startDate, + endDate, + format, + }); + + // Set response headers for download + const timestamp = new Date().toISOString().split('T')[0]; + const filename = `audit-logs-${timestamp}.${format}`; + + if (res) { + res.setHeader('Content-Type', format === 'csv' ? 'text/csv' : 'application/json'); + res.setHeader('Content-Disposition', `attachment; filename="${filename}"`); + res.send(data); + } + + return { success: true }; + } catch (error: any) { + this.logger.error(`Failed to export audit logs: ${error.message}`); + throw new HttpException( + `Failed to export audit logs: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/audit/stats + * Get audit log statistics + */ + @Get('stats') + async getStats( + @Query() query: StatsQuery, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Parse dates if provided + let startDate: Date | undefined; + let endDate: Date | undefined; + + if (query.startDate) { + startDate = new Date(query.startDate); + if (isNaN(startDate.getTime())) { + throw new HttpException('Invalid startDate format', HttpStatus.BAD_REQUEST); + } + } + + if (query.endDate) { + endDate = new Date(query.endDate); + if (isNaN(endDate.getTime())) { + throw new HttpException('Invalid endDate format', HttpStatus.BAD_REQUEST); + } + } + + try { + const stats = await this.auditService.getStats(tenantId, startDate, endDate); + + return { + success: true, + stats: { + ...stats, + dateRange: { + start: startDate?.toISOString() || 'all time', + end: endDate?.toISOString() || 'now', + }, + }, + }; + } catch (error: any) { + this.logger.error(`Failed to get audit stats: ${error.message}`); + throw new HttpException( + `Failed to get audit stats: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/audit/event-types + * Get available audit event types + */ + @Get('event-types') + getEventTypes() { + return { + success: true, + eventTypes: Object.values(AuditEventType).map((type) => ({ + type, + description: this.getEventDescription(type), + category: this.getEventCategory(type), + })), + }; + } + + /** + * Format audit log entry for response + */ + private formatAuditLog(log: any) { + return { + id: log.id, + timestamp: log.timestamp, + eventType: log.eventType, + actor: { + type: log.actor.type, + id: log.actor.id, + email: log.actor.email, + name: log.actor.name, + }, + resource: { + type: log.resource.type, + id: log.resource.id, + name: log.resource.name, + }, + action: { + type: log.action.type, + reason: log.action.reason, + previousState: log.action.previousState, + newState: log.action.newState, + }, + context: { + workflowRunId: log.context.workflowRunId, + nodeRunId: log.context.nodeRunId, + }, + }; + } + + /** + * Generate summary of audit trail + */ + private generateAuditSummary(logs: any[]) { + if (logs.length === 0) { + return { totalEvents: 0 }; + } + + const firstEvent = logs[0]; + const lastEvent = logs[logs.length - 1]; + + return { + totalEvents: logs.length, + firstEvent: { + type: firstEvent.eventType, + timestamp: firstEvent.timestamp, + }, + lastEvent: { + type: lastEvent.eventType, + timestamp: lastEvent.timestamp, + }, + eventTypes: [...new Set(logs.map((l) => l.eventType))], + actors: [...new Set(logs.map((l) => l.actor.id).filter(Boolean))], + }; + } + + /** + * Get human-readable event description + */ + private getEventDescription(eventType: AuditEventType): string { + const descriptions: Record = { + [AuditEventType.APPROVAL_REQUESTED]: 'A high-risk action requested approval', + [AuditEventType.APPROVAL_VIEWED]: 'An approver viewed the approval request', + [AuditEventType.APPROVAL_APPROVED]: 'The action was approved', + [AuditEventType.APPROVAL_REJECTED]: 'The action was rejected', + [AuditEventType.APPROVAL_EXPIRED]: 'The approval request expired', + [AuditEventType.APPROVAL_EXECUTED]: 'The approved action was executed', + [AuditEventType.WEBHOOK_SENT]: 'A webhook notification was delivered', + [AuditEventType.WEBHOOK_FAILED]: 'A webhook notification failed', + [AuditEventType.USER_PROMPT_CREATED]: 'An external input request was created', + [AuditEventType.USER_PROMPT_RESOLVED]: 'An external input request was resolved', + [AuditEventType.USER_PROMPT_CANCELLED]: 'An external input request was cancelled', + [AuditEventType.USER_PROMPT_EXPIRED]: 'An external input request expired', + [AuditEventType.WEBHOOK_CREATED]: 'A webhook configuration was created', + [AuditEventType.WEBHOOK_UPDATED]: 'A webhook configuration was updated', + [AuditEventType.WEBHOOK_DELETED]: 'A webhook configuration was deleted', + [AuditEventType.WEBHOOK_SECRET_ROTATED]: 'A webhook secret was rotated', + }; + return descriptions[eventType] || 'Unknown event type'; + } + + /** + * Get event category for grouping + */ + private getEventCategory(eventType: AuditEventType): string { + if (eventType.startsWith('APPROVAL_')) { + return 'approval'; + } + if (eventType.startsWith('USER_PROMPT_')) { + return 'prompt'; + } + if (eventType.startsWith('WEBHOOK_')) { + return 'webhook'; + } + return 'other'; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/batch.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/batch.controller.ts new file mode 100644 index 000000000..4214fa503 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/batch.controller.ts @@ -0,0 +1,432 @@ +/** + * Batch Controller + * Phase 7: Enhanced Features + * + * REST API endpoints for batch goal execution: + * - Create and manage batches + * - Start/stop batch execution + * - Monitor batch progress + * - Retry failed items + */ + +import { + Controller, + Get, + Post, + Body, + Param, + Query, + HttpCode, + HttpStatus, + Headers, + BadRequestException, +} from '@nestjs/common'; +import { Throttle } from '@nestjs/throttler'; +import { + ApiTags, + ApiOperation, + ApiResponse, + ApiParam, + ApiHeader, + ApiProperty, +} from '@nestjs/swagger'; +import { + IsString, + IsOptional, + IsBoolean, + IsObject, + IsArray, + IsEnum, + IsNumber, + MinLength, + Min, + Max, + ValidateNested, + ArrayMinSize, +} from 'class-validator'; +import { Type } from 'class-transformer'; +import { + BatchService, + CreateBatchInput, + BatchGoalInput, + BatchFilters, +} from '../services/batch.service'; + +// Execution mode enum for validation +enum BatchExecutionMode { + PARALLEL = 'PARALLEL', + SEQUENTIAL = 'SEQUENTIAL', +} + +// ============================================================================ +// DTOs with class-validator decorators for ValidationPipe compatibility +// ============================================================================ + +/** + * DTO for a single goal in a batch + */ +class BatchGoalDto { + @ApiProperty({ + required: false, + description: 'Goal text (required if templateId not provided)', + example: 'Deploy api-gateway to production using rolling strategy', + minLength: 10, + }) + @IsOptional() + @IsString() + @MinLength(10, { message: 'Goal must be at least 10 characters' }) + goal?: string; + + @ApiProperty({ required: false, description: 'Execution constraints for this goal' }) + @IsOptional() + @IsObject() + constraints?: any; + + @ApiProperty({ required: false, description: 'Template ID to use (alternative to goal text)' }) + @IsOptional() + @IsString() + templateId?: string; + + @ApiProperty({ + required: false, + description: 'Variable values if using a template', + example: { service_name: 'api-gateway', environment: 'production' }, + }) + @IsOptional() + @IsObject() + variableValues?: Record; +} + +/** + * DTO for creating a new batch + */ +class CreateBatchDto { + @ApiProperty({ description: 'Batch name', example: 'Weekly Deployment Batch', minLength: 3 }) + @IsString() + @MinLength(3, { message: 'Batch name must be at least 3 characters' }) + name!: string; + + @ApiProperty({ required: false, description: 'Batch description' }) + @IsOptional() + @IsString() + description?: string; + + @ApiProperty({ + required: false, + enum: ['PARALLEL', 'SEQUENTIAL'], + description: 'Execution mode', + default: 'PARALLEL', + }) + @IsOptional() + @IsEnum(BatchExecutionMode) + executionMode?: 'PARALLEL' | 'SEQUENTIAL'; + + @ApiProperty({ + required: false, + description: 'Maximum concurrent goal executions (for PARALLEL mode)', + default: 5, + minimum: 1, + maximum: 20, + }) + @IsOptional() + @IsNumber() + @Min(1) + @Max(20) + maxConcurrency?: number; + + @ApiProperty({ + required: false, + description: 'Stop batch execution if any goal fails', + default: false, + }) + @IsOptional() + @IsBoolean() + stopOnFailure?: boolean; + + @ApiProperty({ type: [BatchGoalDto], description: 'List of goals to execute', minItems: 1 }) + @IsArray() + @ArrayMinSize(1, { message: 'Batch must contain at least one goal' }) + @ValidateNested({ each: true }) + @Type(() => BatchGoalDto) + goals!: BatchGoalDto[]; +} + +/** + * DTO for filtering batch list + */ +class BatchFiltersDto { + @ApiProperty({ required: false, description: 'Filter by batch status' }) + @IsOptional() + @IsString() + status?: string; + + @ApiProperty({ required: false, description: 'Page number', default: '1' }) + @IsOptional() + @IsString() + page?: string; + + @ApiProperty({ required: false, description: 'Page size', default: '20' }) + @IsOptional() + @IsString() + pageSize?: string; +} + +@ApiTags('batches') +// v5.11.3: Removed deprecated api/v1/batches backward compatibility prefix (was scheduled for v5.6.0) +@Controller('batches') +export class BatchController { + constructor(private batchService: BatchService) {} + + /** + * POST /api/v1/batches + * Create a new batch + * Rate limited: 3 per minute (batch creation is resource-intensive) + */ + @Post() + @HttpCode(HttpStatus.CREATED) + @Throttle({ default: { limit: 3, ttl: 60000 } }) + @ApiOperation({ + summary: 'Create a new batch', + description: 'Creates a batch of goals for parallel or sequential execution. Rate limited to 3 requests per minute.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 201, description: 'Batch created successfully' }) + @ApiResponse({ status: 400, description: 'Invalid input' }) + @ApiResponse({ status: 429, description: 'Rate limit exceeded' }) + async createBatch( + @Body() dto: CreateBatchDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.name || dto.name.trim().length < 3) { + throw new BadRequestException('Batch name must be at least 3 characters'); + } + + if (!dto.goals || dto.goals.length === 0) { + throw new BadRequestException('Batch must contain at least one goal'); + } + + // Validate goals + for (let i = 0; i < dto.goals.length; i++) { + const goal = dto.goals[i]; + if (!goal.templateId && (!goal.goal || goal.goal.trim().length < 10)) { + throw new BadRequestException( + `Goal ${i + 1} must be at least 10 characters or specify a templateId`, + ); + } + } + + const goals: BatchGoalInput[] = dto.goals.map((g) => ({ + goal: g.goal || '', // Will be populated from template if templateId is provided + constraints: g.constraints, + templateId: g.templateId, + variableValues: g.variableValues, + })); + + const input: CreateBatchInput = { + tenantId, + name: dto.name.trim(), + description: dto.description, + executionMode: dto.executionMode, + maxConcurrency: dto.maxConcurrency, + stopOnFailure: dto.stopOnFailure, + goals, + }; + + const batch = await this.batchService.create(input); + + return { + success: true, + data: batch, + }; + } + + /** + * GET /api/v1/batches + * List batches for tenant + */ + @Get() + @ApiOperation({ summary: 'List batches', description: 'Returns paginated list of batches for the tenant.' }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Batches retrieved successfully' }) + async listBatches( + @Query() query: BatchFiltersDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const filters: BatchFilters = { + status: query.status, + page: query.page ? parseInt(query.page, 10) : 1, + pageSize: query.pageSize ? parseInt(query.pageSize, 10) : 20, + }; + + const result = await this.batchService.findByTenant(tenantId, filters); + + return { + success: true, + ...result, + }; + } + + /** + * GET /api/v1/batches/:id + * Get batch by ID + */ + @Get(':id') + @ApiOperation({ summary: 'Get batch by ID', description: 'Returns a batch with all its goal items.' }) + @ApiParam({ name: 'id', description: 'Batch ID' }) + @ApiResponse({ status: 200, description: 'Batch retrieved successfully' }) + @ApiResponse({ status: 404, description: 'Batch not found' }) + async getBatch(@Param('id') id: string) { + const batch = await this.batchService.findByIdWithItems(id); + + return { + success: true, + data: batch, + }; + } + + /** + * GET /api/v1/batches/:id/items + * Get batch items + */ + @Get(':id/items') + @ApiOperation({ summary: 'Get batch items', description: 'Returns all goal items in the batch.' }) + @ApiParam({ name: 'id', description: 'Batch ID' }) + @ApiResponse({ status: 200, description: 'Items retrieved successfully' }) + @ApiResponse({ status: 404, description: 'Batch not found' }) + async getBatchItems(@Param('id') id: string) { + const batch = await this.batchService.findByIdWithItems(id); + + return { + success: true, + data: batch.items, + }; + } + + /** + * GET /api/v1/batches/:id/items/:itemId + * Get specific batch item + */ + @Get(':id/items/:itemId') + @ApiOperation({ summary: 'Get batch item', description: 'Returns a specific goal item from the batch.' }) + @ApiParam({ name: 'id', description: 'Batch ID' }) + @ApiParam({ name: 'itemId', description: 'Batch item ID' }) + @ApiResponse({ status: 200, description: 'Item retrieved successfully' }) + @ApiResponse({ status: 404, description: 'Batch or item not found' }) + async getBatchItem( + @Param('id') batchId: string, + @Param('itemId') itemId: string, + ) { + const item = await this.batchService.getItem(batchId, itemId); + + return { + success: true, + data: item, + }; + } + + /** + * POST /api/v1/batches/:id/start + * Start batch execution + */ + @Post(':id/start') + @HttpCode(HttpStatus.OK) + @ApiOperation({ summary: 'Start batch execution', description: 'Starts executing all goals in the batch according to the configured execution mode.' }) + @ApiParam({ name: 'id', description: 'Batch ID' }) + @ApiResponse({ status: 200, description: 'Batch execution started' }) + @ApiResponse({ status: 400, description: 'Batch already started or completed' }) + @ApiResponse({ status: 404, description: 'Batch not found' }) + async startBatch(@Param('id') id: string) { + const batch = await this.batchService.start(id); + + return { + success: true, + data: batch, + message: 'Batch execution started', + }; + } + + /** + * POST /api/v1/batches/:id/cancel + * Cancel batch execution + */ + @Post(':id/cancel') + @HttpCode(HttpStatus.OK) + @ApiOperation({ summary: 'Cancel batch execution', description: 'Cancels batch execution and marks pending items as cancelled.' }) + @ApiParam({ name: 'id', description: 'Batch ID' }) + @ApiResponse({ status: 200, description: 'Batch execution cancelled' }) + @ApiResponse({ status: 400, description: 'Batch not running' }) + @ApiResponse({ status: 404, description: 'Batch not found' }) + async cancelBatch( + @Param('id') id: string, + @Body() body?: { reason?: string }, + ) { + const batch = await this.batchService.cancel(id, body?.reason); + + return { + success: true, + data: batch, + message: 'Batch execution cancelled', + }; + } + + /** + * POST /api/v1/batches/:id/retry + * Retry failed items in batch + */ + @Post(':id/retry') + @HttpCode(HttpStatus.OK) + @ApiOperation({ summary: 'Retry failed items', description: 'Retries all failed goal items in the batch.' }) + @ApiParam({ name: 'id', description: 'Batch ID' }) + @ApiResponse({ status: 200, description: 'Retry initiated successfully' }) + @ApiResponse({ status: 400, description: 'No failed items to retry' }) + @ApiResponse({ status: 404, description: 'Batch not found' }) + async retryBatch(@Param('id') id: string) { + const batch = await this.batchService.retryFailed(id); + + return { + success: true, + data: batch, + message: 'Retrying failed items', + }; + } + + /** + * GET /api/v1/batches/:id/progress + * Get batch progress summary + */ + @Get(':id/progress') + @ApiOperation({ summary: 'Get batch progress', description: 'Returns execution progress summary for the batch.' }) + @ApiParam({ name: 'id', description: 'Batch ID' }) + @ApiResponse({ status: 200, description: 'Progress retrieved successfully' }) + @ApiResponse({ status: 404, description: 'Batch not found' }) + async getBatchProgress(@Param('id') id: string) { + const batch = await this.batchService.findById(id); + + return { + success: true, + data: { + status: batch.status, + progress: batch.progress, + totalGoals: batch.totalGoals, + completedGoals: batch.completedGoals, + failedGoals: batch.failedGoals, + cancelledGoals: batch.cancelledGoals, + remainingGoals: + batch.totalGoals - + batch.completedGoals - + batch.failedGoals - + batch.cancelledGoals, + startedAt: batch.startedAt, + completedAt: batch.completedAt, + }, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/checkpoint.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/checkpoint.controller.ts new file mode 100644 index 000000000..4964867d0 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/checkpoint.controller.ts @@ -0,0 +1,375 @@ +/** + * Checkpoint Controller + * v1.0.0: Checkpoint Visualization and Management API + * + * Provides REST endpoints for checkpoint inspection and management: + * - View current checkpoint state + * - Access checkpoint history + * - Visualize progress and knowledge + * - Trigger manual recovery + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS.md + */ + +import { + Controller, + Get, + Post, + Delete, + Param, + Query, + HttpException, + HttpStatus, + Logger, +} from '@nestjs/common'; +import { ApiTags, ApiOperation, ApiResponse, ApiParam, ApiQuery } from '@nestjs/swagger'; +import { GoalCheckpointService } from '../services/goal-checkpoint.service'; +import { CheckpointPersistenceService } from '../services/checkpoint-persistence.service'; +import { KnowledgeExtractionService } from '../services/knowledge-extraction.service'; +import { ContextSummarizationService } from '../services/context-summarization.service'; +import { BackgroundModeService } from '../services/background-mode.service'; + +@ApiTags('Checkpoints') +@Controller('checkpoints') +export class CheckpointController { + private readonly logger = new Logger(CheckpointController.name); + + constructor( + private readonly checkpointService: GoalCheckpointService, + private readonly persistenceService: CheckpointPersistenceService, + private readonly knowledgeService: KnowledgeExtractionService, + private readonly contextService: ContextSummarizationService, + private readonly backgroundService: BackgroundModeService, + ) {} + + /** + * Get checkpoint for a goal run + */ + @Get(':goalRunId') + @ApiOperation({ summary: 'Get checkpoint for a goal run' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiResponse({ status: 200, description: 'Checkpoint retrieved successfully' }) + @ApiResponse({ status: 404, description: 'Checkpoint not found' }) + async getCheckpoint(@Param('goalRunId') goalRunId: string) { + const checkpoint = await this.checkpointService.getCheckpoint(goalRunId); + + if (!checkpoint) { + throw new HttpException('Checkpoint not found', HttpStatus.NOT_FOUND); + } + + return { + success: true, + data: checkpoint, + }; + } + + /** + * Get checkpoint formatted for UI visualization + */ + @Get(':goalRunId/visualization') + @ApiOperation({ summary: 'Get checkpoint formatted for UI visualization' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiResponse({ status: 200, description: 'Visualization data retrieved' }) + async getVisualization(@Param('goalRunId') goalRunId: string) { + const checkpoint = await this.checkpointService.getCheckpoint(goalRunId); + const knowledge = this.knowledgeService.getKnowledge(goalRunId); + + if (!checkpoint) { + throw new HttpException('Checkpoint not found', HttpStatus.NOT_FOUND); + } + + // Format for visualization + const visualization = { + // Progress ring data + progress: { + percentComplete: checkpoint.progressSummary.percentComplete, + completedSteps: checkpoint.progressSummary.completedSteps, + totalSteps: checkpoint.progressSummary.totalSteps, + failedSteps: checkpoint.progressSummary.failedSteps, + pendingSteps: checkpoint.progressSummary.pendingSteps, + }, + + // Timeline data + timeline: [ + ...checkpoint.completedWork.map((work, index) => ({ + id: `step-${work.stepNumber}`, + order: work.stepNumber, + label: work.description, + status: 'completed' as const, + outcome: work.outcome as string | null, + timestamp: work.completedAt as Date | null, + duration: this.calculateDuration(checkpoint.completedWork, index), + })), + ...checkpoint.remainingSteps.map(step => ({ + id: `step-${step.stepNumber}`, + order: step.stepNumber, + label: step.description, + status: step.status, + outcome: null as string | null, + timestamp: null as Date | null, + duration: null as number | null, + })), + ].sort((a, b) => a.order - b.order), + + // Knowledge graph summary + knowledge: knowledge ? { + factCount: knowledge.facts.length, + entityCount: knowledge.entities.length, + decisionCount: knowledge.decisions.length, + keyMetrics: knowledge.keyMetrics, + recentFacts: knowledge.facts.slice(-5).map(f => ({ + type: f.type, + content: f.content, + confidence: f.confidence, + })), + topEntities: knowledge.entities + .sort((a, b) => b.mentions - a.mentions) + .slice(0, 5) + .map(e => ({ + name: e.name, + type: e.type, + mentions: e.mentions, + })), + } : null, + + // Context info + context: { + currentPhase: checkpoint.currentPhase, + lastSuccessfulStep: checkpoint.currentContext.lastSuccessfulStep, + accumulatedKnowledgeCount: checkpoint.currentContext.accumulatedKnowledge.length, + }, + + // Metadata + meta: { + goalRunId: checkpoint.goalRunId, + version: checkpoint.version, + checkpointedAt: checkpoint.checkpointedAt, + goalDescription: checkpoint.goalDescription, + }, + }; + + return { + success: true, + data: visualization, + }; + } + + /** + * Get checkpoint as formatted text (Manus-style todo.md) + */ + @Get(':goalRunId/formatted') + @ApiOperation({ summary: 'Get checkpoint as formatted text (todo.md style)' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiQuery({ name: 'maxTokens', required: false, description: 'Max tokens for output' }) + @ApiResponse({ status: 200, description: 'Formatted checkpoint retrieved' }) + async getFormattedCheckpoint( + @Param('goalRunId') goalRunId: string, + @Query('maxTokens') maxTokens?: string, + ) { + const formatted = await this.checkpointService.getCheckpointAsContext(goalRunId); + + if (!formatted) { + throw new HttpException('Checkpoint not found', HttpStatus.NOT_FOUND); + } + + return { + success: true, + data: { + formatted, + tokenEstimate: Math.ceil(formatted.length / 4), + }, + }; + } + + /** + * Get knowledge graph for a goal run + */ + @Get(':goalRunId/knowledge') + @ApiOperation({ summary: 'Get accumulated knowledge for a goal run' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiResponse({ status: 200, description: 'Knowledge graph retrieved' }) + async getKnowledge(@Param('goalRunId') goalRunId: string) { + const knowledge = this.knowledgeService.getKnowledge(goalRunId); + + return { + success: true, + data: knowledge, + formatted: this.knowledgeService.formatForContext(goalRunId), + }; + } + + /** + * Get context status for a goal run + */ + @Get(':goalRunId/context-status') + @ApiOperation({ summary: 'Get context window status' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiResponse({ status: 200, description: 'Context status retrieved' }) + async getContextStatus(@Param('goalRunId') goalRunId: string) { + // Get checkpoint items and check context status + const checkpoint = await this.checkpointService.getCheckpoint(goalRunId); + + if (!checkpoint) { + throw new HttpException('Checkpoint not found', HttpStatus.NOT_FOUND); + } + + // Convert checkpoint to context items for status check + const contextItems = checkpoint.completedWork.map(work => ({ + id: `work-${work.stepNumber}`, + type: 'step_result' as const, + timestamp: work.completedAt, + content: `${work.description}: ${work.outcome}`, + metadata: { + stepNumber: work.stepNumber, + importance: 'high' as const, + }, + })); + + const status = this.contextService.getContextStatus(contextItems); + + return { + success: true, + data: { + ...status, + recommendedAction: status.needsSummarization + ? 'Consider summarizing context to reduce token usage' + : 'Context within acceptable limits', + }, + }; + } + + /** + * Recover checkpoint from persistence + */ + @Post(':goalRunId/recover') + @ApiOperation({ summary: 'Recover checkpoint from persistence' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiQuery({ name: 'version', required: false, description: 'Specific version to recover' }) + @ApiResponse({ status: 200, description: 'Checkpoint recovered' }) + async recoverCheckpoint( + @Param('goalRunId') goalRunId: string, + @Query('version') version?: string, + ) { + const result = await this.persistenceService.recoverCheckpoint( + goalRunId, + version ? parseInt(version, 10) : undefined, + ); + + if (!result.success) { + throw new HttpException(result.message, HttpStatus.NOT_FOUND); + } + + return { + success: true, + data: { + recoveredFromVersion: result.recoveredFromVersion, + message: result.message, + checkpoint: result.checkpoint, + }, + }; + } + + /** + * Delete checkpoint for a goal run + */ + @Delete(':goalRunId') + @ApiOperation({ summary: 'Delete checkpoint for a goal run' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiResponse({ status: 200, description: 'Checkpoint deleted' }) + async deleteCheckpoint(@Param('goalRunId') goalRunId: string) { + const deleted = await this.persistenceService.deleteCheckpoint(goalRunId); + this.knowledgeService.clearKnowledge(goalRunId); + + return { + success: true, + data: { + deleted, + message: deleted ? 'Checkpoint deleted' : 'No checkpoint found', + }, + }; + } + + /** + * Get checkpoint persistence statistics + */ + @Get('stats/overview') + @ApiOperation({ summary: 'Get checkpoint persistence statistics' }) + @ApiResponse({ status: 200, description: 'Statistics retrieved' }) + async getStats() { + const persistenceStats = await this.persistenceService.getStats(); + const backgroundStats = this.backgroundService.getQueueStats(); + + return { + success: true, + data: { + persistence: persistenceStats, + background: backgroundStats, + }, + }; + } + + /** + * Get background task status + */ + @Get('background-tasks/:taskId') + @ApiOperation({ summary: 'Get background task status' }) + @ApiParam({ name: 'taskId', description: 'Background task ID' }) + @ApiResponse({ status: 200, description: 'Task status retrieved' }) + async getBackgroundTaskStatus(@Param('taskId') taskId: string) { + const status = this.backgroundService.getTaskStatus(taskId); + + if (!status) { + throw new HttpException('Task not found', HttpStatus.NOT_FOUND); + } + + return { + success: true, + data: status, + }; + } + + /** + * Cancel a background task + */ + @Post('background-tasks/:taskId/cancel') + @ApiOperation({ summary: 'Cancel a background task' }) + @ApiParam({ name: 'taskId', description: 'Background task ID' }) + @ApiResponse({ status: 200, description: 'Task cancelled' }) + async cancelBackgroundTask(@Param('taskId') taskId: string) { + const cancelled = await this.backgroundService.cancelTask(taskId); + + if (!cancelled) { + throw new HttpException('Task not found or already completed', HttpStatus.BAD_REQUEST); + } + + return { + success: true, + data: { + taskId, + message: 'Task cancelled', + }, + }; + } + + /** + * Calculate duration between steps + */ + private calculateDuration( + completedWork: Array<{ stepNumber: number; completedAt: Date }>, + currentIndex: number, + ): number | null { + if (currentIndex === 0) { + return null; // Can't calculate first step duration without start time + } + + const currentStep = completedWork[currentIndex]; + const previousStep = completedWork[currentIndex - 1]; + + if (!currentStep?.completedAt || !previousStep?.completedAt) { + return null; + } + + return new Date(currentStep.completedAt).getTime() - + new Date(previousStep.completedAt).getTime(); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/dashboard.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/dashboard.controller.ts new file mode 100644 index 000000000..4e0967ddb --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/dashboard.controller.ts @@ -0,0 +1,225 @@ +/** + * Dashboard Controller + * v1.0.0: REST API for Real-Time Dashboard Visualization + * + * Provides endpoints for dashboard UI integration: + * - Overview metrics and aggregations + * - Goal timelines and progress + * - Activity streams + * - Historical analytics + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS_V2.md + */ + +import { + Controller, + Get, + Param, + Query, + HttpException, + HttpStatus, + Logger, + Sse, + MessageEvent, +} from '@nestjs/common'; +import { ApiTags, ApiOperation, ApiResponse, ApiParam, ApiQuery } from '@nestjs/swagger'; +import { Observable, fromEvent, map } from 'rxjs'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { DashboardService, ActivityItem } from '../services/dashboard.service'; + +@ApiTags('Dashboard') +@Controller('dashboard') +export class DashboardController { + private readonly logger = new Logger(DashboardController.name); + + constructor( + private readonly dashboardService: DashboardService, + private readonly eventEmitter: EventEmitter2, + ) {} + + /** + * Get dashboard overview with aggregated metrics + */ + @Get('overview') + @ApiOperation({ summary: 'Get dashboard overview with all metrics' }) + @ApiResponse({ status: 200, description: 'Overview retrieved successfully' }) + async getOverview() { + const overview = await this.dashboardService.getOverview(); + + return { + success: true, + data: overview, + }; + } + + /** + * Get timeline for a specific goal + */ + @Get('goals/:goalRunId/timeline') + @ApiOperation({ summary: 'Get detailed timeline for a goal' }) + @ApiParam({ name: 'goalRunId', description: 'Goal run ID' }) + @ApiResponse({ status: 200, description: 'Timeline retrieved' }) + async getGoalTimeline(@Param('goalRunId') goalRunId: string) { + const timeline = await this.dashboardService.getGoalTimeline(goalRunId); + + if (!timeline) { + throw new HttpException('Goal not found', HttpStatus.NOT_FOUND); + } + + return { + success: true, + data: timeline, + }; + } + + /** + * Get all active goal timelines + */ + @Get('goals/active') + @ApiOperation({ summary: 'Get timelines for all active goals' }) + @ApiResponse({ status: 200, description: 'Active timelines retrieved' }) + async getActiveGoals() { + const timelines = await this.dashboardService.getActiveGoalTimelines(); + + return { + success: true, + data: timelines, + count: timelines.length, + }; + } + + /** + * Get activity stream + */ + @Get('activity') + @ApiOperation({ summary: 'Get recent activity stream' }) + @ApiQuery({ name: 'limit', required: false, description: 'Max items to return' }) + @ApiQuery({ name: 'types', required: false, description: 'Filter by activity types (comma-separated)' }) + @ApiQuery({ name: 'severity', required: false, description: 'Filter by severity (comma-separated)' }) + @ApiQuery({ name: 'goalRunId', required: false, description: 'Filter by goal run ID' }) + @ApiResponse({ status: 200, description: 'Activity stream retrieved' }) + getActivity( + @Query('limit') limit?: string, + @Query('types') types?: string, + @Query('severity') severity?: string, + @Query('goalRunId') goalRunId?: string, + ) { + const activities = this.dashboardService.getActivityStream({ + limit: limit ? parseInt(limit, 10) : undefined, + types: types ? types.split(',') as any[] : undefined, + severity: severity ? severity.split(',') as any[] : undefined, + goalRunId, + }); + + return { + success: true, + data: activities, + count: activities.length, + }; + } + + /** + * Server-Sent Events for real-time activity streaming + */ + @Sse('activity/stream') + @ApiOperation({ summary: 'Subscribe to real-time activity stream (SSE)' }) + streamActivity(): Observable { + return fromEvent(this.eventEmitter, 'dashboard.activity').pipe( + map((activity: ActivityItem) => ({ + data: JSON.stringify(activity), + type: 'activity', + id: activity.id, + })), + ); + } + + /** + * Get historical analytics + */ + @Get('analytics') + @ApiOperation({ summary: 'Get historical analytics for a time range' }) + @ApiQuery({ name: 'start', required: true, description: 'Start date (ISO string)' }) + @ApiQuery({ name: 'end', required: false, description: 'End date (ISO string, defaults to now)' }) + @ApiQuery({ name: 'interval', required: false, description: 'Aggregation interval (hour or day)' }) + @ApiResponse({ status: 200, description: 'Analytics retrieved' }) + async getAnalytics( + @Query('start') start: string, + @Query('end') end?: string, + @Query('interval') interval?: string, + ) { + const startDate = new Date(start); + const endDate = end ? new Date(end) : new Date(); + + if (isNaN(startDate.getTime())) { + throw new HttpException('Invalid start date', HttpStatus.BAD_REQUEST); + } + + const analytics = await this.dashboardService.getHistoricalAnalytics( + startDate, + endDate, + interval as 'hour' | 'day' || 'hour', + ); + + return { + success: true, + data: analytics, + }; + } + + /** + * Get performance metrics summary + */ + @Get('metrics') + @ApiOperation({ summary: 'Get performance metrics summary' }) + @ApiResponse({ status: 200, description: 'Metrics retrieved' }) + async getMetrics() { + const overview = await this.dashboardService.getOverview(); + + return { + success: true, + data: { + goals: overview.summary, + agents: overview.agentHealth, + resources: overview.resourceUtilization, + timestamp: overview.timestamp, + }, + }; + } + + /** + * Get agent health details + */ + @Get('agents/health') + @ApiOperation({ summary: 'Get health status for all agents' }) + @ApiResponse({ status: 200, description: 'Agent health retrieved' }) + async getAgentHealth() { + const overview = await this.dashboardService.getOverview(); + + return { + success: true, + data: overview.agentHealth, + }; + } + + /** + * Get today's summary (quick stats) + */ + @Get('today') + @ApiOperation({ summary: 'Get quick stats for today' }) + @ApiResponse({ status: 200, description: 'Today summary retrieved' }) + async getTodaySummary() { + const overview = await this.dashboardService.getOverview(); + + return { + success: true, + data: { + activeGoals: overview.summary.activeGoals, + completedToday: overview.summary.completedToday, + failedToday: overview.summary.failedToday, + successRate: overview.summary.successRate, + averageCompletionTime: overview.summary.averageCompletionTime, + lastUpdated: overview.timestamp, + }, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/desktop-control.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/desktop-control.controller.ts new file mode 100644 index 000000000..bca66a44c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/desktop-control.controller.ts @@ -0,0 +1,108 @@ +/** + * Desktop Control Controller + * Phase 4: Live Desktop Control APIs + * + * REST endpoints for desktop control operations: + * - GET /api/v1/desktop/runs/:runId/status - Get desktop status + * - GET /api/v1/desktop/runs/:runId/url - Get VNC URLs + * - POST /api/v1/desktop/runs/:runId/wake - Wake hibernated desktop + * - POST /api/v1/desktop/runs/:runId/screenshot - Capture screenshot + * - GET /api/v1/desktop/runs/:runId/screenshots - Get all screenshots + */ + +import { + Controller, + Get, + Post, + Param, + Query, + HttpCode, + HttpStatus, + Headers, + Body, +} from '@nestjs/common'; +import { DesktopControlService } from '../services/desktop-control.service'; + +// v5.11.3: Removed deprecated api/v1/desktop backward compatibility prefix (was scheduled for v5.6.0) +@Controller('desktop') +export class DesktopControlController { + constructor(private desktopControlService: DesktopControlService) {} + + /** + * GET /api/v1/desktop/runs/:runId/status + * Get desktop status for a goal run + */ + @Get('runs/:runId/status') + async getDesktopStatus(@Param('runId') runId: string) { + const status = await this.desktopControlService.getDesktopStatus(runId); + + return { + success: true, + data: status, + }; + } + + /** + * GET /api/v1/desktop/runs/:runId/url + * Get VNC connection URLs + */ + @Get('runs/:runId/url') + async getDesktopUrls(@Param('runId') runId: string) { + const urls = await this.desktopControlService.getDesktopUrls(runId); + + return { + success: true, + data: urls, + }; + } + + /** + * POST /api/v1/desktop/runs/:runId/wake + * Wake a hibernated desktop + */ + @Post('runs/:runId/wake') + @HttpCode(HttpStatus.OK) + async wakeDesktop(@Param('runId') runId: string) { + const result = await this.desktopControlService.wakeDesktop(runId); + + return { + success: true, + data: result, + }; + } + + /** + * POST /api/v1/desktop/runs/:runId/screenshot + * Capture a screenshot + */ + @Post('runs/:runId/screenshot') + @HttpCode(HttpStatus.CREATED) + async captureScreenshot( + @Param('runId') runId: string, + @Body() body?: { stepId?: string }, + ) { + const screenshot = await this.desktopControlService.captureScreenshot( + runId, + body?.stepId, + ); + + return { + success: true, + data: screenshot, + }; + } + + /** + * GET /api/v1/desktop/runs/:runId/screenshots + * Get all screenshots for a goal run + */ + @Get('runs/:runId/screenshots') + async getScreenshots(@Param('runId') runId: string) { + const screenshots = await this.desktopControlService.getScreenshots(runId); + + return { + success: true, + data: screenshots, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/enterprise.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/enterprise.controller.ts new file mode 100644 index 000000000..367ff40ba --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/enterprise.controller.ts @@ -0,0 +1,1127 @@ +/** + * Enterprise Controller + * Phase 10 (v5.5.0): Enterprise Features + * + * REST API endpoints for enterprise features: + * - Tenant administration + * - Audit log export + * - Compliance reporting + * - SSO configuration + * - LLM provider management + */ + +import { + Controller, + Get, + Post, + Put, + Delete, + Body, + Param, + Query, + Headers, + Res, + HttpCode, + HttpStatus, + BadRequestException, + StreamableFile, +} from '@nestjs/common'; +import { Response } from 'express'; +import { Throttle } from '@nestjs/throttler'; +import { + ApiTags, + ApiOperation, + ApiResponse, + ApiParam, + ApiQuery, + ApiHeader, +} from '@nestjs/swagger'; +import { + IsString, + IsOptional, + IsBoolean, + IsObject, + IsArray, + IsEnum, + IsNumber, + IsEmail, + MinLength, + Min, + Max, +} from 'class-validator'; +import { + TenantAdminService, + TenantPlan, + TenantStatus, + CreateTenantInput, + UpdateTenantInput, + TenantSettingsInput, + TenantQuotaInput, +} from '../services/tenant-admin.service'; +import { + AuditExportService, + ExportFormat, +} from '../services/audit-export.service'; +import { + ComplianceService, + ReportType, + LegalBasis, + DataProcessingRecordInput, +} from '../services/compliance.service'; +import { SSOService, SSOProvider, SSOConfigInput } from '../services/sso.service'; +import { LLMProviderService, LLMProvider, ProviderConfig } from '../services/llm-provider.service'; + +// ============================================================================ +// DTOs with class-validator decorators for ValidationPipe compatibility +// ============================================================================ + +/** + * DTO for creating a tenant + */ +class CreateTenantDto { + @IsString() + @MinLength(2) + name!: string; + + @IsOptional() + @IsString() + slug?: string; + + @IsEmail() + adminEmail!: string; + + @IsOptional() + @IsString() + adminName?: string; + + @IsOptional() + @IsString() + companyName?: string; + + @IsOptional() + @IsEnum(TenantPlan) + plan?: TenantPlan; + + @IsOptional() + @IsEmail() + billingEmail?: string; + + @IsOptional() + @IsObject() + metadata?: Record; +} + +/** + * DTO for updating a tenant + */ +class UpdateTenantDto { + @IsOptional() + @IsString() + @MinLength(2) + name?: string; + + @IsOptional() + @IsEmail() + adminEmail?: string; + + @IsOptional() + @IsString() + adminName?: string; + + @IsOptional() + @IsString() + companyName?: string; + + @IsOptional() + @IsEmail() + billingEmail?: string; + + @IsOptional() + @IsObject() + metadata?: Record; +} + +/** + * DTO for suspending a tenant + */ +class SuspendTenantDto { + @IsOptional() + @IsString() + reason?: string; +} + +/** + * DTO for tenant settings + */ +class TenantSettingsDto { + @IsOptional() + @IsString() + timezone?: string; + + @IsOptional() + @IsString() + dateFormat?: string; + + @IsOptional() + @IsString() + defaultWorkspaceMode?: string; + + @IsOptional() + @IsBoolean() + requireMfa?: boolean; + + @IsOptional() + @IsNumber() + @Min(60) + @Max(86400) + sessionTimeout?: number; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + ipAllowlist?: string[]; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + allowedDomains?: string[]; + + @IsOptional() + @IsNumber() + @Min(1) + @Max(100) + maxConcurrentGoals?: number; + + @IsOptional() + @IsNumber() + defaultApprovalTimeout?: number; + + @IsOptional() + @IsBoolean() + autoReplanEnabled?: boolean; + + @IsOptional() + @IsNumber() + @Min(0) + @Max(10) + maxReplanAttempts?: number; + + @IsOptional() + @IsEmail() + notificationEmail?: string; + + @IsOptional() + @IsString() + slackWebhookUrl?: string; + + @IsOptional() + @IsString() + teamsWebhookUrl?: string; + + @IsOptional() + @IsNumber() + @Min(7) + @Max(365) + auditLogRetentionDays?: number; + + @IsOptional() + @IsNumber() + @Min(7) + @Max(365) + goalRunRetentionDays?: number; + + @IsOptional() + @IsObject() + features?: Record; +} + +/** + * DTO for tenant quotas + */ +class TenantQuotaDto { + @IsOptional() + @IsNumber() + @Min(0) + monthlyGoalRuns?: number; + + @IsOptional() + @IsNumber() + @Min(0) + monthlyTokens?: number; + + @IsOptional() + @IsNumber() + @Min(0) + storageLimit?: number; + + @IsOptional() + @IsNumber() + @Min(1) + maxConcurrentWorkspaces?: number; + + @IsOptional() + @IsNumber() + @Min(1) + maxUsersPerTenant?: number; + + @IsOptional() + @IsNumber() + @Min(1) + maxTemplates?: number; + + @IsOptional() + @IsNumber() + @Min(1) + maxBatchSize?: number; + + @IsOptional() + @IsNumber() + @Min(1) + apiRateLimitPerMinute?: number; +} + +/** + * DTO for changing tenant plan + */ +class ChangePlanDto { + @IsEnum(TenantPlan) + plan!: TenantPlan; +} + +/** + * DTO for generating compliance report + */ +class GenerateReportDto { + @IsEnum(ReportType) + reportType!: ReportType; + + @IsOptional() + @IsString() + reportName?: string; + + @IsString() + startDate!: string; + + @IsString() + endDate!: string; +} + +/** + * DTO for DSAR request types + */ +enum DSARRequestType { + ACCESS = 'access', + RECTIFICATION = 'rectification', + ERASURE = 'erasure', + PORTABILITY = 'portability', + RESTRICTION = 'restriction', + OBJECTION = 'objection', +} + +/** + * DTO for DSAR processing + */ +class ProcessDSARDto { + @IsEmail() + subjectEmail!: string; + + @IsOptional() + @IsString() + subjectName?: string; + + @IsEnum(DSARRequestType) + requestType!: DSARRequestType; + + @IsOptional() + @IsString() + requestDetails?: string; +} + +/** + * DTO for data processing record + */ +class DataProcessingRecordDto { + @IsString() + @MinLength(3) + activityName!: string; + + @IsOptional() + @IsString() + activityDescription?: string; + + @IsArray() + @IsString({ each: true }) + dataSubjectCategories!: string[]; + + @IsArray() + @IsString({ each: true }) + personalDataCategories!: string[]; + + @IsEnum(LegalBasis) + legalBasis!: LegalBasis; + + @IsOptional() + @IsString() + legalBasisDetails?: string; + + @IsArray() + @IsString({ each: true }) + processingPurposes!: string[]; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + recipientCategories?: string[]; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + thirdCountryTransfers?: string[]; + + @IsOptional() + @IsString() + transferSafeguards?: string; + + @IsOptional() + @IsString() + retentionPeriod?: string; + + @IsOptional() + @IsString() + retentionCriteria?: string; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + technicalMeasures?: string[]; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + organizationalMeasures?: string[]; +} + +/** + * DTO for SAML configuration + */ +class SAMLConfigDto { + @IsString() + entityId!: string; + + @IsString() + ssoUrl!: string; + + @IsOptional() + @IsString() + sloUrl?: string; + + @IsString() + certificate!: string; + + @IsOptional() + @IsString() + signatureAlgorithm?: 'sha256' | 'sha512'; +} + +/** + * DTO for attribute mapping + */ +class AttributeMappingDto { + @IsString() + email!: string; + + @IsOptional() + @IsString() + firstName?: string; + + @IsOptional() + @IsString() + lastName?: string; + + @IsOptional() + @IsString() + displayName?: string; + + @IsOptional() + @IsString() + groups?: string; + + @IsOptional() + @IsString() + role?: string; +} + +/** + * DTO for SSO configuration + */ +class SSOConfigDto { + @IsEnum(SSOProvider) + provider!: SSOProvider; + + @IsOptional() + @IsObject() + saml?: SAMLConfigDto; + + @IsOptional() + @IsObject() + attributeMapping?: AttributeMappingDto; + + @IsOptional() + @IsBoolean() + jitProvisioning?: boolean; + + @IsOptional() + @IsString() + defaultRole?: string; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + enforcedDomains?: string[]; + + @IsOptional() + @IsBoolean() + allowBypassSSO?: boolean; +} + +/** + * DTO for LLM provider configuration + */ +class LLMProviderConfigDto { + @IsEnum(LLMProvider) + provider!: LLMProvider; + + @IsString() + @MinLength(1) + name!: string; + + @IsOptional() + @IsString() + apiKey?: string; + + @IsOptional() + @IsString() + apiEndpoint?: string; + + @IsOptional() + @IsString() + model?: string; + + @IsOptional() + @IsString() + region?: string; + + @IsOptional() + @IsObject() + config?: Record; + + @IsOptional() + @IsBoolean() + isDefault?: boolean; + + @IsOptional() + @IsBoolean() + isFallback?: boolean; + + @IsOptional() + @IsNumber() + @Min(0) + priority?: number; + + @IsOptional() + @IsNumber() + @Min(1) + maxRequestsPerMinute?: number; + + @IsOptional() + @IsNumber() + @Min(1) + maxTokensPerRequest?: number; +} + +/** + * DTO for enabling/disabling provider + */ +class SetProviderEnabledDto { + @IsBoolean() + enabled!: boolean; +} + +// ============================================================================ +// Enterprise Controller +// ============================================================================ + +@ApiTags('enterprise') +@Controller('enterprise') +export class EnterpriseController { + constructor( + private readonly tenantAdminService: TenantAdminService, + private readonly auditExportService: AuditExportService, + private readonly complianceService: ComplianceService, + private readonly ssoService: SSOService, + private readonly llmProviderService: LLMProviderService, + ) {} + + // ========================================================================== + // Tenant Administration Endpoints + // ========================================================================== + + @Post('tenants') + @HttpCode(HttpStatus.CREATED) + @Throttle({ default: { limit: 10, ttl: 60000 } }) + @ApiOperation({ summary: 'Create a new tenant' }) + @ApiResponse({ status: 201, description: 'Tenant created successfully' }) + async createTenant(@Body() dto: CreateTenantDto) { + const input: CreateTenantInput = dto; + const tenant = await this.tenantAdminService.createTenant(input); + return { success: true, data: tenant }; + } + + @Get('tenants') + @ApiOperation({ summary: 'List all tenants' }) + @ApiQuery({ name: 'status', required: false, enum: TenantStatus }) + @ApiQuery({ name: 'plan', required: false, enum: TenantPlan }) + @ApiQuery({ name: 'search', required: false }) + @ApiQuery({ name: 'limit', required: false }) + @ApiQuery({ name: 'offset', required: false }) + async listTenants( + @Query('status') status?: TenantStatus, + @Query('plan') plan?: TenantPlan, + @Query('search') search?: string, + @Query('limit') limit?: string, + @Query('offset') offset?: string, + ) { + const result = await this.tenantAdminService.listTenants({ + status, + plan, + search, + limit: limit ? parseInt(limit, 10) : undefined, + offset: offset ? parseInt(offset, 10) : undefined, + }); + return { success: true, ...result }; + } + + @Get('tenants/:tenantId') + @ApiOperation({ summary: 'Get tenant details' }) + @ApiParam({ name: 'tenantId', description: 'Tenant ID' }) + async getTenant(@Param('tenantId') tenantId: string) { + const tenant = await this.tenantAdminService.getTenant(tenantId); + return { success: true, data: tenant }; + } + + @Put('tenants/:tenantId') + @ApiOperation({ summary: 'Update tenant' }) + @ApiParam({ name: 'tenantId', description: 'Tenant ID' }) + async updateTenant( + @Param('tenantId') tenantId: string, + @Body() dto: UpdateTenantDto, + ) { + const input: UpdateTenantInput = dto; + const tenant = await this.tenantAdminService.updateTenant(tenantId, input); + return { success: true, data: tenant }; + } + + @Delete('tenants/:tenantId') + @HttpCode(HttpStatus.NO_CONTENT) + @ApiOperation({ summary: 'Delete tenant' }) + @ApiParam({ name: 'tenantId', description: 'Tenant ID' }) + @ApiQuery({ name: 'hard', required: false, description: 'Perform hard delete' }) + async deleteTenant( + @Param('tenantId') tenantId: string, + @Query('hard') hard?: string, + ) { + await this.tenantAdminService.deleteTenant(tenantId, hard === 'true'); + } + + @Post('tenants/:tenantId/suspend') + @ApiOperation({ summary: 'Suspend a tenant' }) + @ApiParam({ name: 'tenantId', description: 'Tenant ID' }) + async suspendTenant( + @Param('tenantId') tenantId: string, + @Body() dto: SuspendTenantDto, + ) { + const tenant = await this.tenantAdminService.suspendTenant(tenantId, dto.reason); + return { success: true, data: tenant }; + } + + @Post('tenants/:tenantId/reactivate') + @ApiOperation({ summary: 'Reactivate a suspended tenant' }) + @ApiParam({ name: 'tenantId', description: 'Tenant ID' }) + async reactivateTenant(@Param('tenantId') tenantId: string) { + const tenant = await this.tenantAdminService.reactivateTenant(tenantId); + return { success: true, data: tenant }; + } + + @Get('tenants/:tenantId/settings') + @ApiOperation({ summary: 'Get tenant settings' }) + async getTenantSettings(@Param('tenantId') tenantId: string) { + const settings = await this.tenantAdminService.getSettings(tenantId); + return { success: true, data: settings }; + } + + @Put('tenants/:tenantId/settings') + @ApiOperation({ summary: 'Update tenant settings' }) + async updateTenantSettings( + @Param('tenantId') tenantId: string, + @Body() dto: TenantSettingsDto, + ) { + const input: TenantSettingsInput = dto; + const settings = await this.tenantAdminService.updateSettings(tenantId, input); + return { success: true, data: settings }; + } + + @Get('tenants/:tenantId/quotas') + @ApiOperation({ summary: 'Get tenant quotas' }) + async getTenantQuotas(@Param('tenantId') tenantId: string) { + const quotas = await this.tenantAdminService.getQuotas(tenantId); + return { success: true, data: quotas }; + } + + @Put('tenants/:tenantId/quotas') + @ApiOperation({ summary: 'Update tenant quotas' }) + async updateTenantQuotas( + @Param('tenantId') tenantId: string, + @Body() dto: TenantQuotaDto, + ) { + // Convert storageLimit from number to bigint if provided + const input: TenantQuotaInput = { + ...dto, + storageLimit: dto.storageLimit !== undefined ? BigInt(dto.storageLimit) : undefined, + }; + const quotas = await this.tenantAdminService.updateQuotas(tenantId, input); + return { success: true, data: quotas }; + } + + @Get('tenants/:tenantId/usage') + @ApiOperation({ summary: 'Get tenant usage statistics' }) + async getTenantUsage(@Param('tenantId') tenantId: string) { + const usage = await this.tenantAdminService.getUsageStats(tenantId); + return { success: true, data: usage }; + } + + @Post('tenants/:tenantId/plan') + @ApiOperation({ summary: 'Change tenant plan' }) + async changeTenantPlan( + @Param('tenantId') tenantId: string, + @Body() dto: ChangePlanDto, + ) { + const tenant = await this.tenantAdminService.changePlan(tenantId, dto.plan); + return { success: true, data: tenant }; + } + + @Get('plans') + @ApiOperation({ summary: 'Get available plans' }) + getAvailablePlans() { + return { success: true, data: this.tenantAdminService.getAvailablePlans() }; + } + + // ========================================================================== + // Audit Export Endpoints + // ========================================================================== + + @Get('audit/export') + @ApiOperation({ summary: 'Export audit logs' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + @ApiQuery({ name: 'format', required: false, enum: ExportFormat }) + @ApiQuery({ name: 'startDate', required: false }) + @ApiQuery({ name: 'endDate', required: false }) + @ApiQuery({ name: 'eventTypes', required: false }) + async exportAuditLogs( + @Headers('x-tenant-id') tenantId: string, + @Query('format') format?: ExportFormat, + @Query('startDate') startDate?: string, + @Query('endDate') endDate?: string, + @Query('eventTypes') eventTypes?: string, + @Res() res?: Response, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const exportFormat = format || ExportFormat.JSON; + const result = await this.auditExportService.exportToString({ + tenantId, + format: exportFormat, + startDate: startDate ? new Date(startDate) : undefined, + endDate: endDate ? new Date(endDate) : undefined, + eventTypes: eventTypes ? eventTypes.split(',') : undefined, + }); + + const contentType = { + [ExportFormat.JSON]: 'application/json', + [ExportFormat.CSV]: 'text/csv', + [ExportFormat.NDJSON]: 'application/x-ndjson', + [ExportFormat.SPLUNK]: 'application/json', + [ExportFormat.ELASTICSEARCH]: 'application/x-ndjson', + }[exportFormat]; + + const timestamp = new Date().toISOString().split('T')[0]; + const filename = `audit-logs-${timestamp}.${exportFormat}`; + + if (res) { + res.setHeader('Content-Type', contentType); + res.setHeader('Content-Disposition', `attachment; filename="${filename}"`); + res.send(result.data); + } + } + + @Get('audit/export/stream') + @ApiOperation({ summary: 'Stream export audit logs (for large datasets)' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async streamAuditLogs( + @Headers('x-tenant-id') tenantId: string, + @Query('format') format?: ExportFormat, + @Query('startDate') startDate?: string, + @Query('endDate') endDate?: string, + @Res({ passthrough: true }) res?: Response, + ): Promise { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const exportFormat = format || ExportFormat.NDJSON; + const stream = this.auditExportService.createExportStream({ + tenantId, + format: exportFormat, + startDate: startDate ? new Date(startDate) : undefined, + endDate: endDate ? new Date(endDate) : undefined, + }); + + const timestamp = new Date().toISOString().split('T')[0]; + const filename = `audit-logs-${timestamp}.${exportFormat}`; + + if (res) { + res.setHeader('Content-Disposition', `attachment; filename="${filename}"`); + } + + return new StreamableFile(stream); + } + + @Get('audit/stats') + @ApiOperation({ summary: 'Get audit export statistics' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async getAuditStats( + @Headers('x-tenant-id') tenantId: string, + @Query('days') days?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const stats = await this.auditExportService.getExportStats( + tenantId, + days ? parseInt(days, 10) : 30, + ); + return { success: true, data: stats }; + } + + // ========================================================================== + // Compliance Endpoints + // ========================================================================== + + @Post('compliance/reports') + @HttpCode(HttpStatus.CREATED) + @Throttle({ default: { limit: 5, ttl: 60000 } }) + @ApiOperation({ summary: 'Generate a compliance report' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async generateComplianceReport( + @Headers('x-tenant-id') tenantId: string, + @Body() dto: GenerateReportDto, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const report = await this.complianceService.generateReport({ + tenantId, + reportType: dto.reportType, + reportName: dto.reportName, + startDate: new Date(dto.startDate), + endDate: new Date(dto.endDate), + }); + + return { success: true, data: report }; + } + + @Get('compliance/reports') + @ApiOperation({ summary: 'List compliance reports' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async listComplianceReports( + @Headers('x-tenant-id') tenantId: string, + @Query('reportType') reportType?: ReportType, + @Query('status') status?: string, + @Query('limit') limit?: string, + @Query('offset') offset?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const result = await this.complianceService.listReports(tenantId, { + reportType, + status, + limit: limit ? parseInt(limit, 10) : undefined, + offset: offset ? parseInt(offset, 10) : undefined, + }); + + return { success: true, ...result }; + } + + @Get('compliance/reports/:reportId') + @ApiOperation({ summary: 'Get compliance report details' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async getComplianceReport( + @Headers('x-tenant-id') tenantId: string, + @Param('reportId') reportId: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const report = await this.complianceService.getReport(tenantId, reportId); + return { success: true, data: report }; + } + + @Get('compliance/report-types') + @ApiOperation({ summary: 'Get available report types' }) + getReportTypes() { + return { success: true, data: this.complianceService.getReportTypes() }; + } + + @Post('compliance/processing-records') + @HttpCode(HttpStatus.CREATED) + @ApiOperation({ summary: 'Create GDPR data processing record' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async createProcessingRecord( + @Headers('x-tenant-id') tenantId: string, + @Body() dto: DataProcessingRecordDto, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const input: DataProcessingRecordInput = dto; + const record = await this.complianceService.createDataProcessingRecord(tenantId, input); + return { success: true, data: record }; + } + + @Get('compliance/processing-records') + @ApiOperation({ summary: 'List GDPR data processing records' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async listProcessingRecords( + @Headers('x-tenant-id') tenantId: string, + @Query('status') status?: string, + @Query('legalBasis') legalBasis?: LegalBasis, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const records = await this.complianceService.listDataProcessingRecords(tenantId, { + status, + legalBasis, + }); + + return { success: true, data: records }; + } + + @Post('compliance/dsar') + @HttpCode(HttpStatus.OK) + @Throttle({ default: { limit: 3, ttl: 60000 } }) + @ApiOperation({ summary: 'Process Data Subject Access Request' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async processDSAR( + @Headers('x-tenant-id') tenantId: string, + @Body() dto: ProcessDSARDto, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const result = await this.complianceService.processDSAR({ + tenantId, + ...dto, + }); + + return { success: true, data: result }; + } + + // ========================================================================== + // SSO Endpoints + // ========================================================================== + + @Post('sso/configure') + @ApiOperation({ summary: 'Configure SSO for tenant' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async configureSSO( + @Headers('x-tenant-id') tenantId: string, + @Body() dto: SSOConfigDto, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const input: SSOConfigInput = dto; + const config = await this.ssoService.configureSSOv(tenantId, input); + return { success: true, data: config }; + } + + @Get('sso/config') + @ApiOperation({ summary: 'Get SSO configuration' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async getSSOConfig(@Headers('x-tenant-id') tenantId: string) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const config = await this.ssoService.getSSOConfig(tenantId); + return { success: true, data: config }; + } + + @Post('sso/verify') + @ApiOperation({ summary: 'Verify SSO configuration' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async verifySSOConfig(@Headers('x-tenant-id') tenantId: string) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const result = await this.ssoService.verifySSOConfig(tenantId); + return { success: true, ...result }; + } + + @Post('sso/enable') + @ApiOperation({ summary: 'Enable SSO' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async enableSSO(@Headers('x-tenant-id') tenantId: string) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const config = await this.ssoService.enableSSO(tenantId); + return { success: true, data: config }; + } + + @Post('sso/disable') + @ApiOperation({ summary: 'Disable SSO' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async disableSSO(@Headers('x-tenant-id') tenantId: string) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const config = await this.ssoService.disableSSO(tenantId); + return { success: true, data: config }; + } + + @Get('sso/metadata/:tenantId') + @ApiOperation({ summary: 'Get SP metadata XML' }) + async getSPMetadata(@Param('tenantId') tenantId: string, @Res() res: Response) { + const xml = await this.ssoService.generateSPMetadataXML(tenantId); + res.setHeader('Content-Type', 'application/xml'); + res.send(xml); + } + + @Get('sso/attribute-mappings') + @ApiOperation({ summary: 'Get default attribute mappings' }) + getAttributeMappings() { + return { success: true, data: this.ssoService.getDefaultAttributeMappings() }; + } + + // ========================================================================== + // LLM Provider Endpoints + // ========================================================================== + + @Post('llm/providers') + @HttpCode(HttpStatus.CREATED) + @ApiOperation({ summary: 'Configure LLM provider' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async configureProvider( + @Headers('x-tenant-id') tenantId: string, + @Body() dto: LLMProviderConfigDto, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const input: ProviderConfig = dto; + const config = await this.llmProviderService.configureProvider(tenantId, input); + return { success: true, data: config }; + } + + @Get('llm/providers') + @ApiOperation({ summary: 'List configured LLM providers' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async listProviders(@Headers('x-tenant-id') tenantId: string) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const providers = await this.llmProviderService.getProviders(tenantId); + return { success: true, data: providers }; + } + + @Delete('llm/providers/:providerId') + @HttpCode(HttpStatus.NO_CONTENT) + @ApiOperation({ summary: 'Delete LLM provider' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async deleteProvider( + @Headers('x-tenant-id') tenantId: string, + @Param('providerId') providerId: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + await this.llmProviderService.deleteProvider(tenantId, providerId); + } + + @Post('llm/providers/:providerId/test') + @ApiOperation({ summary: 'Test LLM provider configuration' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async testProvider( + @Headers('x-tenant-id') tenantId: string, + @Param('providerId') providerId: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const result = await this.llmProviderService.testProvider(tenantId, providerId); + return result; + } + + @Put('llm/providers/:providerId/enabled') + @ApiOperation({ summary: 'Enable/disable LLM provider' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async setProviderEnabled( + @Headers('x-tenant-id') tenantId: string, + @Param('providerId') providerId: string, + @Body() dto: SetProviderEnabledDto, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const result = await this.llmProviderService.setProviderEnabled(tenantId, providerId, dto.enabled); + return { success: true, ...result }; + } + + @Get('llm/usage') + @ApiOperation({ summary: 'Get LLM usage statistics' }) + @ApiHeader({ name: 'x-tenant-id', required: true }) + async getLLMUsage(@Headers('x-tenant-id') tenantId: string) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header required'); + } + + const usage = await this.llmProviderService.getUsageStats(tenantId); + return { success: true, data: usage }; + } + + @Get('llm/available-providers') + @ApiOperation({ summary: 'Get available LLM providers' }) + getAvailableProviders() { + return { success: true, data: this.llmProviderService.getAvailableProviders() }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/events.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/events.controller.ts new file mode 100644 index 000000000..95e878f9e --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/events.controller.ts @@ -0,0 +1,64 @@ +import { BadRequestException, Controller, Get, Headers, HttpCode, HttpStatus, Query } from '@nestjs/common'; +import { IsInt, IsOptional, IsString, Max, Min } from 'class-validator'; +import { Type } from 'class-transformer'; +import { OutboxService } from '../services/outbox.service'; + +class ReplayEventsQueryDto { + @IsOptional() + @IsString() + cursor?: string; + + @IsOptional() + @Type(() => Number) + @IsInt() + @Min(1) + @Max(500) + limit?: number; + + @IsOptional() + @IsString() + eventType?: string; +} + +@Controller() +export class EventsController { + constructor(private readonly outboxService: OutboxService) {} + + /** + * GET /api/v1/events?cursor=&limit= + * Cursor-based replay of outbox events for a tenant (monotonic eventSequence). + */ + @Get('events') + @HttpCode(HttpStatus.OK) + async replay( + @Query() query: ReplayEventsQueryDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + let cursor: bigint | undefined; + if (query.cursor !== undefined) { + try { + cursor = BigInt(query.cursor); + if (cursor < 0n) throw new Error('cursor must be >= 0'); + } catch { + throw new BadRequestException('cursor must be an integer string (>= 0)'); + } + } + + const result = await this.outboxService.replayEvents({ + tenantId, + cursor, + limit: query.limit, + eventType: query.eventType, + }); + + return { + success: true, + data: result, + }; + } +} + diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/git-integration.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/git-integration.controller.ts new file mode 100644 index 000000000..43aa75415 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/git-integration.controller.ts @@ -0,0 +1,641 @@ +/** + * Git Integration Controller + * Phase 8 (v5.3.0): External Integrations - GitHub/GitLab webhook management + * + * Endpoints: + * - GET /api/v1/git/integrations List integrations for tenant + * - POST /api/v1/git/integrations Create Git integration + * - GET /api/v1/git/integrations/:id Get integration details + * - PUT /api/v1/git/integrations/:id Update integration + * - DELETE /api/v1/git/integrations/:id Delete integration + * - POST /api/v1/git/integrations/:id/test Test integration + * - POST /api/v1/git/integrations/:id/rotate Rotate webhook secret + * - GET /api/v1/git/integrations/:id/events Get event history + * - POST /api/v1/git/webhooks/:id Receive webhook from GitHub/GitLab + * - GET /api/v1/git/providers Get available providers + * - GET /api/v1/git/events Get available event types + */ + +import { + Controller, + Get, + Post, + Put, + Delete, + Param, + Body, + Query, + HttpException, + HttpStatus, + Logger, + Headers, + RawBodyRequest, + Req, +} from '@nestjs/common'; +import { + ApiTags, + ApiOperation, + ApiResponse, + ApiHeader, + ApiQuery, +} from '@nestjs/swagger'; +import { Request } from 'express'; +import { + GitIntegrationService, + GitProvider, + GitEventType, +} from '../services/git-integration.service'; + +/** + * DTOs for Git integration endpoints + */ +interface CreateIntegrationDto { + provider: GitProvider; + name: string; + owner: string; + repository: string; + branch?: string; + subscribedEvents: string[]; + triggerConfig: { + enabled: boolean; + events: string[]; + branches?: string[]; + paths?: string[]; + goalTemplateId?: string; + goalPattern?: string; + constraints?: Record; + variableMapping?: Record; + }; +} + +interface UpdateIntegrationDto { + name?: string; + branch?: string; + subscribedEvents?: string[]; + triggerConfig?: { + enabled?: boolean; + events?: string[]; + branches?: string[]; + paths?: string[]; + goalTemplateId?: string; + goalPattern?: string; + constraints?: Record; + variableMapping?: Record; + }; + enabled?: boolean; +} + +interface ListIntegrationsQuery { + provider?: GitProvider; + limit?: string; + offset?: string; +} + +@ApiTags('git') +@Controller('git') +export class GitIntegrationController { + private readonly logger = new Logger(GitIntegrationController.name); + + constructor(private readonly gitService: GitIntegrationService) {} + + /** + * GET /api/v1/git/integrations + * List Git integrations for a tenant + */ + @Get('integrations') + @ApiOperation({ summary: 'List Git integrations' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiQuery({ name: 'provider', required: false, enum: GitProvider }) + @ApiQuery({ name: 'limit', required: false }) + @ApiQuery({ name: 'offset', required: false }) + @ApiResponse({ status: 200, description: 'Integrations retrieved successfully' }) + async listIntegrations( + @Query() query: ListIntegrationsQuery, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const limit = parseInt(query.limit || '20', 10); + const offset = parseInt(query.offset || '0', 10); + + const result = await this.gitService.listIntegrations(tenantId, { + limit, + offset, + provider: query.provider, + }); + + return { + success: true, + integrations: result.integrations.map((i) => this.formatIntegration(i)), + pagination: result.pagination, + }; + } + + /** + * POST /api/v1/git/integrations + * Create a new Git integration + */ + @Post('integrations') + @ApiOperation({ summary: 'Create Git integration' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 201, description: 'Integration created successfully' }) + async createIntegration( + @Body() body: CreateIntegrationDto, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Validate required fields + if (!body.provider) { + throw new HttpException('provider is required', HttpStatus.BAD_REQUEST); + } + + if (!Object.values(GitProvider).includes(body.provider)) { + throw new HttpException( + `Invalid provider. Valid providers: ${Object.values(GitProvider).join(', ')}`, + HttpStatus.BAD_REQUEST, + ); + } + + if (!body.name) { + throw new HttpException('name is required', HttpStatus.BAD_REQUEST); + } + + if (!body.owner) { + throw new HttpException('owner is required', HttpStatus.BAD_REQUEST); + } + + if (!body.repository) { + throw new HttpException('repository is required', HttpStatus.BAD_REQUEST); + } + + if (!body.subscribedEvents || body.subscribedEvents.length === 0) { + throw new HttpException('subscribedEvents array is required', HttpStatus.BAD_REQUEST); + } + + // Generate webhook secret + const crypto = await import('crypto'); + const webhookSecret = crypto.randomBytes(32).toString('hex'); + + try { + const result = await this.gitService.createIntegration({ + tenantId, + provider: body.provider, + name: body.name, + owner: body.owner, + repository: body.repository, + branch: body.branch, + webhookSecret, + subscribedEvents: body.subscribedEvents, + triggerConfig: body.triggerConfig, + }); + + this.logger.log( + `Created ${body.provider} integration ${result.integration.id} for ${body.owner}/${body.repository}`, + ); + + return { + success: true, + integration: this.formatIntegrationWithSecret(result.integration, webhookSecret), + webhookUrl: result.webhookUrl, + message: `Integration created successfully. Configure your ${body.provider} webhook to send events to the webhookUrl. Save the webhookSecret - it will not be shown again.`, + }; + } catch (error: any) { + this.logger.error(`Failed to create Git integration: ${error.message}`); + throw new HttpException( + `Failed to create integration: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/git/integrations/:id + * Get integration details + */ + @Get('integrations/:id') + @ApiOperation({ summary: 'Get Git integration details' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Integration retrieved successfully' }) + async getIntegration( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const integration = await this.gitService.getIntegration(id); + + if (!integration) { + throw new HttpException('Integration not found', HttpStatus.NOT_FOUND); + } + + if (integration.tenantId !== tenantId) { + throw new HttpException('Integration not found', HttpStatus.NOT_FOUND); + } + + // Cast to any to access included events relation + const integrationWithEvents = integration as any; + + return { + success: true, + integration: this.formatIntegration(integration), + webhookUrl: this.gitService.getWebhookUrl(id), + recentEvents: integrationWithEvents.events?.map((e: any) => ({ + id: e.id, + eventType: e.eventType, + eventAction: e.eventAction, + ref: e.ref, + commitSha: e.commitSha, + prNumber: e.prNumber, + processed: e.processed, + goalRunId: e.goalRunId, + error: e.error, + receivedAt: e.receivedAt, + })), + }; + } + + /** + * PUT /api/v1/git/integrations/:id + * Update Git integration + */ + @Put('integrations/:id') + @ApiOperation({ summary: 'Update Git integration' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Integration updated successfully' }) + async updateIntegration( + @Param('id') id: string, + @Body() body: UpdateIntegrationDto, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Verify integration belongs to tenant + const existing = await this.gitService.getIntegration(id); + + if (!existing || existing.tenantId !== tenantId) { + throw new HttpException('Integration not found', HttpStatus.NOT_FOUND); + } + + try { + const integration = await this.gitService.updateIntegration(id, { + name: body.name, + branch: body.branch, + subscribedEvents: body.subscribedEvents, + triggerConfig: body.triggerConfig as any, + enabled: body.enabled, + }); + + this.logger.log(`Updated Git integration ${id}`); + + return { + success: true, + integration: this.formatIntegration(integration), + message: 'Integration updated successfully', + }; + } catch (error: any) { + this.logger.error(`Failed to update integration ${id}: ${error.message}`); + throw new HttpException( + `Failed to update integration: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * DELETE /api/v1/git/integrations/:id + * Delete Git integration + */ + @Delete('integrations/:id') + @ApiOperation({ summary: 'Delete Git integration' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Integration deleted successfully' }) + async deleteIntegration( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Verify integration belongs to tenant + const existing = await this.gitService.getIntegration(id); + + if (!existing || existing.tenantId !== tenantId) { + throw new HttpException('Integration not found', HttpStatus.NOT_FOUND); + } + + try { + await this.gitService.deleteIntegration(id); + + this.logger.log(`Deleted Git integration ${id}`); + + return { + success: true, + message: 'Integration deleted successfully. Remember to remove the webhook from your Git provider.', + }; + } catch (error: any) { + this.logger.error(`Failed to delete integration ${id}: ${error.message}`); + throw new HttpException( + `Failed to delete integration: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/git/integrations/:id/test + * Test Git integration + */ + @Post('integrations/:id/test') + @ApiOperation({ summary: 'Test Git integration configuration' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Integration test result' }) + async testIntegration( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Verify integration belongs to tenant + const existing = await this.gitService.getIntegration(id); + + if (!existing || existing.tenantId !== tenantId) { + throw new HttpException('Integration not found', HttpStatus.NOT_FOUND); + } + + const result = await this.gitService.testIntegration(id); + + return { + success: result.success, + message: result.message, + }; + } + + /** + * POST /api/v1/git/integrations/:id/rotate + * Rotate webhook secret + */ + @Post('integrations/:id/rotate') + @ApiOperation({ summary: 'Rotate webhook secret' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Secret rotated successfully' }) + async rotateSecret( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Verify integration belongs to tenant + const existing = await this.gitService.getIntegration(id); + + if (!existing || existing.tenantId !== tenantId) { + throw new HttpException('Integration not found', HttpStatus.NOT_FOUND); + } + + try { + const result = await this.gitService.rotateWebhookSecret(id); + + this.logger.log(`Rotated webhook secret for integration ${id}`); + + return { + success: true, + webhookSecret: result.secret, + message: 'Webhook secret rotated. Update your Git provider webhook configuration with the new secret.', + }; + } catch (error: any) { + this.logger.error(`Failed to rotate secret for integration ${id}: ${error.message}`); + throw new HttpException( + `Failed to rotate secret: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/git/integrations/:id/events + * Get event history + */ + @Get('integrations/:id/events') + @ApiOperation({ summary: 'Get Git integration event history' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiQuery({ name: 'limit', required: false }) + @ApiQuery({ name: 'offset', required: false }) + @ApiResponse({ status: 200, description: 'Events retrieved successfully' }) + async getEvents( + @Param('id') id: string, + @Query('limit') limit?: string, + @Query('offset') offset?: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Verify integration belongs to tenant + const existing = await this.gitService.getIntegration(id); + + if (!existing || existing.tenantId !== tenantId) { + throw new HttpException('Integration not found', HttpStatus.NOT_FOUND); + } + + const result = await this.gitService.getEventHistory(id, { + limit: parseInt(limit || '50', 10), + offset: parseInt(offset || '0', 10), + }); + + return { + success: true, + events: result.events.map((e: any) => ({ + id: e.id, + eventType: e.eventType, + eventAction: e.eventAction, + ref: e.ref, + commitSha: e.commitSha, + prNumber: e.prNumber, + processed: e.processed, + goalRunId: e.goalRunId, + error: e.error, + createdAt: e.createdAt, + })), + pagination: result.pagination, + }; + } + + /** + * POST /api/v1/git/webhooks/:id + * Receive webhook from GitHub/GitLab + * This endpoint is called by the Git provider when events occur + */ + @Post('webhooks/:id') + @ApiOperation({ summary: 'Receive webhook from Git provider (GitHub/GitLab)' }) + @ApiResponse({ status: 200, description: 'Webhook processed successfully' }) + async receiveWebhook( + @Param('id') id: string, + @Req() req: RawBodyRequest, + @Body() body: Record, + @Headers() headers: Record, + ) { + // Get raw body for signature verification + const rawBody = req.rawBody?.toString() || JSON.stringify(body); + + try { + const result = await this.gitService.processWebhook( + id, + headers, + rawBody, + body, + ); + + if (result.success) { + return { + success: true, + eventId: result.eventId, + goalRunId: result.goalRunId, + message: result.goalRunId + ? 'Webhook processed and goal run triggered' + : 'Webhook processed', + }; + } else { + // Return 200 for invalid signature to not leak info + // but include error in response + return { + success: false, + error: result.error, + }; + } + } catch (error: any) { + this.logger.error(`Webhook processing error: ${error.message}`); + // Always return 200 to Git providers to prevent retries + return { + success: false, + error: 'Internal processing error', + }; + } + } + + /** + * GET /api/v1/git/providers + * Get available Git providers + */ + @Get('providers') + @ApiOperation({ summary: 'Get available Git providers' }) + @ApiResponse({ status: 200, description: 'Providers retrieved' }) + getProviders() { + return { + success: true, + providers: [ + { + provider: GitProvider.GITHUB, + name: 'GitHub', + description: 'GitHub repositories and organizations', + webhookSetup: { + url: 'Settings > Webhooks > Add webhook', + contentType: 'application/json', + signatureHeader: 'X-Hub-Signature-256', + events: [ + 'push', + 'pull_request', + 'pull_request_review', + 'issues', + 'issue_comment', + 'release', + 'workflow_run', + ], + }, + }, + { + provider: GitProvider.GITLAB, + name: 'GitLab', + description: 'GitLab repositories and groups', + webhookSetup: { + url: 'Settings > Webhooks', + contentType: 'application/json', + signatureHeader: 'X-Gitlab-Token', + events: [ + 'push', + 'merge_request', + 'pipeline', + 'tag_push', + 'note', + ], + }, + }, + ], + }; + } + + /** + * GET /api/v1/git/events + * Get available Git event types + */ + @Get('events') + @ApiOperation({ summary: 'Get available Git event types' }) + @ApiResponse({ status: 200, description: 'Event types retrieved' }) + getEventTypes() { + return { + success: true, + events: { + github: [ + { type: 'push', description: 'Push to repository' }, + { type: 'pull_request', description: 'Pull request events', actions: ['opened', 'closed', 'merged', 'synchronize', 'reopened'] }, + { type: 'pull_request_review', description: 'PR review events', actions: ['submitted', 'approved', 'changes_requested'] }, + { type: 'issues', description: 'Issue events', actions: ['opened', 'closed', 'edited'] }, + { type: 'issue_comment', description: 'Issue/PR comments', actions: ['created', 'edited'] }, + { type: 'release', description: 'Release events', actions: ['published', 'created'] }, + { type: 'workflow_run', description: 'GitHub Actions workflow', actions: ['completed', 'requested'] }, + ], + gitlab: [ + { type: 'push', description: 'Push to repository' }, + { type: 'merge_request', description: 'Merge request events', actions: ['open', 'close', 'merge', 'update'] }, + { type: 'pipeline', description: 'CI/CD pipeline events', actions: ['pending', 'running', 'success', 'failed'] }, + { type: 'tag_push', description: 'Tag push events' }, + { type: 'note', description: 'Comment events' }, + ], + }, + }; + } + + /** + * Format integration for response (hide sensitive data) + */ + private formatIntegration(integration: any) { + return { + id: integration.id, + tenantId: integration.tenantId, + provider: integration.provider, + name: integration.name, + owner: integration.owner, + repository: integration.repository, + branch: integration.branch, + webhookId: integration.webhookId, + subscribedEvents: integration.subscribedEvents, + triggerConfig: integration.triggerConfig, + enabled: integration.enabled, + createdAt: integration.createdAt, + updatedAt: integration.updatedAt, + }; + } + + /** + * Format integration with secret (only for create) + */ + private formatIntegrationWithSecret(integration: any, secret: string) { + return { + ...this.formatIntegration(integration), + webhookSecret: secret, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/goal-run.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/goal-run.controller.ts new file mode 100644 index 000000000..32f18fdc1 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/goal-run.controller.ts @@ -0,0 +1,533 @@ +/** + * Goal Run Controller + * v1.0.0: REST API for Manus-style goal-first orchestration + * + * Endpoints: + * - POST /api/v1/runs/from-goal - Create goal run + * - GET /api/v1/goal-runs/:id - Get goal run + * - GET /api/v1/goal-runs - List goal runs + * - GET /api/v1/goal-runs/:id/plan - Get current plan + * - GET /api/v1/goal-runs/:id/plan/history - Get plan history + * - POST /api/v1/goal-runs/:id/steering - Send steering command + * - GET /api/v1/goal-runs/:id/activity - Get activity feed + * - POST /api/v1/goal-runs/:id/pause - Pause goal run + * - POST /api/v1/goal-runs/:id/resume - Resume goal run + * - POST /api/v1/goal-runs/:id/cancel - Cancel goal run + */ + +import { + Controller, + Get, + Post, + Body, + Param, + Query, + HttpCode, + HttpStatus, + BadRequestException, + Headers, +} from '@nestjs/common'; +import { Throttle, SkipThrottle } from '@nestjs/throttler'; +import { + IsString, + IsOptional, + IsBoolean, + IsObject, + IsEnum, + MinLength, +} from 'class-validator'; +import { + GoalRunService, + CreateGoalRunInput, + GoalConstraints, + SteeringInput, + GoalRunFilters, + GoalRunStatus, + GoalRunPhase, +} from '../services/goal-run.service'; +import { PlannerService } from '../services/planner.service'; +import { OrchestratorLoopService } from '../services/orchestrator-loop.service'; + +// ============================================================================ +// DTOs with class-validator decorators for ValidationPipe compatibility +// ============================================================================ + +/** + * DTO for creating a new goal run + */ +class CreateGoalRunDto { + @IsString() + @MinLength(10, { message: 'Goal must be at least 10 characters' }) + goal!: string; + + @IsOptional() + @IsObject() + constraints?: GoalConstraints; + + @IsOptional() + @IsBoolean() + autoStart?: boolean; +} + +/** + * Steering message types for goal run control + */ +enum SteeringMessageType { + PAUSE = 'PAUSE', + RESUME = 'RESUME', + CANCEL = 'CANCEL', + MODIFY_PLAN = 'MODIFY_PLAN', + APPROVE = 'APPROVE', + REJECT = 'REJECT', + INSTRUCTION = 'INSTRUCTION', +} + +/** + * DTO for sending steering commands to a goal run + */ +class SteeringMessageDto { + @IsEnum(SteeringMessageType) + type!: SteeringMessageType; + + @IsOptional() + @IsString() + content?: string; + + @IsOptional() + @IsString() + targetItemId?: string; +} + +/** + * DTO for filtering goal runs list + */ +class GoalRunFiltersDto { + @IsOptional() + @IsString() + status?: string; + + @IsOptional() + @IsString() + phase?: string; + + @IsOptional() + @IsString() + page?: string; + + @IsOptional() + @IsString() + pageSize?: string; +} + +/** + * DTO for pagination parameters + */ +class PaginationDto { + @IsOptional() + @IsString() + page?: string; + + @IsOptional() + @IsString() + pageSize?: string; +} + +/** + * DTO for cancelling a goal run + */ +class CancelGoalRunDto { + @IsOptional() + @IsString() + reason?: string; +} + +/** + * DTO for estimating goal complexity + */ +class EstimateComplexityDto { + @IsString() + @MinLength(10, { message: 'Goal must be at least 10 characters' }) + goal!: string; +} + +/** + * DTO for rejecting a step + */ +class RejectStepDto { + @IsString() + reason!: string; +} + +// v5.11.3: Removed deprecated api/v1 backward compatibility prefix (was scheduled for v5.6.0) +@Controller() +export class GoalRunController { + constructor( + private goalRunService: GoalRunService, + private plannerService: PlannerService, + private orchestratorLoopService: OrchestratorLoopService, + ) {} + + /** + * POST /api/v1/runs/from-goal + * Create a new goal-based run + * Rate limited: 5 per minute, 20 per hour per tenant (expensive operation) + */ + @Post('runs/from-goal') + @HttpCode(HttpStatus.CREATED) + @Throttle({ default: { limit: 5, ttl: 60000 } }) // 5 goal runs per minute + async createFromGoal( + @Body() dto: CreateGoalRunDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!dto.goal || dto.goal.trim().length < 10) { + throw new BadRequestException('Goal must be at least 10 characters'); + } + + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const input: CreateGoalRunInput = { + tenantId, + goal: dto.goal.trim(), + constraints: dto.constraints, + autoStart: dto.autoStart, + }; + + const goalRun = await this.goalRunService.createFromGoal(input); + + return { + success: true, + data: goalRun, + }; + } + + /** + * GET /api/v1/goal-runs/:id + * Get goal run by ID + */ + @Get('goal-runs/:id') + async getGoalRun(@Param('id') id: string) { + const goalRun = await this.goalRunService.findByIdWithPlan(id); + + // Add loop status + const loopStatus = await this.orchestratorLoopService.getLoopStatus(id); + + return { + success: true, + data: { + ...goalRun, + loopStatus, + }, + }; + } + + /** + * GET /api/v1/goal-runs + * List goal runs for tenant + */ + @Get('goal-runs') + async listGoalRuns( + @Query() query: GoalRunFiltersDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const filters: GoalRunFilters = { + status: query.status as GoalRunStatus | undefined, + phase: query.phase as GoalRunPhase | undefined, + page: query.page ? parseInt(query.page, 10) : 1, + pageSize: query.pageSize ? parseInt(query.pageSize, 10) : 20, + }; + + const result = await this.goalRunService.findByTenant(tenantId, filters); + + return { + success: true, + ...result, + }; + } + + /** + * GET /api/v1/goal-runs/:id/plan + * Get current plan for goal run + */ + @Get('goal-runs/:id/plan') + async getCurrentPlan(@Param('id') id: string) { + const plan = await this.goalRunService.getCurrentPlan(id); + + return { + success: true, + data: plan, + }; + } + + /** + * GET /api/v1/goal-runs/:id/plan/history + * Get plan version history + */ + @Get('goal-runs/:id/plan/history') + async getPlanHistory(@Param('id') id: string) { + const history = await this.goalRunService.getPlanHistory(id); + + return { + success: true, + data: history, + }; + } + + /** + * GET /api/v1/goal-runs/:id/plan/diff + * Get diff between two plan versions + */ + @Get('goal-runs/:id/plan/diff') + async getPlanDiff( + @Param('id') id: string, + @Query('from') fromVersion: string, + @Query('to') toVersion: string, + ) { + const history = await this.goalRunService.getPlanHistory(id); + + const fromPlan = history.find((p) => p.version === parseInt(fromVersion, 10)); + const toPlan = history.find((p) => p.version === parseInt(toVersion, 10)); + + if (!fromPlan || !toPlan) { + throw new BadRequestException('Invalid version numbers'); + } + + // Simple diff - compare items + const addedItems = toPlan.items.filter( + (toItem) => !fromPlan.items.some((fromItem) => fromItem.description === toItem.description), + ); + const removedItems = fromPlan.items.filter( + (fromItem) => !toPlan.items.some((toItem) => toItem.description === fromItem.description), + ); + const unchangedItems = toPlan.items.filter((toItem) => + fromPlan.items.some((fromItem) => fromItem.description === toItem.description), + ); + + return { + success: true, + data: { + fromVersion: parseInt(fromVersion, 10), + toVersion: parseInt(toVersion, 10), + replanReason: toPlan.replanReason, + changes: { + added: addedItems, + removed: removedItems, + unchanged: unchangedItems, + }, + }, + }; + } + + /** + * POST /api/v1/goal-runs/:id/steering + * Send steering command + */ + @Post('goal-runs/:id/steering') + @HttpCode(HttpStatus.OK) + async sendSteering( + @Param('id') id: string, + @Body() dto: SteeringMessageDto, + @Headers('x-user-id') userId?: string, + @Headers('x-user-email') userEmail?: string, + ) { + const input: SteeringInput = { + type: dto.type, + content: dto.content, + targetItemId: dto.targetItemId, + userId, + userEmail, + }; + + await this.goalRunService.submitSteering(id, input); + + return { + success: true, + message: `Steering command ${dto.type} sent`, + }; + } + + /** + * GET /api/v1/goal-runs/:id/activity + * Get activity feed + */ + @Get('goal-runs/:id/activity') + async getActivityFeed(@Param('id') id: string, @Query() query: PaginationDto) { + const options = { + page: query.page ? parseInt(query.page, 10) : 1, + pageSize: query.pageSize ? parseInt(query.pageSize, 10) : 50, + }; + + const result = await this.goalRunService.getActivityFeed(id, options); + + return { + success: true, + ...result, + }; + } + + /** + * POST /api/v1/goal-runs/:id/pause + * Pause goal run + */ + @Post('goal-runs/:id/pause') + @HttpCode(HttpStatus.OK) + async pauseGoalRun(@Param('id') id: string) { + const goalRun = await this.goalRunService.pauseGoalRun(id); + + return { + success: true, + data: goalRun, + }; + } + + /** + * POST /api/v1/goal-runs/:id/resume + * Resume goal run + */ + @Post('goal-runs/:id/resume') + @HttpCode(HttpStatus.OK) + async resumeGoalRun(@Param('id') id: string) { + const goalRun = await this.goalRunService.resumeGoalRun(id); + + return { + success: true, + data: goalRun, + }; + } + + /** + * POST /api/v1/goal-runs/:id/cancel + * Cancel goal run + */ + @Post('goal-runs/:id/cancel') + @HttpCode(HttpStatus.OK) + async cancelGoalRun(@Param('id') id: string, @Body() dto: CancelGoalRunDto) { + const goalRun = await this.goalRunService.cancelGoalRun(id, dto?.reason); + + return { + success: true, + data: goalRun, + }; + } + + /** + * GET /api/v1/goal-runs/:id/metrics + * Get goal run metrics + */ + @Get('goal-runs/:id/metrics') + async getMetrics(@Param('id') id: string) { + const metrics = await this.goalRunService.getMetrics(id); + + return { + success: true, + data: metrics, + }; + } + + /** + * POST /api/v1/goals/estimate-complexity + * Estimate complexity of a goal (preview before creating) + * Rate limited: 10 per minute (AI operation) + */ + @Post('goals/estimate-complexity') + @HttpCode(HttpStatus.OK) + @Throttle({ default: { limit: 10, ttl: 60000 } }) // 10 estimates per minute + async estimateComplexity(@Body() dto: EstimateComplexityDto) { + // DTO validation handles minimum length check + const estimate = await this.plannerService.estimateComplexity(dto.goal); + + return { + success: true, + data: estimate, + }; + } + + // ========================================== + // Phase 4: Live Desktop Control APIs + // ========================================== + + /** + * POST /api/v1/goal-runs/:id/intervene + * User takes control from agent + */ + @Post('goal-runs/:id/intervene') + @HttpCode(HttpStatus.OK) + async intervene( + @Param('id') id: string, + @Headers('x-user-id') userId?: string, + ) { + const goalRun = await this.goalRunService.intervene(id, userId); + + return { + success: true, + data: goalRun, + message: 'Control transferred to user', + }; + } + + /** + * POST /api/v1/goal-runs/:id/return-control + * User returns control to agent + */ + @Post('goal-runs/:id/return-control') + @HttpCode(HttpStatus.OK) + async returnControl( + @Param('id') id: string, + @Headers('x-user-id') userId?: string, + ) { + const goalRun = await this.goalRunService.returnControl(id, userId); + + return { + success: true, + data: goalRun, + message: 'Control returned to agent', + }; + } + + /** + * POST /api/v1/goal-runs/:id/steps/:stepId/approve + * Approve a step + */ + @Post('goal-runs/:id/steps/:stepId/approve') + @HttpCode(HttpStatus.OK) + async approveStep( + @Param('id') id: string, + @Param('stepId') stepId: string, + @Headers('x-user-id') userId?: string, + ) { + const step = await this.goalRunService.approveStep(id, stepId, userId); + + return { + success: true, + data: step, + message: 'Step approved', + }; + } + + /** + * POST /api/v1/goal-runs/:id/steps/:stepId/reject + * Reject a step with reason + */ + @Post('goal-runs/:id/steps/:stepId/reject') + @HttpCode(HttpStatus.OK) + async rejectStep( + @Param('id') id: string, + @Param('stepId') stepId: string, + @Body() dto: RejectStepDto, + @Headers('x-user-id') userId?: string, + ) { + // DTO validation handles required check via @IsString() + const step = await this.goalRunService.rejectStep(id, stepId, dto.reason, userId); + + return { + success: true, + data: step, + message: 'Step rejected', + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/health.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/health.controller.ts new file mode 100644 index 000000000..6b5356f02 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/health.controller.ts @@ -0,0 +1,186 @@ +/** + * Health Controller + * v2.1.0: Phase E maintenance mode status endpoints + * v2.0.0: Phase 7 Multi-Agent Orchestration + * + * Kubernetes health checks, readiness probes, leader status, + * maintenance mode status, and multi-agent orchestration health. + */ + +import { Controller, Get, Post, Body, HttpCode, HttpStatus } from '@nestjs/common'; +import { + HealthCheck, + HealthCheckService, + PrismaHealthIndicator, +} from '@nestjs/terminus'; +import { SkipThrottle } from '@nestjs/throttler'; +import { PrismaService } from '../services/prisma.service'; +import { LeaderElectionService } from '../services/leader-election.service'; +import { SchedulerService } from '../services/scheduler.service'; +import { AgentRegistryService } from '../services/agent-registry.service'; +import { AgentHealthService } from '../services/agent-health.service'; +import { MaintenanceModeService, MaintenanceState, EnterMaintenanceOptions } from '../services/maintenance-mode.service'; + +@Controller('health') +@SkipThrottle() // Health checks should not be rate limited +export class HealthController { + constructor( + private health: HealthCheckService, + private prismaHealth: PrismaHealthIndicator, + private prisma: PrismaService, + private leaderElection: LeaderElectionService, + private scheduler: SchedulerService, + private agentRegistry: AgentRegistryService, + private agentHealth: AgentHealthService, + private maintenanceMode: MaintenanceModeService, + ) {} + + @Get() + @HealthCheck() + check() { + return this.health.check([ + () => this.prismaHealth.pingCheck('database', this.prisma), + ]); + } + + @Get('live') + liveness() { + return { status: 'ok' }; + } + + @Get('ready') + @HealthCheck() + readiness() { + // v2.1.0 Phase E: Include maintenance mode in readiness + const maintenanceStatus = this.maintenanceMode.getStatus(); + + return this.health.check([ + () => this.prismaHealth.pingCheck('database', this.prisma), + // Maintenance mode check - mark as not ready if in maintenance + async () => { + if (maintenanceStatus.state === MaintenanceState.MAINTENANCE) { + return { + maintenance: { + status: 'down', + message: 'System in maintenance mode', + reason: maintenanceStatus.reason, + expectedEndAt: maintenanceStatus.expectedEndAt?.toISOString(), + }, + }; + } + return { + maintenance: { + status: 'up', + state: maintenanceStatus.state, + acceptingNewWork: maintenanceStatus.acceptingNewWork, + }, + }; + }, + ]); + } + + /** + * Get leader election status for monitoring + */ + @Get('leader') + leaderStatus() { + const status = this.leaderElection.getLeadershipStatus(); + return { + success: true, + ...status, + timestamp: new Date().toISOString(), + }; + } + + /** + * Get scheduler status for monitoring + */ + @Get('scheduler') + schedulerStatus() { + const status = this.scheduler.getSchedulerStatus(); + return { + success: true, + ...status, + timestamp: new Date().toISOString(), + }; + } + + /** + * Get multi-agent orchestration status + * Phase 7: Multi-Agent Orchestration + */ + @Get('agents') + async agentsStatus() { + const registryStats = await this.agentRegistry.getStats(); + const healthSummary = await this.agentHealth.getOverallHealthSummary(); + + return { + success: true, + registry: registryStats, + health: { + avgSuccessRate: healthSummary.avgSuccessRate, + avgLatencyMs: healthSummary.avgLatencyMs, + }, + timestamp: new Date().toISOString(), + }; + } + + /** + * Get maintenance mode status + * Phase E: Maintenance mode handling + */ + @Get('maintenance') + maintenanceStatus() { + const status = this.maintenanceMode.getStatus(); + return { + success: true, + ...status, + timestamp: new Date().toISOString(), + }; + } + + /** + * Enter maintenance mode + * Phase E: Maintenance mode handling + * + * Starts graceful drain process: + * 1. Stop accepting new work + * 2. Wait for in-progress work to complete (up to drainTimeoutMs) + * 3. Pause remaining goal runs + * 4. Enter maintenance state + */ + @Post('maintenance/enter') + @HttpCode(HttpStatus.OK) + async enterMaintenance( + @Body() options: EnterMaintenanceOptions, + ) { + const status = await this.maintenanceMode.enterMaintenance(options); + return { + success: true, + message: 'Maintenance mode initiated', + ...status, + timestamp: new Date().toISOString(), + }; + } + + /** + * Exit maintenance mode + * Phase E: Maintenance mode handling + * + * Resumes normal operation: + * 1. Resume goal runs that were paused by maintenance + * 2. Accept new work + * 3. Return to running state + */ + @Post('maintenance/exit') + @HttpCode(HttpStatus.OK) + async exitMaintenance() { + const status = await this.maintenanceMode.exitMaintenance(); + return { + success: true, + message: 'Maintenance mode ended', + ...status, + timestamp: new Date().toISOString(), + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/internal.controller.contracts.spec.ts b/packages/bytebot-workflow-orchestrator/src/controllers/internal.controller.contracts.spec.ts new file mode 100644 index 000000000..202ed28a3 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/internal.controller.contracts.spec.ts @@ -0,0 +1,256 @@ +import { InternalController } from './internal.controller'; +import { ExecutionSurface, StepType } from '@prisma/client'; + +describe('InternalController planner/dispatch contract gates', () => { + const makeController = (overrides?: Partial<{ + ensureGoalSpecReadyForPlanning: jest.Mock; + requestGoalIntakeFromPlannerError: jest.Mock; + prismaGoalRunFindUnique: jest.Mock; + dispatchTask: jest.Mock; + getTaskStatus: jest.Mock; + }>) => { + const taskDispatchService = { + dispatchTask: overrides?.dispatchTask ?? jest.fn(), + getTaskStatus: overrides?.getTaskStatus ?? jest.fn(), + } as any; + + const prismaService = { + goalRun: { + findUnique: overrides?.prismaGoalRunFindUnique ?? jest.fn(), + }, + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: any) => fallback), + } as any; + + const goalIntakeService = { + ensureGoalSpecReadyForPlanning: + overrides?.ensureGoalSpecReadyForPlanning ?? + jest.fn().mockResolvedValue({ ready: true }), + requestGoalIntakeFromPlannerError: + overrides?.requestGoalIntakeFromPlannerError ?? + jest.fn().mockResolvedValue({ goalSpecId: 'gs_test', promptId: 'up_test' }), + } as any; + + return new InternalController(taskDispatchService, prismaService, configService, goalIntakeService); + }; + + describe('POST /api/v1/internal/plan', () => { + it('returns GOAL_INTAKE_REQUIRED when planner outputs suggestedTools=["CHAT"] (interaction alias)', async () => { + const goalIntake = jest.fn().mockResolvedValue({ goalSpecId: 'gs1', promptId: 'p1' }); + const controller = makeController({ requestGoalIntakeFromPlannerError: goalIntake }); + + jest.spyOn(controller as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + planSummary: 'test', + steps: [ + { + stepNumber: 1, + description: 'Ask the user to confirm the target URL', + type: StepType.EXECUTE, // misclassified + suggestedTools: ['CHAT'], + requiresDesktop: false, + expectedOutcome: 'URL confirmed', + isHighRisk: false, + dependencies: [], + }, + ], + confidence: 0.9, + }), + ); + + const result = await controller.generatePlan( + { + goalRunId: 'gr1', + tenantId: 't1', + goalDescription: 'Test goal', + constraints: { maxSteps: 3 }, + } as any, + 'true', + ); + + expect(result).toEqual({ + kind: 'GOAL_INTAKE_REQUIRED', + goalSpecId: 'gs1', + promptId: 'p1', + reason: 'ASK_USER_TOOL', + }); + expect(goalIntake).toHaveBeenCalledTimes(1); + }); + + it('retries on unknown suggestedTools tokens and succeeds when a later attempt is valid', async () => { + const controller = makeController(); + const llm = jest.spyOn(controller as any, 'callLLM'); + + llm + .mockResolvedValueOnce( + JSON.stringify({ + planSummary: 'bad tools', + steps: [ + { + stepNumber: 1, + description: 'Do the thing', + type: StepType.EXECUTE, + suggestedTools: ['totally_not_a_real_tool'], + requiresDesktop: true, + expectedOutcome: 'Done', + isHighRisk: false, + dependencies: [], + }, + ], + confidence: 0.9, + }), + ) + .mockResolvedValueOnce( + JSON.stringify({ + planSummary: 'good tools', + steps: [ + { + stepNumber: 1, + description: 'Open the target site in the browser', + type: StepType.EXECUTE, + suggestedTools: [], + requiresDesktop: true, + expectedOutcome: 'Site is open', + isHighRisk: false, + dependencies: [], + }, + ], + confidence: 0.9, + }), + ); + + const result = await controller.generatePlan( + { + goalRunId: 'gr1', + tenantId: 't1', + goalDescription: 'Test goal', + constraints: { maxSteps: 3 }, + } as any, + 'true', + ); + + expect(result.kind).toBe('PLAN'); + expect(llm).toHaveBeenCalledTimes(2); + }); + }); + + describe('POST /api/v1/internal/dispatch-step', () => { + it('defaults to DESKTOP when workspaceId is present and surface fields are omitted', async () => { + const dispatchTask = jest.fn().mockResolvedValue({ success: true, taskId: 'task1' }); + const prismaGoalRunFindUnique = jest.fn().mockResolvedValue({ goal: 'Do the thing' }); + const controller = makeController({ dispatchTask, prismaGoalRunFindUnique }); + + const result = await controller.dispatchStep( + { + goalRunId: 'gr1', + tenantId: 't1', + workspaceId: 'ws1', + idempotencyKey: 'gr1-step-1', + step: { + stepNumber: 1, + description: 'Open the website', + }, + } as any, + 'true', + ); + + expect(result).toEqual({ success: true, taskId: 'task1', status: 'DISPATCHED' }); + expect(dispatchTask).toHaveBeenCalledWith( + expect.objectContaining({ + requiresDesktop: true, + executionSurface: ExecutionSurface.DESKTOP, + allowedTools: [], + }), + ); + }); + + it('rejects dispatch-step when suggestedTools contains unknown tokens (fail-closed)', async () => { + const dispatchTask = jest.fn(); + const prismaGoalRunFindUnique = jest.fn().mockResolvedValue({ goal: 'Do the thing' }); + const controller = makeController({ dispatchTask, prismaGoalRunFindUnique }); + + const result = await controller.dispatchStep( + { + goalRunId: 'gr1', + tenantId: 't1', + workspaceId: 'ws1', + idempotencyKey: 'gr1-step-1', + step: { + stepNumber: 1, + description: 'Open the website', + suggestedTools: ['not_a_real_tool_token'], + }, + } as any, + 'true', + ); + + expect(result.success).toBe(false); + expect(result.error).toMatch(/unknown suggestedTools token/i); + expect(dispatchTask).not.toHaveBeenCalled(); + }); + }); + + describe('GET /api/v1/internal/task-status/:taskId', () => { + it('treats AGENT_REQUESTED_HELP as FAILED (strategy is not external input)', async () => { + const getTaskStatus = jest.fn().mockResolvedValue({ + status: 'NEEDS_HELP', + result: { errorCode: 'AGENT_REQUESTED_HELP', message: 'Which flight site should I use?' }, + error: null, + }); + + const controller = makeController({ getTaskStatus }); + + const result = await controller.getTaskStatus('task1', 'true', 't1'); + + expect(result.status).toBe('FAILED'); + expect(result.error).toMatch(/AGENT_REQUESTED_HELP/i); + }); + + it('returns WAITING_USER_INPUT only for allowlisted NEEDS_HELP codes', async () => { + const getTaskStatus = jest.fn().mockResolvedValue({ + status: 'NEEDS_HELP', + result: { errorCode: 'UI_BLOCKED_SIGNIN', message: 'Sign-in required' }, + error: null, + }); + + const controller = makeController({ getTaskStatus }); + + const result = await controller.getTaskStatus('task1', 'true', 't1'); + + expect(result.status).toBe('WAITING_USER_INPUT'); + expect(result.output?.summary).toBe('Sign-in required'); + }); + + it('returns WAITING_USER_INPUT for DESKTOP_TAKEOVER_REQUIRED', async () => { + const getTaskStatus = jest.fn().mockResolvedValue({ + status: 'NEEDS_HELP', + result: { errorCode: 'DESKTOP_TAKEOVER_REQUIRED', message: 'Popup requires takeover' }, + error: null, + }); + + const controller = makeController({ getTaskStatus }); + + const result = await controller.getTaskStatus('task1', 'true', 't1'); + + expect(result.status).toBe('WAITING_USER_INPUT'); + expect(result.output?.summary).toBe('Popup requires takeover'); + }); + + it('returns WAITING_PROVIDER for provider-eligible NEEDS_HELP codes', async () => { + const getTaskStatus = jest.fn().mockResolvedValue({ + status: 'NEEDS_HELP', + result: { errorCode: 'LLM_PROXY_DOWN', message: 'LiteLLM proxy is unreachable' }, + error: null, + }); + + const controller = makeController({ getTaskStatus }); + + const result = await controller.getTaskStatus('task1', 'true', 't1'); + + expect(result.status).toBe('WAITING_PROVIDER'); + expect(result.output?.summary).toBe('LiteLLM proxy is unreachable'); + }); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/internal.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/internal.controller.ts new file mode 100644 index 000000000..c19ee5708 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/internal.controller.ts @@ -0,0 +1,1145 @@ +/** + * Internal Controller - Phase 14.3 + * + * Provides internal API endpoints for service-to-service communication. + * These endpoints are intended for use by Temporal workers and other internal services. + * + * Security: Protected by X-Internal-Request header validation. + * + * Endpoints: + * - POST /api/v1/internal/plan - Generate a step-by-step plan for a goal (Phase 14.2) + * - POST /api/v1/internal/dispatch-step - Dispatch a step for execution via TaskDispatchService + * - POST /api/v1/internal/verify - Verify step execution result + * - GET /api/v1/internal/task-status/:taskId - Get task status + * + * Phase 14.2: Added /plan endpoint to enable proper task-level planning + * - Fixes overly granular planning issue (mouse-level vs task-level steps) + * - Uses industry-standard planning prompt with proper granularity guidance + * - Follows Manus AI, ReAct, and Anthropic computer-use best practices + * + * Phase 14.3: Fixed LLM integration to use LiteLLM proxy + * - Uses LLM_API_URL, LLM_API_KEY, LLM_MODEL env vars (same pattern as planner.service.ts) + * - Routes to gpt-oss-120b via LiteLLM proxy in AIML cluster + * - Removed broken LLMProviderService dependency + * + * @see /docs/TEMPORAL_INTEGRATION.md + */ + +import { + Controller, + Post, + Get, + Body, + Param, + Headers, + HttpCode, + HttpStatus, + HttpException, + Logger, +} from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { SkipThrottle } from '@nestjs/throttler'; +import { IsString, IsOptional, IsBoolean, IsArray, IsNumber, ValidateNested, IsEnum } from 'class-validator'; +import { Type } from 'class-transformer'; +import { TaskDispatchService } from '../services/task-dispatch.service'; +import { PrismaService } from '../services/prisma.service'; +import { GoalIntakeService } from '../services/goal-intake.service'; +import { detectPlannerFirstStepUserInputReason, PlannerFirstStepUserInputError } from '../services/planner.errors'; +import { ExecutionSurface, StepType } from '@prisma/client'; +import { hasDesktopExecutionTool, normalizeSuggestedToolsOrThrow, PlannerOutputContractViolationError } from '../contracts/planner-tools'; +import { inferGoalFeasibility } from '../contracts/goal-feasibility'; + +// ============================================================================ +// DTOs +// ============================================================================ + +/** + * Context for step execution + */ +class StepContextDto { + @IsOptional() + @IsString() + previousStepOutcome?: string; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + accumulatedKnowledge?: string[]; +} + +/** + * Step definition for dispatch + */ +class StepDto { + @IsNumber() + stepNumber!: number; + + @IsString() + description!: string; + + @IsOptional() + @IsString() + expectedOutcome?: string; + + @IsOptional() + @IsBoolean() + isHighRisk?: boolean; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + dependencies?: string[]; + + @IsOptional() + @IsArray() + @IsString({ each: true }) + suggestedTools?: string[]; + + @IsOptional() + @IsBoolean() + requiresDesktop?: boolean; + + // PR5: Explicit execution surface (TEXT_ONLY vs DESKTOP) + @IsOptional() + @IsEnum(ExecutionSurface) + executionSurface?: ExecutionSurface; +} + +/** + * Request body for step dispatch + */ +class DispatchStepDto { + @IsString() + goalRunId!: string; + + @IsString() + tenantId!: string; + + @ValidateNested() + @Type(() => StepDto) + step!: StepDto; + + @IsOptional() + @IsString() + workspaceId?: string; + + @IsOptional() + @ValidateNested() + @Type(() => StepContextDto) + context?: StepContextDto; + + @IsOptional() + @IsString() + idempotencyKey?: string; +} + +/** + * Response from step dispatch + */ +interface DispatchStepResponse { + success: boolean; + taskId?: string; + status?: 'PENDING' | 'DISPATCHED'; + error?: string; +} + +/** + * Request body for step verification + */ +class VerifyStepDto { + @IsString() + goalRunId!: string; + + @IsString() + tenantId!: string; + + @ValidateNested() + @Type(() => StepDto) + step!: StepDto; + + @IsString() + expectedOutcome!: string; + + @IsOptional() + @IsString() + actualOutcome?: string; + + @IsBoolean() + success!: boolean; + + @IsOptional() + @IsString() + error?: string; +} + +/** + * Response from step verification + */ +interface VerifyStepResponse { + verified: boolean; + verificationDetails: string; + suggestReplan: boolean; + replanReason?: string; +} + +/** + * Phase 14.2: Request body for plan generation + * Called by Temporal workers to generate step-by-step plans + */ +class PlanRequestDto { + @IsString() + goalRunId!: string; + + @IsString() + tenantId!: string; + + @IsString() + goalDescription!: string; + + @IsOptional() + @IsString() + context?: string; + + @IsOptional() + constraints?: { + maxSteps?: number; + allowedTools?: string[]; + workspaceMode?: string; + riskPolicy?: { + requireApproval?: string[]; + }; + }; + + @IsOptional() + @IsString() + preferredModel?: string; +} + +/** + * Phase 14.2: Response from plan generation + */ +interface PlanResponse { + kind: 'PLAN'; + steps: Array<{ + stepNumber: number; + description: string; + expectedOutcome?: string; + isHighRisk?: boolean; + dependencies?: number[]; + estimatedDurationMs?: number; + }>; + planSummary: string; + estimatedDurationMs?: number; + confidence?: number; +} + +interface GoalIntakeRequiredResponse { + kind: 'GOAL_INTAKE_REQUIRED'; + goalSpecId: string; + promptId: string; + reason: string; +} + +type InternalPlanResponse = PlanResponse | GoalIntakeRequiredResponse; + +// ============================================================================ +// Controller +// ============================================================================ + +@Controller('internal') +@SkipThrottle() // Internal endpoints don't need rate limiting +export class InternalController { + private readonly logger = new Logger(InternalController.name); + + // Phase 14.3: Direct LiteLLM proxy configuration (same pattern as planner.service.ts) + private readonly llmApiUrl: string; + private readonly llmApiKey: string; + private readonly llmModel: string; + + constructor( + private readonly taskDispatchService: TaskDispatchService, + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly goalIntakeService: GoalIntakeService, + ) { + // Read LLM config from environment (set in K8s deployment) + // LLM_API_URL → http://litellm.llm.svc.cluster.local:4000/v1/messages + // LLM_API_KEY → sk-butler-vantage (LiteLLM proxy key) + // LLM_MODEL → openai/gpt-oss-120b + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmModel = this.configService.get('LLM_MODEL', 'claude-3-5-sonnet-20241022'); + + this.logger.log({ + message: 'InternalController initialized with LLM config', + llmApiUrl: this.llmApiUrl, + llmModel: this.llmModel, + hasApiKey: !!this.llmApiKey, + }); + } + + /** + * Validate internal request header + */ + private validateInternalRequest(internalHeader?: string): void { + if (internalHeader !== 'true') { + throw new HttpException('Forbidden: Internal access only', HttpStatus.FORBIDDEN); + } + } + + /** + * Dispatch a step for execution + * + * This endpoint is called by Temporal workers to dispatch steps to ByteBot agents. + * It uses the orchestrator's TaskDispatchService which manages: + * - Idempotent task creation + * - Task routing to bytebot-agent:9991 + * - Status tracking and polling + * + * @param body Step dispatch request + * @param internalHeader X-Internal-Request header for authentication + * @returns Dispatch result with taskId + */ + @Post('dispatch-step') + @HttpCode(HttpStatus.OK) + async dispatchStep( + @Body() body: DispatchStepDto, + @Headers('x-internal-request') internalHeader?: string, + ): Promise { + this.validateInternalRequest(internalHeader); + + this.logger.log({ + message: 'Dispatching step via internal API', + goalRunId: body.goalRunId, + stepNumber: body.step.stepNumber, + tenantId: body.tenantId, + }); + + try { + const normalizedSuggestedTools = normalizeSuggestedToolsOrThrow({ + suggestedTools: body.step.suggestedTools, + allowedTools: null, + }); + + // Goal context (used for deterministic feasibility gates) + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: body.goalRunId }, + select: { goal: true }, + }); + + const goalFeasibility = inferGoalFeasibility(goalRun?.goal); + const mustUseDesktopSurface = goalFeasibility?.requiredSurface === ExecutionSurface.DESKTOP; + + // Resolve execution surface deterministically (structured signals only; no NL heuristics). + // Temporal worker v1 StepDto may omit surface fields; use workspace presence as the safest default. + const inferredRequiresDesktop = + Boolean(body.step.requiresDesktop) || + hasDesktopExecutionTool(normalizedSuggestedTools) || + !!body.workspaceId || + mustUseDesktopSurface; + + let resolvedExecutionSurface = + body.step.executionSurface ?? + (inferredRequiresDesktop ? ExecutionSurface.DESKTOP : ExecutionSurface.TEXT_ONLY); + + // Feasibility gate: do not allow TEXT_ONLY dispatch when desktop execution is required. + if (resolvedExecutionSurface === ExecutionSurface.TEXT_ONLY && inferredRequiresDesktop) { + this.logger.warn({ + message: 'Feasibility auto-upgrade: step requires DESKTOP but was TEXT_ONLY', + goalRunId: body.goalRunId, + stepNumber: body.step.stepNumber, + workspaceId: body.workspaceId ?? null, + suggestedTools: normalizedSuggestedTools, + }); + resolvedExecutionSurface = ExecutionSurface.DESKTOP; + } + + const resolvedRequiresDesktop = resolvedExecutionSurface === ExecutionSurface.DESKTOP; + + const allowedToolsForDispatch = + mustUseDesktopSurface && !hasDesktopExecutionTool(normalizedSuggestedTools) + ? [...normalizedSuggestedTools, 'browser'] + : normalizedSuggestedTools; + + // Build previous step results from context + const previousStepResults = body.context?.accumulatedKnowledge?.length + ? body.context.accumulatedKnowledge.join('\n') + : body.context?.previousStepOutcome; + + // Dispatch via TaskDispatchService + const result = await this.taskDispatchService.dispatchTask({ + goalRunId: body.goalRunId, + // Use idempotencyKey as checklistItemId for tracking + // Format: goalRunId-step-stepNumber for Temporal compatibility + checklistItemId: body.idempotencyKey || `${body.goalRunId}-step-${body.step.stepNumber}`, + workspaceId: body.workspaceId, + title: body.step.description.slice(0, 100), + description: body.step.description, + expectedOutcome: body.step.expectedOutcome, + allowedTools: allowedToolsForDispatch, + requiresDesktop: resolvedRequiresDesktop, + executionSurface: resolvedExecutionSurface, + // Context for autonomous operation + goalContext: goalRun?.goal, + previousStepResults, + // Track retry attempts + attempt: 1, + }); + + if (result.success && result.taskId) { + this.logger.log({ + message: 'Step dispatched successfully', + goalRunId: body.goalRunId, + stepNumber: body.step.stepNumber, + taskId: result.taskId, + }); + + return { + success: true, + taskId: result.taskId, + status: 'DISPATCHED', + }; + } + + return { + success: false, + error: result.error || 'Failed to dispatch step', + }; + } catch (error: any) { + this.logger.error({ + message: 'Failed to dispatch step', + goalRunId: body.goalRunId, + stepNumber: body.step.stepNumber, + error: error.message, + }); + + return { + success: false, + error: error.message || 'Internal error during dispatch', + }; + } + } + + /** + * Get task status by ID + * + * Called by Temporal workers to poll for task completion. + * + * @param taskId Task ID to check + * @param internalHeader X-Internal-Request header for authentication + * @returns Task status information + */ + @Get('task-status/:taskId') + @HttpCode(HttpStatus.OK) + async getTaskStatus( + @Param('taskId') taskId: string, + @Headers('x-internal-request') internalHeader?: string, + @Headers('x-tenant-id') tenantId?: string, + ): Promise<{ + status: 'PENDING' | 'RUNNING' | 'COMPLETED' | 'FAILED' | 'WAITING_USER_INPUT' | 'WAITING_PROVIDER'; + output?: { + summary?: string; + result?: string; + artifacts?: string[]; + }; + error?: string; + }> { + this.validateInternalRequest(internalHeader); + + try { + // Query agent API for task status + const task = await this.taskDispatchService.getTaskStatus(taskId); + + if (!task) { + throw new HttpException('Task not found', HttpStatus.NOT_FOUND); + } + + const normalizeErrorCode = (value: unknown): string => (typeof value === 'string' ? value.trim() : ''); + const normalizeMessage = (value: unknown): string => (typeof value === 'string' ? value.trim() : ''); + + const extractNeedsHelp = (): { errorCode: string; message: string } => { + const result = task.result && typeof task.result === 'object' ? (task.result as any) : null; + return { + errorCode: + normalizeErrorCode(result?.errorCode) || + normalizeErrorCode(result?.code) || + '', + message: + normalizeMessage(result?.message) || + normalizeMessage(result?.question) || + normalizeMessage(result?.description) || + '', + }; + }; + + const promptEligibleNeedsHelpCodes = new Set([ + // External input / takeover only. Strategy and repair MUST NOT land here. + 'DESKTOP_TAKEOVER_REQUIRED', + 'DISPATCHED_USER_PROMPT_STEP', + 'UI_BLOCKED_SIGNIN', + 'UI_BLOCKED_POPUP', + ]); + + const providerWaitEligibleNeedsHelpCodes = new Set([ + // Provider/model/gateway recovery. This must be a stable wait state, not replan churn. + 'WAITING_PROVIDER', + 'LLM_PROXY_DOWN', + 'MODEL_UNAVAILABLE', + 'LLM_EMPTY_RESPONSE', + ]); + + const isNeedsHelp = task.status === 'NEEDS_HELP' || task.status === 'NEEDS_REVIEW'; + if (isNeedsHelp) { + const { errorCode, message } = extractNeedsHelp(); + + // Contract: untyped needs_help must never create WAITING_USER_INPUT. + if (!errorCode) { + return { + status: 'FAILED', + error: 'NEEDS_HELP missing errorCode (contract violation)', + }; + } + + // Contract: strategy-as-help must never strand a run waiting for a user. + if (errorCode === 'AGENT_REQUESTED_HELP') { + return { + status: 'FAILED', + error: `NEEDS_HELP(${errorCode}) is strategy-only and is not external input`, + }; + } + + if (promptEligibleNeedsHelpCodes.has(errorCode)) { + return { + status: 'WAITING_USER_INPUT', + output: task.result + ? { + summary: message || 'Waiting for user input', + result: JSON.stringify(task.result), + artifacts: (task.result as any)?.artifacts, + } + : undefined, + }; + } + + if (providerWaitEligibleNeedsHelpCodes.has(errorCode)) { + return { + status: 'WAITING_PROVIDER', + output: task.result + ? { + summary: message || 'Waiting for provider/model capacity', + result: JSON.stringify(task.result), + artifacts: (task.result as any)?.artifacts, + } + : undefined, + }; + } + + // Default: treat non-allowlisted NEEDS_HELP as an internal failure signal so the workflow can retry/replan. + return { + status: 'FAILED', + error: `NEEDS_HELP(${errorCode}) routed to internal repair: ${message || 'no message'}`, + }; + } + + // Map agent task status to our format + const statusMap: Record = { + 'PENDING': 'PENDING', + 'RUNNING': 'RUNNING', + 'COMPLETED': 'COMPLETED', + 'FAILED': 'FAILED', + 'CANCELLED': 'FAILED', + }; + + return { + status: statusMap[task.status] || 'RUNNING', + output: task.result ? { + summary: task.result.summary, + result: JSON.stringify(task.result), + artifacts: task.result.artifacts, + } : undefined, + error: task.error, + }; + } catch (error: any) { + if (error instanceof HttpException) throw error; + + this.logger.error({ + message: 'Failed to get task status', + taskId, + error: error.message, + }); + + throw new HttpException( + error.message || 'Failed to get task status', + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * Verify step execution result + * + * Uses LLM to compare expected vs actual outcome and determine + * if replanning is needed. + * + * @param body Verification request + * @param internalHeader X-Internal-Request header for authentication + * @returns Verification result + */ + @Post('verify') + @HttpCode(HttpStatus.OK) + async verifyStep( + @Body() body: VerifyStepDto, + @Headers('x-internal-request') internalHeader?: string, + ): Promise { + this.validateInternalRequest(internalHeader); + + this.logger.log({ + message: 'Verifying step execution', + goalRunId: body.goalRunId, + stepNumber: body.step.stepNumber, + success: body.success, + }); + + // If step failed, suggest replan + if (!body.success) { + return { + verified: false, + verificationDetails: `Step failed with error: ${body.error || 'Unknown error'}`, + suggestReplan: true, + replanReason: body.error || 'Step execution failed', + }; + } + + // If no expected outcome, auto-verify based on success + if (!body.expectedOutcome) { + return { + verified: true, + verificationDetails: 'Step completed successfully (no expected outcome to verify)', + suggestReplan: false, + }; + } + + try { + // Use LLM to verify outcome matches expectation + const verificationPrompt = `You are verifying if a step execution achieved its expected outcome. + +Expected Outcome: +${body.expectedOutcome} + +Actual Outcome: +${body.actualOutcome || 'No outcome provided'} + +Step Description: +${body.step.description} + +Respond in JSON format: +{ + "verified": true/false, + "confidence": 0-100, + "explanation": "Brief explanation", + "suggestReplan": true/false, + "replanReason": "If replan needed, explain why" +}`; + + // Phase 14.3: Call LLM directly via LiteLLM proxy + const responseText = await this.callLLM(verificationPrompt, 500, 0.1); + + // Parse JSON response + try { + const parsed = JSON.parse(responseText); + return { + verified: Boolean(parsed.verified), + verificationDetails: parsed.explanation || 'Verification completed', + suggestReplan: Boolean(parsed.suggestReplan), + replanReason: parsed.replanReason, + }; + } catch { + // Fallback if LLM response isn't valid JSON + return { + verified: true, + verificationDetails: 'Verification completed (LLM response parsing fallback)', + suggestReplan: false, + }; + } + } catch (error: any) { + this.logger.warn({ + message: 'LLM verification failed, using fallback', + error: error.message, + }); + + // Fallback: simple verification based on success flag + return { + verified: body.success, + verificationDetails: `Fallback verification: ${body.success ? 'success' : 'failed'}`, + suggestReplan: !body.success, + replanReason: body.error, + }; + } + } + + /** + * Health check for internal services + */ + @Get('health') + @HttpCode(HttpStatus.OK) + async health(): Promise<{ status: string; timestamp: string }> { + return { + status: 'ok', + timestamp: new Date().toISOString(), + }; + } + + /** + * Phase 14.2: Generate a step-by-step plan for a goal + * + * This endpoint is called by Temporal workers to generate plans. + * It uses an industry-standard planning prompt that: + * - Generates task-level steps (not mouse/keyboard-level) + * - Follows Manus AI, ReAct, and Anthropic computer-use best practices + * - Prevents overly granular step generation + * + * Key Design Decisions: + * - Steps are "logical tasks" not "atomic actions" + * - Each step should accomplish a meaningful sub-goal + * - Steps are verifiable through visual observation of outcomes + * + * @param body Plan request with goal description and constraints + * @param internalHeader X-Internal-Request header for authentication + * @returns Generated plan with steps + */ + @Post('plan') + @HttpCode(HttpStatus.OK) + async generatePlan( + @Body() body: PlanRequestDto, + @Headers('x-internal-request') internalHeader?: string, + ): Promise { + this.validateInternalRequest(internalHeader); + + this.logger.log({ + message: 'Generating plan via internal API', + goalRunId: body.goalRunId, + tenantId: body.tenantId, + goalDescription: body.goalDescription.substring(0, 100), + }); + + const startTime = Date.now(); + + try { + // Stark rule: do not plan until GoalSpec is COMPLETE. + const intakeGate = await this.goalIntakeService.ensureGoalSpecReadyForPlanning({ + goalRunId: body.goalRunId, + tenantId: body.tenantId, + }); + + if (!intakeGate.ready) { + this.logger.warn({ + message: 'Goal intake required before planning (GoalSpec gate)', + goalRunId: body.goalRunId, + goalSpecId: intakeGate.goalSpecId, + promptId: intakeGate.promptId, + }); + + return { + kind: 'GOAL_INTAKE_REQUIRED', + goalSpecId: intakeGate.goalSpecId, + promptId: intakeGate.promptId, + reason: 'GOAL_SPEC_INCOMPLETE', + }; + } + + const basePrompt = this.buildPlanningPrompt(body); + const maxContractAttempts = 3; + + let lastContractError: PlannerOutputContractViolationError | null = null; + let parsed: any = null; + let steps: Array<{ + stepNumber: number; + description: string; + expectedOutcome?: string; + isHighRisk: boolean; + dependencies: any[]; + estimatedDurationMs?: number; + }> = []; + + for (let attempt = 1; attempt <= maxContractAttempts; attempt++) { + const planningPrompt = + attempt === 1 + ? basePrompt + : `${basePrompt}\n\nCORRECTION REQUIRED:\n` + + `- Your previous JSON violated the planner-output contract.\n` + + `- suggestedTools MUST be either [] or a subset of the Allowed tools list. Never invent tool tokens.\n` + + `- If a step requires user input, use type=USER_INPUT_REQUIRED and suggestedTools=[\"ASK_USER\"] (never \"CHAT\").\n`; + + // Phase 14.3: Call LLM directly via LiteLLM proxy + // Routes to gpt-oss-120b (120B parameter model) for high-quality planning + const responseText = await this.callLLM(planningPrompt, 4096, 0.3); + + // Parse JSON response + let candidateParsed: any; + try { + // Extract JSON from potential markdown code blocks + const jsonMatch = responseText.match(/```(?:json)?\s*([\s\S]*?)```/); + const jsonStr = jsonMatch ? jsonMatch[1] : responseText; + candidateParsed = JSON.parse(jsonStr.trim()); + } catch { + // Try to find JSON object in response + const objectMatch = responseText.match(/\{[\s\S]*\}/); + if (objectMatch) { + candidateParsed = JSON.parse(objectMatch[0]); + } else { + throw new Error('Failed to parse LLM response as JSON'); + } + } + + const rawSteps = Array.isArray(candidateParsed.steps) ? candidateParsed.steps : []; + + try { + // Build a normalized representation so we can apply contract gates deterministically. + // We do not accept plans that contain interaction-shaped steps; those must be handled via Goal Intake. + type NormalizedPlannerChecklistItem = { + description: string; + type: StepType; + expectedOutcome?: string; + suggestedTools: string[]; + requiresDesktop: boolean; + stepNumber: number; + isHighRisk: boolean; + dependencies: any[]; + estimatedDurationMs?: number; + }; + + const normalizedChecklist: NormalizedPlannerChecklistItem[] = rawSteps.map((rawStep: any, index: number) => ({ + description: typeof rawStep?.description === 'string' ? rawStep.description : '', + type: rawStep?.type === StepType.USER_INPUT_REQUIRED ? StepType.USER_INPUT_REQUIRED : StepType.EXECUTE, + expectedOutcome: typeof rawStep?.expectedOutcome === 'string' ? rawStep.expectedOutcome : undefined, + suggestedTools: normalizeSuggestedToolsOrThrow({ + suggestedTools: Array.isArray(rawStep?.suggestedTools) ? rawStep.suggestedTools : [], + allowedTools: body.constraints?.allowedTools, + }), + requiresDesktop: typeof rawStep?.requiresDesktop === 'boolean' ? rawStep.requiresDesktop : false, + stepNumber: rawStep?.stepNumber ?? index + 1, + isHighRisk: rawStep?.isHighRisk ?? false, + dependencies: rawStep?.dependencies ?? [], + estimatedDurationMs: rawStep?.estimatedDurationMs, + })); + + const firstInteraction = normalizedChecklist.find((item: NormalizedPlannerChecklistItem) => + detectPlannerFirstStepUserInputReason(item), + ); + if (firstInteraction) { + const reason = detectPlannerFirstStepUserInputReason(firstInteraction) ?? 'ASK_USER_TOOL'; + const error = new PlannerFirstStepUserInputError({ + mode: 'initial', + firstStep: firstInteraction, + reason, + }); + + const intake = await this.goalIntakeService.requestGoalIntakeFromPlannerError({ + goalRunId: body.goalRunId, + tenantId: body.tenantId, + error, + }); + + this.logger.warn({ + message: 'Planner produced an interaction-shaped step; issuing Goal Intake prompt instead', + goalRunId: body.goalRunId, + goalSpecId: intake.goalSpecId, + promptId: intake.promptId, + reason: error.reason, + }); + + return { + kind: 'GOAL_INTAKE_REQUIRED', + goalSpecId: intake.goalSpecId, + promptId: intake.promptId, + reason: error.reason, + }; + } + + steps = normalizedChecklist.map((item: NormalizedPlannerChecklistItem) => ({ + stepNumber: item.stepNumber, + description: item.description, + expectedOutcome: item.expectedOutcome, + isHighRisk: item.isHighRisk ?? false, + dependencies: item.dependencies ?? [], + estimatedDurationMs: item.estimatedDurationMs, + })); + + parsed = candidateParsed; + break; + } catch (error: any) { + if (error instanceof PlannerOutputContractViolationError) { + lastContractError = error; + this.logger.warn({ + message: 'Planner output contract violation (retrying)', + goalRunId: body.goalRunId, + attempt, + code: error.code, + }); + continue; + } + throw error; + } + } + + if (!parsed) { + if (lastContractError) throw lastContractError; + throw new Error('Failed to generate plan'); + } + + const durationMs = Date.now() - startTime; + this.logger.log({ + message: 'Plan generated successfully', + goalRunId: body.goalRunId, + stepCount: steps.length, + durationMs, + confidence: parsed.confidence, + }); + + return { + kind: 'PLAN', + steps, + planSummary: parsed.planSummary || parsed.summary || 'Generated plan', + estimatedDurationMs: parsed.estimatedDurationMs, + confidence: parsed.confidence, + }; + } catch (error: any) { + this.logger.error({ + message: 'Failed to generate plan', + goalRunId: body.goalRunId, + error: error.message, + }); + + // Return a sensible fallback plan + return this.generateFallbackPlan(body.goalDescription); + } + } + + /** + * Phase 14.2: Build planning prompt with proper granularity guidance + * + * Key improvements over previous prompts: + * 1. Explicit "task-level" vs "action-level" distinction + * 2. Examples of good vs bad step granularity + * 3. Clear instruction that steps are NOT individual mouse/keyboard actions + * 4. Manus AI-style: steps should be "meaningful sub-goals" + */ + private buildPlanningPrompt(body: PlanRequestDto): string { + const maxSteps = body.constraints?.maxSteps || 10; + const allowedTools = body.constraints?.allowedTools?.join(', ') || 'any desktop tools'; + const workspaceMode = body.constraints?.workspaceMode || 'SHARED'; + + return `You are Butler Vantage, a DESKTOP-BASED autonomous agent that plans and executes tasks by controlling a virtual desktop environment. + +## Your Environment +You operate within a virtual desktop and interact with applications through: +- Mouse control (click, drag, scroll) +- Keyboard input (type, shortcuts) +- Visual observation (screenshots) +- Application interaction (browsers, editors, etc.) + +## CRITICAL: Step Granularity Guidelines + +Steps should be **TASK-LEVEL**, not **ACTION-LEVEL**. + +### BAD Examples (Too Granular - DO NOT DO THIS): +- "Move cursor to address bar" +- "Click on the address bar" +- "Type 'https://www.google.com'" +- "Press Enter key" + +### GOOD Examples (Correct Granularity): +- "Navigate to google.com in the browser" +- "Search for 'cheap flights to Paris' on Google" +- "Fill out the login form with provided credentials" +- "Download the PDF report from the dashboard" + +### The Difference: +- **Task-level step**: Accomplishes a meaningful sub-goal (e.g., "Navigate to website") +- **Action-level step**: A single mouse/keyboard action (e.g., "Click", "Type", "Press Enter") + +Your steps should be task-level. The execution engine handles the individual actions. + +## Goal to Plan +${body.goalDescription} + +${body.context ? `## Additional Context\n${body.context}\n` : ''} +## Constraints +- Maximum steps: ${maxSteps} +- Allowed tools: ${allowedTools} +- Workspace mode: ${workspaceMode} +${body.constraints?.riskPolicy?.requireApproval?.length ? `- Actions requiring approval: ${body.constraints.riskPolicy.requireApproval.join(', ')}` : ''} + +## Output Format (JSON) +{ + "planSummary": "Brief description of the overall approach", + "steps": [ + { + "stepNumber": 1, + "description": "Clear task-level description (NOT individual mouse/keyboard actions)", + "type": "EXECUTE | USER_INPUT_REQUIRED", + "suggestedTools": [], + "requiresDesktop": true, + "expectedOutcome": "What should be observable when this step succeeds", + "isHighRisk": false, + "dependencies": [], + "estimatedDurationMs": 30000 + } + ], + "confidence": 0.85, + "estimatedDurationMs": 120000 +} + +## Planning Rules +1. **Task-Level Steps**: Each step should accomplish a meaningful sub-goal, not a single UI action +2. **2-${maxSteps} Steps**: Break complex goals into 2-${maxSteps} logical steps +3. **Verifiable Outcomes**: Each step should have an observable outcome (not "cursor moved") +4. **Dependencies**: Mark dependencies where step order matters +5. **High-Risk Marking**: Mark steps that submit data, make purchases, or modify external systems as isHighRisk: true +6. **Desktop-Achievable**: Every step must be achievable through desktop interactions (no API calls) +7. **Specific but Not Granular**: Be specific about what to do, but don't specify individual clicks/keystrokes +8. **Tool Vocabulary (Fail-Closed)**: suggestedTools MUST be [] or a subset of the Allowed tools list. Never invent tool tokens. +9. **User Interaction Tooling**: If a step requires user input, set type=USER_INPUT_REQUIRED and suggestedTools=["ASK_USER"] (never "CHAT"). +10. **No Prompt-First Plans**: The FIRST step MUST NOT require user input. If you need clarification to start, assume safe defaults and begin with an EXECUTE step. + +## Example Good Plan +Goal: "Search for flights from NYC to Paris and find the cheapest option" + +Good plan: +1. "Open Google Flights in the browser" (NOT: "Click address bar, type URL, press Enter") +2. "Search for flights from NYC to Paris for next week" (NOT: "Click origin field, type NYC, click destination...") +3. "Sort results by price to find cheapest option" (NOT: "Click sort dropdown, click Price option") +4. "Record the cheapest flight details" (NOT: "Move cursor to price, screenshot") + +Generate the plan:`; + } + + /** + * Generate a fallback plan when LLM fails + */ + private generateFallbackPlan(goal: string): PlanResponse { + return { + kind: 'PLAN', + steps: [ + { + stepNumber: 1, + description: `Analyze the goal and identify the target application or website`, + expectedOutcome: 'Clear understanding of where to perform the task', + isHighRisk: false, + dependencies: [], + estimatedDurationMs: 15000, + }, + { + stepNumber: 2, + description: `Execute the main task: ${goal.substring(0, 100)}`, + expectedOutcome: 'Task objective achieved', + isHighRisk: false, + dependencies: [1], + estimatedDurationMs: 60000, + }, + { + stepNumber: 3, + description: 'Verify the task was completed successfully', + expectedOutcome: 'Visual confirmation of success', + isHighRisk: false, + dependencies: [2], + estimatedDurationMs: 15000, + }, + ], + planSummary: `Execute goal: ${goal.substring(0, 100)}`, + estimatedDurationMs: 90000, + confidence: 0.5, + }; + } + + /** + * Phase 14.3: Call LLM via LiteLLM proxy + * + * Uses the same pattern as planner.service.ts for consistency. + * Routes to gpt-oss-120b (120B parameter model) via LiteLLM proxy. + * + * @param prompt The prompt to send to the LLM + * @param maxTokens Maximum tokens in response (default 2000) + * @param temperature Temperature for response generation (default 0.7) + * @returns The LLM response text + */ + private async callLLM(prompt: string, maxTokens = 2000, temperature = 0.7): Promise { + // If no API key configured, log warning and throw + if (!this.llmApiKey) { + this.logger.error({ + message: 'No LLM API key configured', + llmApiUrl: this.llmApiUrl, + llmModel: this.llmModel, + }); + throw new Error('LLM API key not configured'); + } + + this.logger.debug({ + message: 'Calling LLM via LiteLLM proxy', + llmApiUrl: this.llmApiUrl, + llmModel: this.llmModel, + maxTokens, + temperature, + promptLength: prompt.length, + }); + + const startTime = Date.now(); + + try { + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: this.llmModel, + max_tokens: maxTokens, + temperature, + messages: [ + { + role: 'user', + content: prompt, + }, + ], + }), + }); + + if (!response.ok) { + const errorText = await response.text(); + this.logger.error({ + message: 'LLM API error', + status: response.status, + statusText: response.statusText, + error: errorText, + llmApiUrl: this.llmApiUrl, + llmModel: this.llmModel, + }); + throw new Error(`LLM API error: ${response.status} ${response.statusText} - ${errorText}`); + } + + const data = await response.json(); + const durationMs = Date.now() - startTime; + + this.logger.log({ + message: 'LLM call successful', + llmModel: this.llmModel, + durationMs, + responseLength: data.content?.[0]?.text?.length || 0, + }); + + return data.content[0].text; + } catch (error: any) { + const durationMs = Date.now() - startTime; + this.logger.error({ + message: 'LLM call failed', + error: error.message, + llmApiUrl: this.llmApiUrl, + llmModel: this.llmModel, + durationMs, + }); + throw error; + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/notification.controller.spec.ts b/packages/bytebot-workflow-orchestrator/src/controllers/notification.controller.spec.ts new file mode 100644 index 000000000..ae01629bb --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/notification.controller.spec.ts @@ -0,0 +1,18 @@ +import { NotificationController } from './notification.controller'; +import { SlackEventType } from '../services/slack-notification.service'; + +describe('NotificationController', () => { + it('returns event types that match server-side validation (SlackEventType values)', () => { + const controller = new NotificationController({} as any, {} as any, {} as any); + const result = controller.getEventTypes(); + + expect(result.success).toBe(true); + expect(Array.isArray(result.events)).toBe(true); + + const types = result.events.map((e: any) => e.type); + expect(types).toContain(SlackEventType.GOAL_STARTED); + expect(types).toContain(SlackEventType.USER_PROMPT_CREATED); + expect(types).not.toContain('GOAL_STARTED'); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/notification.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/notification.controller.ts new file mode 100644 index 000000000..bf76f878e --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/notification.controller.ts @@ -0,0 +1,724 @@ +/** + * Notification Channel Controller + * Phase 8 (v5.3.0): External Integrations - Unified notification management + * + * Endpoints: + * - GET /api/v1/notifications/channels List channels for tenant + * - POST /api/v1/notifications/channels Create notification channel + * - GET /api/v1/notifications/channels/:id Get channel details + * - PUT /api/v1/notifications/channels/:id Update channel + * - DELETE /api/v1/notifications/channels/:id Delete channel + * - POST /api/v1/notifications/channels/:id/test Test channel delivery + * - GET /api/v1/notifications/channels/:id/deliveries Get delivery history + * - GET /api/v1/notifications/types Get available channel types + * - GET /api/v1/notifications/events Get available event types + */ + +import { + Controller, + Get, + Post, + Put, + Delete, + Param, + Body, + Query, + HttpException, + HttpStatus, + Logger, + Headers, +} from '@nestjs/common'; +import { + ApiTags, + ApiOperation, + ApiResponse, + ApiHeader, + ApiQuery, +} from '@nestjs/swagger'; +import { SlackNotificationService, SlackEventType } from '../services/slack-notification.service'; +import { TeamsNotificationService, TeamsEventType } from '../services/teams-notification.service'; +import { PrismaService } from '../services/prisma.service'; + +/** + * Notification channel types + */ +export enum NotificationChannelType { + SLACK = 'SLACK', + TEAMS = 'TEAMS', + EMAIL = 'EMAIL', + CUSTOM_WEBHOOK = 'CUSTOM_WEBHOOK', +} + +/** + * DTOs for notification endpoints + */ +interface CreateChannelDto { + type: NotificationChannelType; + name: string; + description?: string; + config: { + webhookUrl?: string; + email?: string; + // Slack-specific + channel?: string; + channelId?: string; + username?: string; + iconEmoji?: string; + // Teams-specific + mentionUsers?: string[]; + // Custom webhook + headers?: Record; + method?: 'POST' | 'PUT'; + }; + events: string[]; + filters?: { + goalPatterns?: string[]; + statuses?: string[]; + priorities?: string[]; + }; +} + +interface UpdateChannelDto { + name?: string; + description?: string; + config?: Record; + events?: string[]; + filters?: Record; + enabled?: boolean; +} + +interface ListChannelsQuery { + type?: NotificationChannelType; + limit?: string; + offset?: string; +} + +@ApiTags('notifications') +@Controller('notifications') +export class NotificationController { + private readonly logger = new Logger(NotificationController.name); + + constructor( + private readonly prisma: PrismaService, + private readonly slackService: SlackNotificationService, + private readonly teamsService: TeamsNotificationService, + ) {} + + /** + * GET /api/v1/notifications/channels + * List notification channels for a tenant + */ + @Get('channels') + @ApiOperation({ summary: 'List notification channels' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiQuery({ name: 'type', required: false, enum: NotificationChannelType }) + @ApiQuery({ name: 'limit', required: false }) + @ApiQuery({ name: 'offset', required: false }) + @ApiResponse({ status: 200, description: 'Channels retrieved successfully' }) + async listChannels( + @Query() query: ListChannelsQuery, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const limit = parseInt(query.limit || '20', 10); + const offset = parseInt(query.offset || '0', 10); + + const where = { + tenantId, + ...(query.type && { type: query.type }), + }; + + const [channels, total] = await Promise.all([ + this.prisma.notificationChannel.findMany({ + where, + orderBy: { createdAt: 'desc' }, + skip: offset, + take: limit, + }), + this.prisma.notificationChannel.count({ where }), + ]); + + return { + success: true, + channels: channels.map((c) => this.formatChannel(c)), + pagination: { + total, + limit, + offset, + hasMore: offset + channels.length < total, + }, + }; + } + + /** + * POST /api/v1/notifications/channels + * Create a new notification channel + */ + @Post('channels') + @ApiOperation({ summary: 'Create notification channel' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 201, description: 'Channel created successfully' }) + async createChannel( + @Body() body: CreateChannelDto, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Validate required fields + if (!body.type) { + throw new HttpException('type is required', HttpStatus.BAD_REQUEST); + } + + if (!body.name) { + throw new HttpException('name is required', HttpStatus.BAD_REQUEST); + } + + if (!body.events || body.events.length === 0) { + throw new HttpException('events array is required', HttpStatus.BAD_REQUEST); + } + + // Validate channel type + if (!Object.values(NotificationChannelType).includes(body.type)) { + throw new HttpException( + `Invalid channel type. Valid types: ${Object.values(NotificationChannelType).join(', ')}`, + HttpStatus.BAD_REQUEST, + ); + } + + // Validate config based on type + this.validateChannelConfig(body.type, body.config); + + // Validate event types + const validEvents = this.getValidEventTypes(); + const invalidEvents = body.events.filter((e) => !validEvents.includes(e)); + if (invalidEvents.length > 0) { + throw new HttpException( + `Invalid event types: ${invalidEvents.join(', ')}`, + HttpStatus.BAD_REQUEST, + ); + } + + try { + const channel = await this.prisma.notificationChannel.create({ + data: { + tenantId, + type: body.type, + name: body.name, + description: body.description, + config: body.config as any, + events: body.events, + filters: (body.filters || {}) as any, + enabled: true, + verified: false, + }, + }); + + this.logger.log( + `Created ${body.type} notification channel ${channel.id} for tenant ${tenantId}`, + ); + + return { + success: true, + channel: this.formatChannel(channel), + message: 'Channel created successfully. Test the channel to verify configuration.', + }; + } catch (error: any) { + this.logger.error(`Failed to create notification channel: ${error.message}`); + throw new HttpException( + `Failed to create channel: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/notifications/channels/:id + * Get channel details + */ + @Get('channels/:id') + @ApiOperation({ summary: 'Get notification channel details' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Channel retrieved successfully' }) + async getChannel( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const channel = await this.prisma.notificationChannel.findFirst({ + where: { id, tenantId }, + }); + + if (!channel) { + throw new HttpException('Channel not found', HttpStatus.NOT_FOUND); + } + + return { + success: true, + channel: this.formatChannel(channel), + }; + } + + /** + * PUT /api/v1/notifications/channels/:id + * Update notification channel + */ + @Put('channels/:id') + @ApiOperation({ summary: 'Update notification channel' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Channel updated successfully' }) + async updateChannel( + @Param('id') id: string, + @Body() body: UpdateChannelDto, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Check channel exists and belongs to tenant + const existing = await this.prisma.notificationChannel.findFirst({ + where: { id, tenantId }, + }); + + if (!existing) { + throw new HttpException('Channel not found', HttpStatus.NOT_FOUND); + } + + // Validate config if provided + if (body.config) { + this.validateChannelConfig( + existing.type as NotificationChannelType, + { ...existing.config as any, ...body.config }, + ); + } + + // Validate events if provided + if (body.events) { + const validEvents = this.getValidEventTypes(); + const invalidEvents = body.events.filter((e) => !validEvents.includes(e)); + if (invalidEvents.length > 0) { + throw new HttpException( + `Invalid event types: ${invalidEvents.join(', ')}`, + HttpStatus.BAD_REQUEST, + ); + } + } + + try { + const channel = await this.prisma.notificationChannel.update({ + where: { id }, + data: { + ...(body.name && { name: body.name }), + ...(body.description !== undefined && { description: body.description }), + ...(body.config && { config: body.config as any }), + ...(body.events && { events: body.events }), + ...(body.filters && { filters: body.filters as any }), + ...(body.enabled !== undefined && { enabled: body.enabled }), + // Reset verified status if config changed + ...(body.config && { verified: false }), + }, + }); + + this.logger.log(`Updated notification channel ${id}`); + + return { + success: true, + channel: this.formatChannel(channel), + message: 'Channel updated successfully', + }; + } catch (error: any) { + this.logger.error(`Failed to update channel ${id}: ${error.message}`); + throw new HttpException( + `Failed to update channel: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * DELETE /api/v1/notifications/channels/:id + * Delete notification channel + */ + @Delete('channels/:id') + @ApiOperation({ summary: 'Delete notification channel' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Channel deleted successfully' }) + async deleteChannel( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Check channel exists and belongs to tenant + const existing = await this.prisma.notificationChannel.findFirst({ + where: { id, tenantId }, + }); + + if (!existing) { + throw new HttpException('Channel not found', HttpStatus.NOT_FOUND); + } + + try { + await this.prisma.notificationChannel.delete({ where: { id } }); + + this.logger.log(`Deleted notification channel ${id}`); + + return { + success: true, + message: 'Channel deleted successfully', + }; + } catch (error: any) { + this.logger.error(`Failed to delete channel ${id}: ${error.message}`); + throw new HttpException( + `Failed to delete channel: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/notifications/channels/:id/test + * Test notification channel + */ + @Post('channels/:id/test') + @ApiOperation({ summary: 'Test notification channel delivery' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiResponse({ status: 200, description: 'Test notification sent' }) + async testChannel( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const channel = await this.prisma.notificationChannel.findFirst({ + where: { id, tenantId }, + }); + + if (!channel) { + throw new HttpException('Channel not found', HttpStatus.NOT_FOUND); + } + + try { + let result: { success: boolean; error?: string }; + + switch (channel.type) { + case NotificationChannelType.SLACK: + result = await this.slackService.testChannel(id); + break; + case NotificationChannelType.TEAMS: + result = await this.teamsService.testChannel(id); + break; + default: + result = { success: false, error: 'Test not implemented for this channel type' }; + } + + if (result.success) { + // Mark channel as verified + await this.prisma.notificationChannel.update({ + where: { id }, + data: { verified: true }, + }); + + return { + success: true, + message: 'Test notification sent successfully', + }; + } else { + return { + success: false, + message: 'Test notification failed', + error: result.error, + }; + } + } catch (error: any) { + this.logger.error(`Failed to test channel ${id}: ${error.message}`); + throw new HttpException( + `Failed to test channel: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/notifications/channels/:id/deliveries + * Get channel delivery history + */ + @Get('channels/:id/deliveries') + @ApiOperation({ summary: 'Get notification delivery history' }) + @ApiHeader({ name: 'X-Tenant-Id', required: true }) + @ApiQuery({ name: 'limit', required: false }) + @ApiResponse({ status: 200, description: 'Deliveries retrieved successfully' }) + async getDeliveries( + @Param('id') id: string, + @Query('limit') limit?: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Verify channel belongs to tenant + const channel = await this.prisma.notificationChannel.findFirst({ + where: { id, tenantId }, + }); + + if (!channel) { + throw new HttpException('Channel not found', HttpStatus.NOT_FOUND); + } + + const deliveryLimit = parseInt(limit || '50', 10); + + const deliveries = await this.prisma.notificationDelivery.findMany({ + where: { channelId: id }, + orderBy: { createdAt: 'desc' }, + take: deliveryLimit, + }); + + return { + success: true, + deliveries: deliveries.map((d) => ({ + id: d.id, + eventId: d.eventId, + eventType: d.eventType, + success: d.success, + statusCode: d.statusCode, + error: d.error, + attempts: d.attempts, + deliveredAt: d.deliveredAt, + createdAt: d.createdAt, + })), + }; + } + + /** + * GET /api/v1/notifications/types + * Get available channel types + */ + @Get('types') + @ApiOperation({ summary: 'Get available notification channel types' }) + @ApiResponse({ status: 200, description: 'Channel types retrieved' }) + getChannelTypes() { + return { + success: true, + types: [ + { + type: NotificationChannelType.SLACK, + name: 'Slack', + description: 'Send notifications to a Slack channel (webhook or bot token)', + configFields: [ + { + name: 'webhookUrl', + type: 'string', + required: false, + description: + 'Slack Incoming Webhook URL (webhook mode). If omitted, ByteBot can post via bot token using channelId/channel.', + }, + { name: 'channelId', type: 'string', required: false, description: 'Slack conversation ID (bot mode)' }, + { name: 'channel', type: 'string', required: false, description: 'Channel name like #butler-vantage (bot mode)' }, + { name: 'username', type: 'string', required: false, description: 'Bot username (optional)' }, + { name: 'iconEmoji', type: 'string', required: false, description: 'Bot icon emoji (optional)' }, + ], + }, + { + type: NotificationChannelType.TEAMS, + name: 'Microsoft Teams', + description: 'Send notifications to a Teams channel via Incoming Webhook', + configFields: [ + { name: 'webhookUrl', type: 'string', required: true, description: 'Teams Incoming Webhook URL' }, + { name: 'mentionUsers', type: 'array', required: false, description: 'User emails to @mention' }, + ], + }, + { + type: NotificationChannelType.EMAIL, + name: 'Email', + description: 'Send notifications via email (coming soon)', + configFields: [ + { name: 'email', type: 'string', required: true, description: 'Recipient email address' }, + ], + }, + { + type: NotificationChannelType.CUSTOM_WEBHOOK, + name: 'Custom Webhook', + description: 'Send notifications to a custom HTTP endpoint', + configFields: [ + { name: 'webhookUrl', type: 'string', required: true, description: 'Webhook URL' }, + { name: 'method', type: 'string', required: false, description: 'HTTP method (POST or PUT)' }, + { name: 'headers', type: 'object', required: false, description: 'Custom HTTP headers' }, + ], + }, + ], + }; + } + + /** + * GET /api/v1/notifications/events + * Get available event types + */ + @Get('events') + @ApiOperation({ summary: 'Get available notification event types' }) + @ApiResponse({ status: 200, description: 'Event types retrieved' }) + getEventTypes() { + return { + success: true, + events: [ + // Goal events + { type: SlackEventType.GOAL_STARTED, category: 'goals', description: 'Goal run has started' }, + { type: SlackEventType.GOAL_COMPLETED, category: 'goals', description: 'Goal run completed successfully' }, + { type: SlackEventType.GOAL_FAILED, category: 'goals', description: 'Goal run failed' }, + { type: SlackEventType.GOAL_CANCELLED, category: 'goals', description: 'Goal run was cancelled' }, + // Batch events + { type: SlackEventType.BATCH_STARTED, category: 'batches', description: 'Batch execution started' }, + { type: SlackEventType.BATCH_COMPLETED, category: 'batches', description: 'Batch execution completed' }, + { type: SlackEventType.BATCH_FAILED, category: 'batches', description: 'Batch execution failed' }, + { type: SlackEventType.BATCH_PROGRESS, category: 'batches', description: 'Batch progress update' }, + // Approval events + { type: SlackEventType.APPROVAL_REQUESTED, category: 'approvals', description: 'Approval is requested' }, + { type: SlackEventType.APPROVAL_APPROVED, category: 'approvals', description: 'Action was approved' }, + { type: SlackEventType.APPROVAL_REJECTED, category: 'approvals', description: 'Action was rejected' }, + { type: SlackEventType.APPROVAL_EXPIRED, category: 'approvals', description: 'Approval request expired' }, + // External input (UserPrompts) + { type: SlackEventType.USER_PROMPT_CREATED, category: 'prompts', description: 'External input requested' }, + { type: SlackEventType.USER_PROMPT_RESOLVED, category: 'prompts', description: 'External input resolved' }, + { type: SlackEventType.USER_PROMPT_CANCELLED, category: 'prompts', description: 'External input cancelled' }, + ], + }; + } + + /** + * Validate channel config based on type + */ + private validateChannelConfig(type: NotificationChannelType, config: any) { + switch (type) { + case NotificationChannelType.SLACK: + // Slack supports either: + // - Incoming webhook delivery (webhookUrl), OR + // - Bot token delivery (channelId/channel), where the bot token is stored server-side. + if (config.webhookUrl) { + try { + new URL(config.webhookUrl); + } catch { + throw new HttpException('Invalid webhookUrl format', HttpStatus.BAD_REQUEST); + } + break; + } + + if (config.channelId) { + if (typeof config.channelId !== 'string' || config.channelId.trim().length === 0) { + throw new HttpException('Invalid channelId', HttpStatus.BAD_REQUEST); + } + break; + } + + if (config.channel) { + if (typeof config.channel !== 'string' || config.channel.trim().length === 0) { + throw new HttpException('Invalid channel', HttpStatus.BAD_REQUEST); + } + break; + } + + throw new HttpException( + 'Slack config requires webhookUrl (incoming webhook) or channelId/channel (bot-based delivery)', + HttpStatus.BAD_REQUEST, + ); + + case NotificationChannelType.TEAMS: + if (!config.webhookUrl) { + throw new HttpException('webhookUrl is required', HttpStatus.BAD_REQUEST); + } + try { + new URL(config.webhookUrl); + } catch { + throw new HttpException('Invalid webhookUrl format', HttpStatus.BAD_REQUEST); + } + break; + + case NotificationChannelType.EMAIL: + if (!config.email) { + throw new HttpException('email is required', HttpStatus.BAD_REQUEST); + } + // Basic email validation + if (!/^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(config.email)) { + throw new HttpException('Invalid email format', HttpStatus.BAD_REQUEST); + } + break; + + case NotificationChannelType.CUSTOM_WEBHOOK: + if (!config.webhookUrl) { + throw new HttpException('webhookUrl is required', HttpStatus.BAD_REQUEST); + } + try { + new URL(config.webhookUrl); + } catch { + throw new HttpException('Invalid webhookUrl format', HttpStatus.BAD_REQUEST); + } + break; + } + } + + /** + * Get valid event types + */ + private getValidEventTypes(): string[] { + return [ + // Goal events + SlackEventType.GOAL_STARTED, + SlackEventType.GOAL_COMPLETED, + SlackEventType.GOAL_FAILED, + SlackEventType.GOAL_CANCELLED, + // Batch events + SlackEventType.BATCH_STARTED, + SlackEventType.BATCH_COMPLETED, + SlackEventType.BATCH_FAILED, + SlackEventType.BATCH_PROGRESS, + // Approval events + SlackEventType.APPROVAL_REQUESTED, + SlackEventType.APPROVAL_APPROVED, + SlackEventType.APPROVAL_REJECTED, + SlackEventType.APPROVAL_EXPIRED, + // User prompt events (durable WAIT surface) + SlackEventType.USER_PROMPT_CREATED, + SlackEventType.USER_PROMPT_RESOLVED, + SlackEventType.USER_PROMPT_CANCELLED, + ]; + } + + /** + * Format channel for response (hide sensitive config) + */ + private formatChannel(channel: any) { + const config = { ...(channel.config as any) }; + + // Mask webhook URL + if (config.webhookUrl) { + const url = new URL(config.webhookUrl); + config.webhookUrl = `${url.protocol}//${url.host}/...${url.pathname.slice(-8)}`; + } + + return { + id: channel.id, + tenantId: channel.tenantId, + type: channel.type, + name: channel.name, + description: channel.description, + config, + events: channel.events, + filters: channel.filters, + enabled: channel.enabled, + verified: channel.verified, + createdAt: channel.createdAt, + updatedAt: channel.updatedAt, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/self-healing.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/self-healing.controller.ts new file mode 100644 index 000000000..6520a8435 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/self-healing.controller.ts @@ -0,0 +1,644 @@ +/** + * Self-Healing Controller + * v1.1.0: Added workspace health/reconciler status endpoints + * v1.0.0: Phase 9 Self-Healing & Auto-Recovery + * + * REST API endpoints for self-healing features: + * - Circuit breaker status and management + * - Dead letter queue operations + * - Workflow checkpoint and recovery + * - Task recovery management + * - Recovery logs and statistics + * - Workspace health and reconciliation (v1.1.0) + */ + +import { + Controller, + Get, + Post, + Body, + Param, + Query, + HttpCode, + HttpStatus, + Logger, +} from '@nestjs/common'; +import { CircuitBreakerService, CircuitBreakerStateEnum } from '../services/circuit-breaker.service'; +import { DeadLetterQueueService, DLQStatus, DLQSeverity, FailureCategory } from '../services/dead-letter-queue.service'; +import { WorkflowCheckpointService } from '../services/workflow-checkpoint.service'; +import { TaskRecoveryService } from '../services/task-recovery.service'; +import { PrismaService } from '../services/prisma.service'; +import { WorkspaceDbReconcilerService } from '../services/workspace-db-reconciler.service'; +import { OrphanPodGCService } from '../services/orphan-pod-gc.service'; + +// DTOs +class RetryDLQEntryDto { + ids?: string[]; + id?: string; +} + +class SkipDLQEntryDto { + ids?: string[]; + id?: string; + reason: string; +} + +class DiscardDLQEntryDto { + id: string; + reason: string; +} + +class RecoverWorkflowDto { + workflowRunId: string; + skipCompletedNodes?: boolean; + resetFailedNodes?: boolean; + fromCheckpoint?: string; +} + +class ManualRecoverTaskDto { + nodeRunId: string; +} + +@Controller('recovery') +export class SelfHealingController { + private readonly logger = new Logger(SelfHealingController.name); + + constructor( + private readonly circuitBreaker: CircuitBreakerService, + private readonly dlq: DeadLetterQueueService, + private readonly checkpoint: WorkflowCheckpointService, + private readonly taskRecovery: TaskRecoveryService, + private readonly prisma: PrismaService, + private readonly workspaceReconciler: WorkspaceDbReconcilerService, + private readonly orphanPodGC: OrphanPodGCService, + ) {} + + // ========================================================================= + // Circuit Breaker Endpoints + // ========================================================================= + + /** + * Get all circuit breaker statuses + */ + @Get('circuits') + async getCircuitBreakers() { + const circuits = this.circuitBreaker.getAllStats(); + return { + success: true, + circuits, + count: circuits.length, + }; + } + + /** + * Get circuit breaker status for a specific service + */ + @Get('circuits/:serviceName') + async getCircuitBreaker(@Param('serviceName') serviceName: string) { + const stats = this.circuitBreaker.getStats(serviceName); + if (!stats) { + return { + success: false, + error: 'Circuit breaker not found', + }; + } + return { + success: true, + serviceName, + ...stats, + }; + } + + /** + * Reset a circuit breaker + */ + @Post('circuits/:serviceName/reset') + @HttpCode(HttpStatus.OK) + async resetCircuitBreaker(@Param('serviceName') serviceName: string) { + await this.circuitBreaker.resetCircuit(serviceName); + return { + success: true, + message: `Circuit breaker for ${serviceName} reset to CLOSED`, + }; + } + + /** + * Get circuit breaker history from database + */ + @Get('circuits/:serviceName/history') + async getCircuitBreakerHistory( + @Param('serviceName') serviceName: string, + @Query('limit') limit?: string, + ) { + const logs = await this.prisma.recoveryLog.findMany({ + where: { + targetType: 'CIRCUIT_BREAKER', + targetId: serviceName, + }, + orderBy: { createdAt: 'desc' }, + take: parseInt(limit ?? '20', 10), + }); + + return { + success: true, + serviceName, + history: logs, + }; + } + + // ========================================================================= + // Dead Letter Queue Endpoints + // ========================================================================= + + /** + * Get DLQ statistics + */ + @Get('dlq/stats') + async getDLQStats(@Query('tenantId') tenantId?: string) { + const stats = await this.dlq.getStats(tenantId); + return { + success: true, + stats, + }; + } + + /** + * Get DLQ entries + */ + @Get('dlq/entries') + async getDLQEntries( + @Query('tenantId') tenantId?: string, + @Query('status') status?: DLQStatus, + @Query('severity') severity?: DLQSeverity, + @Query('category') category?: FailureCategory, + @Query('limit') limit?: string, + @Query('offset') offset?: string, + ) { + const result = await this.dlq.getEntries({ + tenantId, + status, + severity, + category, + limit: limit ? parseInt(limit, 10) : undefined, + offset: offset ? parseInt(offset, 10) : undefined, + }); + + return { + success: true, + entries: result.entries, + total: result.total, + }; + } + + /** + * Get a single DLQ entry + */ + @Get('dlq/entries/:id') + async getDLQEntry(@Param('id') id: string) { + const entry = await this.dlq.getEntry(id); + if (!entry) { + return { + success: false, + error: 'Entry not found', + }; + } + return { + success: true, + entry, + }; + } + + /** + * Retry a DLQ entry (or bulk retry) + */ + @Post('dlq/retry') + @HttpCode(HttpStatus.OK) + async retryDLQEntry(@Body() dto: RetryDLQEntryDto) { + if (dto.ids && dto.ids.length > 0) { + const results = await this.dlq.bulkRetry(dto.ids); + const successCount = results.filter((r) => r.success).length; + return { + success: true, + message: `${successCount}/${dto.ids.length} entries queued for retry`, + results, + }; + } + + if (dto.id) { + const result = await this.dlq.retryEntry(dto.id); + return { + success: result.success, + result, + }; + } + + return { + success: false, + error: 'Must provide id or ids', + }; + } + + /** + * Skip a DLQ entry (or bulk skip) + */ + @Post('dlq/skip') + @HttpCode(HttpStatus.OK) + async skipDLQEntry(@Body() dto: SkipDLQEntryDto) { + if (!dto.reason) { + return { + success: false, + error: 'Reason is required', + }; + } + + if (dto.ids && dto.ids.length > 0) { + const results = await this.dlq.bulkSkip(dto.ids, dto.reason); + const successCount = results.filter((r) => r.success).length; + return { + success: true, + message: `${successCount}/${dto.ids.length} entries skipped`, + results, + }; + } + + if (dto.id) { + const result = await this.dlq.skipEntry(dto.id, dto.reason); + return { + success: result.success, + result, + }; + } + + return { + success: false, + error: 'Must provide id or ids', + }; + } + + /** + * Discard a DLQ entry + */ + @Post('dlq/discard') + @HttpCode(HttpStatus.OK) + async discardDLQEntry(@Body() dto: DiscardDLQEntryDto) { + if (!dto.reason) { + return { + success: false, + error: 'Reason is required', + }; + } + + const result = await this.dlq.discardEntry(dto.id, dto.reason); + return { + success: result.success, + result, + }; + } + + // ========================================================================= + // Checkpoint Endpoints + // ========================================================================= + + /** + * Get checkpoint statistics + */ + @Get('checkpoints/stats') + async getCheckpointStats() { + const stats = await this.checkpoint.getCheckpointStats(); + return { + success: true, + stats, + }; + } + + /** + * Get checkpoints for a workflow + */ + @Get('checkpoints/:workflowRunId') + async getWorkflowCheckpoints(@Param('workflowRunId') workflowRunId: string) { + const checkpoints = await this.checkpoint.getWorkflowCheckpoints(workflowRunId); + return { + success: true, + workflowRunId, + checkpoints, + count: checkpoints.length, + }; + } + + /** + * Check if a workflow can be recovered + */ + @Get('checkpoints/:workflowRunId/can-recover') + async canRecoverWorkflow(@Param('workflowRunId') workflowRunId: string) { + const result = await this.checkpoint.canRecover(workflowRunId); + return { + success: true, + workflowRunId, + ...result, + }; + } + + /** + * Recover a workflow from checkpoint + */ + @Post('checkpoints/recover') + @HttpCode(HttpStatus.OK) + async recoverWorkflow(@Body() dto: RecoverWorkflowDto) { + const result = await this.checkpoint.recoverWorkflow(dto.workflowRunId, { + skipCompletedNodes: dto.skipCompletedNodes, + resetFailedNodes: dto.resetFailedNodes, + fromCheckpoint: dto.fromCheckpoint, + }); + + return { + success: result.success, + result, + }; + } + + // ========================================================================= + // Task Recovery Endpoints + // ========================================================================= + + /** + * Get task recovery statistics + */ + @Get('tasks/stats') + async getTaskRecoveryStats() { + const stats = await this.taskRecovery.getRecoveryStats(); + return { + success: true, + stats, + }; + } + + /** + * Get stale tasks + */ + @Get('tasks/stale') + async getStaleTasks( + @Query('status') status?: string, + @Query('limit') limit?: string, + ) { + const where: any = {}; + if (status) { + where.status = status; + } + + const tasks = await this.prisma.staleTask.findMany({ + where, + orderBy: { detectedAt: 'desc' }, + take: parseInt(limit ?? '50', 10), + }); + + return { + success: true, + tasks, + count: tasks.length, + }; + } + + /** + * Manually trigger recovery for a task + */ + @Post('tasks/recover') + @HttpCode(HttpStatus.OK) + async manualRecoverTask(@Body() dto: ManualRecoverTaskDto) { + const result = await this.taskRecovery.manualRecover(dto.nodeRunId); + return { + success: result.success, + result, + }; + } + + /** + * Trigger a full recovery check + */ + @Post('tasks/check') + @HttpCode(HttpStatus.OK) + async triggerRecoveryCheck() { + const summary = await this.taskRecovery.runRecoveryCheck(); + return { + success: true, + summary, + }; + } + + // ========================================================================= + // Recovery Logs + // ========================================================================= + + /** + * Get recovery logs + */ + @Get('logs') + async getRecoveryLogs( + @Query('tenantId') tenantId?: string, + @Query('actionType') actionType?: string, + @Query('targetType') targetType?: string, + @Query('limit') limit?: string, + @Query('offset') offset?: string, + ) { + const where: any = {}; + if (tenantId) where.tenantId = tenantId; + if (actionType) where.actionType = actionType; + if (targetType) where.targetType = targetType; + + const [logs, total] = await Promise.all([ + this.prisma.recoveryLog.findMany({ + where, + orderBy: { createdAt: 'desc' }, + take: parseInt(limit ?? '50', 10), + skip: parseInt(offset ?? '0', 10), + }), + this.prisma.recoveryLog.count({ where }), + ]); + + return { + success: true, + logs, + total, + }; + } + + /** + * Get recovery log summary + */ + @Get('logs/summary') + async getRecoveryLogSummary(@Query('tenantId') tenantId?: string) { + const where: any = tenantId ? { tenantId } : {}; + const oneDayAgo = new Date(Date.now() - 24 * 60 * 60 * 1000); + + const [totalLogs, successfulLogs, recentLogs, byActionType] = await Promise.all([ + this.prisma.recoveryLog.count({ where }), + this.prisma.recoveryLog.count({ where: { ...where, success: true } }), + this.prisma.recoveryLog.count({ + where: { ...where, createdAt: { gte: oneDayAgo } }, + }), + this.prisma.recoveryLog.groupBy({ + by: ['actionType'], + where, + _count: true, + }), + ]); + + return { + success: true, + summary: { + totalLogs, + successfulLogs, + failedLogs: totalLogs - successfulLogs, + successRate: totalLogs > 0 ? (successfulLogs / totalLogs) * 100 : 0, + recentLogs24h: recentLogs, + byActionType: byActionType.map((b) => ({ + actionType: b.actionType, + count: b._count, + })), + }, + }; + } + + // ========================================================================= + // Health Summary + // ========================================================================= + + /** + * Get overall self-healing health summary + */ + @Get('health') + async getSelfHealingHealth() { + const [ + circuitStats, + dlqStats, + checkpointStats, + taskRecoveryStats, + ] = await Promise.all([ + Promise.resolve(this.circuitBreaker.getAllStats()), + this.dlq.getStats(), + this.checkpoint.getCheckpointStats(), + this.taskRecovery.getRecoveryStats(), + ]); + + // Determine overall health + const openCircuits = circuitStats.filter( + (c) => c.state === CircuitBreakerStateEnum.OPEN, + ).length; + + const healthStatus = + openCircuits > 0 || dlqStats.pending > 10 || taskRecoveryStats.pendingStaleTasks > 5 + ? 'DEGRADED' + : 'HEALTHY'; + + return { + success: true, + status: healthStatus, + summary: { + circuits: { + total: circuitStats.length, + open: openCircuits, + halfOpen: circuitStats.filter( + (c) => c.state === CircuitBreakerStateEnum.HALF_OPEN, + ).length, + closed: circuitStats.filter( + (c) => c.state === CircuitBreakerStateEnum.CLOSED, + ).length, + }, + dlq: { + pending: dlqStats.pending, + retrying: dlqStats.retrying, + criticalCount: dlqStats.bySeverity.critical, + }, + checkpoints: { + total: checkpointStats.totalCheckpoints, + recoverable: checkpointStats.recoverableCheckpoints, + expiringWithin24h: checkpointStats.expiringWithin24h, + }, + taskRecovery: { + pendingStaleTasks: taskRecoveryStats.pendingStaleTasks, + recoveringTasks: taskRecoveryStats.recoveringTasks, + recoveredLast24h: taskRecoveryStats.recoveredLast24h, + }, + }, + }; + } + + // ========================================================================= + // Workspace Health & Reconciliation Endpoints (v1.1.0) + // ========================================================================= + + /** + * Get workspace health overview + * Shows K8s vs DB workspace state comparison and drift detection + */ + @Get('workspaces/health') + async getWorkspaceHealth() { + const health = await this.workspaceReconciler.getWorkspaceHealth(); + return { + success: true, + ...health, + summary: { + k8sActiveCount: health.k8sWorkspaces.filter( + (w) => w.phase === 'Running' || w.phase === 'Pending' + ).length, + dbActiveCount: health.dbActiveWorkspaces.length, + driftDetectedCount: health.driftDetectedWorkspaces.length, + hasDrift: health.driftDetectedWorkspaces.length > 0, + capacityStatus: `${health.capacityUsed}/${health.maxCapacity} used`, + }, + }; + } + + /** + * Get workspace DB reconciler status + */ + @Get('workspaces/reconciler') + async getReconcilerStatus() { + const status = await this.workspaceReconciler.getStatus(); + return { + success: true, + ...status, + }; + } + + /** + * Get orphan pod GC status + */ + @Get('workspaces/gc') + async getOrphanPodGCStatus() { + const status = await this.orphanPodGC.getStatus(); + return { + success: true, + ...status, + }; + } + + /** + * Trigger manual workspace DB reconciliation + */ + @Post('workspaces/reconcile') + @HttpCode(HttpStatus.OK) + async triggerReconcile() { + this.logger.log('Manual workspace DB reconciliation triggered'); + const result = await this.workspaceReconciler.runReconcile(); + return { + success: true, + message: 'Reconciliation completed', + result, + }; + } + + /** + * Trigger manual orphan pod GC + */ + @Post('workspaces/gc') + @HttpCode(HttpStatus.OK) + async triggerOrphanGC() { + this.logger.log('Manual orphan pod GC triggered'); + const result = await this.orphanPodGC.runGC(); + return { + success: true, + message: 'Orphan GC completed', + result, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/slack-bridge.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/slack-bridge.controller.ts new file mode 100644 index 000000000..19cb8f254 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/slack-bridge.controller.ts @@ -0,0 +1,63 @@ +import { + BadRequestException, + Body, + Controller, + Headers, + HttpCode, + HttpStatus, + Post, + Req, + UnauthorizedException, +} from '@nestjs/common'; +import type { Request } from 'express'; +import { SlackBridgeService } from '../services/slack-bridge.service'; + +@Controller() +export class SlackBridgeController { + constructor(private readonly slackBridgeService: SlackBridgeService) {} + + /** + * POST /api/v1/slack/interactivity + * Slack interactive components endpoint (buttons/modals). + * + * Notes: + * - Requires Slack request signature verification. + * - Does not expose any secrets; uses SLACK_SIGNING_SECRET + SLACK_BOT_TOKEN env vars. + */ + @Post('slack/interactivity') + @HttpCode(HttpStatus.OK) + async interactivity( + @Body() body: any, + @Req() req: Request & { rawBody?: Buffer }, + @Headers('x-slack-signature') slackSignature?: string, + @Headers('x-slack-request-timestamp') slackTimestamp?: string, + ) { + if (!this.slackBridgeService.isEnabled()) { + throw new BadRequestException('Slack reply-in-Slack is not enabled'); + } + + const ok = this.slackBridgeService.verifySlackRequest({ + rawBody: req.rawBody, + signature: slackSignature, + timestamp: slackTimestamp, + }); + if (!ok) { + throw new UnauthorizedException('Invalid Slack signature'); + } + + const payloadRaw = typeof body?.payload === 'string' ? body.payload : null; + if (!payloadRaw) { + throw new BadRequestException('Missing payload'); + } + + let payload: any; + try { + payload = JSON.parse(payloadRaw); + } catch { + throw new BadRequestException('Invalid payload JSON'); + } + + return await this.slackBridgeService.handleInteractivity(payload, req.headers as any); + } +} + diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/template.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/template.controller.ts new file mode 100644 index 000000000..b3419c0c5 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/template.controller.ts @@ -0,0 +1,657 @@ +/** + * Goal Template Controller + * Phase 7: Enhanced Features + * + * REST API endpoints for goal template management: + * - CRUD operations for templates + * - Template instantiation + * - Template versioning + * - Usage statistics + */ + +import { + Controller, + Get, + Post, + Put, + Delete, + Body, + Param, + Query, + HttpCode, + HttpStatus, + Headers, + BadRequestException, +} from '@nestjs/common'; +import { Throttle } from '@nestjs/throttler'; +import { + IsString, + IsOptional, + IsBoolean, + IsObject, + IsArray, + IsEnum, + IsNumber, + MinLength, + ValidateNested, +} from 'class-validator'; +import { Type } from 'class-transformer'; +import { + ApiTags, + ApiOperation, + ApiResponse, + ApiParam, + ApiQuery, + ApiHeader, + ApiProperty, +} from '@nestjs/swagger'; +import { + GoalTemplateService, + CreateGoalTemplateInput, + UpdateGoalTemplateInput, + GoalTemplateFilters, + CreateFromTemplateInput, +} from '../services/goal-template.service'; + +// ============================================================================ +// DTOs with class-validator decorators for ValidationPipe compatibility +// ============================================================================ + +enum VariableType { + STRING = 'string', + NUMBER = 'number', + BOOLEAN = 'boolean', + SELECT = 'select', +} + +class TemplateVariableDto { + @ApiProperty({ description: 'Variable name (used in {{name}} syntax)', example: 'repository_url' }) + @IsString() + name!: string; + + @ApiProperty({ enum: ['string', 'number', 'boolean', 'select'], description: 'Variable type' }) + @IsEnum(VariableType) + type!: VariableType; + + @ApiProperty({ description: 'Whether the variable is required' }) + @IsBoolean() + required!: boolean; + + @ApiProperty({ required: false, description: 'Default value for the variable' }) + @IsOptional() + default?: string | number | boolean; + + @ApiProperty({ required: false, description: 'Variable description/help text' }) + @IsOptional() + @IsString() + description?: string; + + @ApiProperty({ required: false, type: [String], description: 'Options for select type' }) + @IsOptional() + @IsArray() + options?: string[]; + + @ApiProperty({ required: false, description: 'Validation rules' }) + @IsOptional() + @IsObject() + validation?: { + minLength?: number; + maxLength?: number; + min?: number; + max?: number; + pattern?: string; + }; +} + +class ChecklistTemplateItemDto { + @ApiProperty({ description: 'Item order in checklist' }) + @IsNumber() + order!: number; + + @ApiProperty({ description: 'Description template with {{variables}}' }) + @IsString() + descriptionTemplate!: string; + + @ApiProperty({ required: false, description: 'Expected outcome template' }) + @IsOptional() + @IsString() + expectedOutcomeTemplate?: string; + + @ApiProperty({ required: false, type: [String], description: 'Suggested tools for this step' }) + @IsOptional() + @IsArray() + suggestedTools?: string[]; + + @ApiProperty({ required: false, description: 'Whether step requires desktop access' }) + @IsOptional() + @IsBoolean() + requiresDesktop?: boolean; +} + +class CreateTemplateDto { + @ApiProperty({ description: 'Template name', example: 'Deploy to Production', minLength: 3 }) + @IsString() + @MinLength(3) + name!: string; + + @ApiProperty({ required: false, description: 'Template description' }) + @IsOptional() + @IsString() + description?: string; + + @ApiProperty({ required: false, description: 'Category for organization', example: 'deployment' }) + @IsOptional() + @IsString() + category?: string; + + @ApiProperty({ required: false, type: [String], description: 'Tags for filtering', example: ['kubernetes', 'production'] }) + @IsOptional() + @IsArray() + tags?: string[]; + + @ApiProperty({ required: false, description: 'Icon identifier', example: 'rocket' }) + @IsOptional() + @IsString() + icon?: string; + + @ApiProperty({ + description: 'Goal pattern with {{variable}} placeholders', + example: 'Deploy {{service_name}} to {{environment}} using {{deploy_strategy}} strategy', + minLength: 10, + }) + @IsString() + @MinLength(10) + goalPattern!: string; + + @ApiProperty({ required: false, description: 'Default constraints for goal execution' }) + @IsOptional() + @IsObject() + defaultConstraints?: Record; + + @ApiProperty({ required: false, type: [TemplateVariableDto], description: 'Variable definitions' }) + @IsOptional() + @IsArray() + @ValidateNested({ each: true }) + @Type(() => TemplateVariableDto) + variables?: TemplateVariableDto[]; + + @ApiProperty({ required: false, type: [ChecklistTemplateItemDto], description: 'Checklist template for progress tracking' }) + @IsOptional() + @IsArray() + @ValidateNested({ each: true }) + @Type(() => ChecklistTemplateItemDto) + checklistTemplate?: ChecklistTemplateItemDto[]; +} + +class UpdateTemplateDto { + @ApiProperty({ required: false, description: 'Template name', minLength: 3 }) + @IsOptional() + @IsString() + @MinLength(3) + name?: string; + + @ApiProperty({ required: false, description: 'Template description' }) + @IsOptional() + @IsString() + description?: string; + + @ApiProperty({ required: false, description: 'Category for organization' }) + @IsOptional() + @IsString() + category?: string; + + @ApiProperty({ required: false, type: [String], description: 'Tags for filtering' }) + @IsOptional() + @IsArray() + tags?: string[]; + + @ApiProperty({ required: false, description: 'Icon identifier' }) + @IsOptional() + @IsString() + icon?: string; + + @ApiProperty({ required: false, description: 'Goal pattern with {{variable}} placeholders' }) + @IsOptional() + @IsString() + goalPattern?: string; + + @ApiProperty({ required: false, description: 'Default constraints for goal execution' }) + @IsOptional() + @IsObject() + defaultConstraints?: Record; + + @ApiProperty({ required: false, type: [TemplateVariableDto], description: 'Variable definitions' }) + @IsOptional() + @IsArray() + @ValidateNested({ each: true }) + @Type(() => TemplateVariableDto) + variables?: TemplateVariableDto[]; + + @ApiProperty({ required: false, type: [ChecklistTemplateItemDto], description: 'Checklist template' }) + @IsOptional() + @IsArray() + @ValidateNested({ each: true }) + @Type(() => ChecklistTemplateItemDto) + checklistTemplate?: ChecklistTemplateItemDto[]; +} + +class CreateFromTemplateDto { + @ApiProperty({ + description: 'Values for template variables', + example: { service_name: 'api-gateway', environment: 'production', deploy_strategy: 'rolling' }, + }) + @IsObject() + variableValues!: Record; + + @ApiProperty({ required: false, description: 'Override default constraints' }) + @IsOptional() + @IsObject() + constraintOverrides?: Record; + + @ApiProperty({ required: false, description: 'Auto-start goal run after creation', default: false }) + @IsOptional() + @IsBoolean() + autoStart?: boolean; +} + +class PreviewInstantiationDto { + @ApiProperty({ + description: 'Values for template variables to preview', + example: { service_name: 'api-gateway', environment: 'staging' }, + }) + @IsObject() + variableValues!: Record; +} + +class TemplateFiltersDto { + @ApiProperty({ required: false, description: 'Filter by category' }) + @IsOptional() + @IsString() + category?: string; + + @ApiProperty({ required: false, description: 'Filter by tags (comma-separated)' }) + @IsOptional() + @IsString() + tags?: string; + + @ApiProperty({ required: false, description: 'Filter by published status' }) + @IsOptional() + @IsString() + isPublished?: string; + + @ApiProperty({ required: false, description: 'Filter by built-in status' }) + @IsOptional() + @IsString() + isBuiltIn?: string; + + @ApiProperty({ required: false, description: 'Search in name and description' }) + @IsOptional() + @IsString() + search?: string; + + @ApiProperty({ required: false, description: 'Page number', default: '1' }) + @IsOptional() + @IsString() + page?: string; + + @ApiProperty({ required: false, description: 'Page size', default: '20' }) + @IsOptional() + @IsString() + pageSize?: string; +} + +@ApiTags('templates') +// v5.11.3: Removed deprecated api/v1/templates backward compatibility prefix (was scheduled for v5.6.0) +@Controller('templates') +export class TemplateController { + constructor(private templateService: GoalTemplateService) {} + + /** + * POST /api/v1/templates + * Create a new goal template + */ + @Post() + @HttpCode(HttpStatus.CREATED) + @ApiOperation({ + summary: 'Create a new goal template', + description: 'Creates a reusable goal template with variable placeholders for parameterization.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiHeader({ name: 'x-user-id', description: 'User identifier (optional)', required: false }) + @ApiResponse({ status: 201, description: 'Template created successfully' }) + @ApiResponse({ status: 400, description: 'Invalid input' }) + async createTemplate( + @Body() dto: CreateTemplateDto, + @Headers('x-tenant-id') tenantId?: string, + @Headers('x-user-id') userId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.name || dto.name.trim().length < 3) { + throw new BadRequestException('Template name must be at least 3 characters'); + } + + if (!dto.goalPattern || dto.goalPattern.trim().length < 10) { + throw new BadRequestException('Goal pattern must be at least 10 characters'); + } + + const input: CreateGoalTemplateInput = { + tenantId, + name: dto.name.trim(), + description: dto.description, + category: dto.category, + tags: dto.tags, + icon: dto.icon, + goalPattern: dto.goalPattern, + defaultConstraints: dto.defaultConstraints, + variables: dto.variables, + checklistTemplate: dto.checklistTemplate, + createdBy: userId, + }; + + const template = await this.templateService.create(input); + + return { + success: true, + data: template, + }; + } + + /** + * GET /api/v1/templates + * List templates for tenant + */ + @Get() + @ApiOperation({ + summary: 'List goal templates', + description: 'Returns paginated list of goal templates for the tenant with optional filtering.', + }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Templates retrieved successfully' }) + async listTemplates( + @Query() query: TemplateFiltersDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const filters: GoalTemplateFilters = { + category: query.category, + tags: query.tags ? query.tags.split(',') : undefined, + isPublished: query.isPublished === 'true' ? true : query.isPublished === 'false' ? false : undefined, + isBuiltIn: query.isBuiltIn === 'true' ? true : query.isBuiltIn === 'false' ? false : undefined, + search: query.search, + page: query.page ? parseInt(query.page, 10) : 1, + pageSize: query.pageSize ? parseInt(query.pageSize, 10) : 20, + }; + + const result = await this.templateService.findByTenant(tenantId, filters); + + return { + success: true, + ...result, + }; + } + + /** + * GET /api/v1/templates/categories + * Get all template categories + */ + @Get('categories') + @ApiOperation({ summary: 'Get template categories', description: 'Returns all unique categories for organizing templates.' }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 200, description: 'Categories retrieved successfully' }) + async getCategories(@Headers('x-tenant-id') tenantId?: string) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const categories = await this.templateService.getCategories(tenantId); + + return { + success: true, + data: categories, + }; + } + + /** + * GET /api/v1/templates/:id + * Get template by ID + */ + @Get(':id') + @ApiOperation({ summary: 'Get template by ID', description: 'Returns a specific goal template with all its details.' }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 200, description: 'Template retrieved successfully' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async getTemplate(@Param('id') id: string) { + const template = await this.templateService.findById(id); + + return { + success: true, + data: template, + }; + } + + /** + * PUT /api/v1/templates/:id + * Update template + */ + @Put(':id') + @ApiOperation({ summary: 'Update template', description: 'Updates an existing goal template. Creates a new version if structural changes are made.' }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 200, description: 'Template updated successfully' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async updateTemplate( + @Param('id') id: string, + @Body() dto: UpdateTemplateDto, + ) { + const input: UpdateGoalTemplateInput = { + name: dto.name, + description: dto.description, + category: dto.category, + tags: dto.tags, + icon: dto.icon, + goalPattern: dto.goalPattern, + defaultConstraints: dto.defaultConstraints, + variables: dto.variables, + checklistTemplate: dto.checklistTemplate, + }; + + const template = await this.templateService.update(id, input); + + return { + success: true, + data: template, + }; + } + + /** + * POST /api/v1/templates/:id/version + * Create new version of template + */ + @Post(':id/version') + @HttpCode(HttpStatus.CREATED) + @ApiOperation({ summary: 'Create new template version', description: 'Creates a new version of the template while preserving the original.' }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 201, description: 'New version created successfully' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async createNewVersion( + @Param('id') id: string, + @Body() dto: UpdateTemplateDto, + ) { + const input: UpdateGoalTemplateInput = { + name: dto.name, + description: dto.description, + category: dto.category, + tags: dto.tags, + icon: dto.icon, + goalPattern: dto.goalPattern, + defaultConstraints: dto.defaultConstraints, + variables: dto.variables, + checklistTemplate: dto.checklistTemplate, + }; + + const template = await this.templateService.createNewVersion(id, input); + + return { + success: true, + data: template, + }; + } + + /** + * POST /api/v1/templates/:id/publish + * Publish template + */ + @Post(':id/publish') + @HttpCode(HttpStatus.OK) + @ApiOperation({ summary: 'Publish template', description: 'Makes the template available for use by other users in the tenant.' }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 200, description: 'Template published successfully' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async publishTemplate(@Param('id') id: string) { + const template = await this.templateService.publish(id); + + return { + success: true, + data: template, + }; + } + + /** + * POST /api/v1/templates/:id/unpublish + * Unpublish template + */ + @Post(':id/unpublish') + @HttpCode(HttpStatus.OK) + @ApiOperation({ summary: 'Unpublish template', description: 'Removes the template from public availability.' }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 200, description: 'Template unpublished successfully' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async unpublishTemplate(@Param('id') id: string) { + const template = await this.templateService.unpublish(id); + + return { + success: true, + data: template, + }; + } + + /** + * DELETE /api/v1/templates/:id + * Delete template + */ + @Delete(':id') + @HttpCode(HttpStatus.NO_CONTENT) + @ApiOperation({ summary: 'Delete template', description: 'Permanently deletes a goal template.' }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 204, description: 'Template deleted successfully' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async deleteTemplate(@Param('id') id: string) { + await this.templateService.delete(id); + } + + /** + * POST /api/v1/templates/:id/instantiate + * Create goal run from template + * Rate limited: 5 per minute (expensive operation) + */ + @Post(':id/instantiate') + @HttpCode(HttpStatus.CREATED) + @Throttle({ default: { limit: 5, ttl: 60000 } }) + @ApiOperation({ + summary: 'Instantiate template', + description: 'Creates a new goal run from the template with the provided variable values. Rate limited to 5 requests per minute.', + }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiHeader({ name: 'x-tenant-id', description: 'Tenant identifier', required: true }) + @ApiResponse({ status: 201, description: 'Goal run created successfully' }) + @ApiResponse({ status: 400, description: 'Invalid variable values or missing required variables' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + @ApiResponse({ status: 429, description: 'Rate limit exceeded' }) + async instantiateTemplate( + @Param('id') id: string, + @Body() dto: CreateFromTemplateDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + if (!dto.variableValues) { + throw new BadRequestException('variableValues is required'); + } + + const input: CreateFromTemplateInput = { + tenantId, + templateId: id, + variableValues: dto.variableValues, + constraintOverrides: dto.constraintOverrides, + autoStart: dto.autoStart, + }; + + const goalRun = await this.templateService.createGoalRunFromTemplate(input); + + return { + success: true, + data: goalRun, + }; + } + + /** + * POST /api/v1/templates/:id/preview + * Preview template instantiation + */ + @Post(':id/preview') + @HttpCode(HttpStatus.OK) + @ApiOperation({ + summary: 'Preview template instantiation', + description: 'Returns a preview of what the goal text would look like with the provided variable values without creating a goal run.', + }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 200, description: 'Preview generated successfully' }) + @ApiResponse({ status: 400, description: 'Invalid variable values' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async previewInstantiation( + @Param('id') id: string, + @Body() dto: PreviewInstantiationDto, + ) { + if (!dto.variableValues) { + throw new BadRequestException('variableValues is required'); + } + + const preview = await this.templateService.previewInstantiation( + id, + dto.variableValues, + ); + + return { + success: true, + data: preview, + }; + } + + /** + * GET /api/v1/templates/:id/stats + * Get template usage statistics + */ + @Get(':id/stats') + @ApiOperation({ + summary: 'Get template statistics', + description: 'Returns usage statistics for the template including execution counts, success rates, and recent runs.', + }) + @ApiParam({ name: 'id', description: 'Template ID' }) + @ApiResponse({ status: 200, description: 'Statistics retrieved successfully' }) + @ApiResponse({ status: 404, description: 'Template not found' }) + async getTemplateStats(@Param('id') id: string) { + const stats = await this.templateService.getUsageStats(id); + + return { + success: true, + data: stats, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/user-prompt.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/user-prompt.controller.ts new file mode 100644 index 000000000..4bc1cdeaa --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/user-prompt.controller.ts @@ -0,0 +1,147 @@ +import { Body, Controller, Get, HttpCode, HttpStatus, Param, Post, BadRequestException, Headers, Query, Req } from '@nestjs/common'; +import { IsEnum, IsInt, IsObject, IsOptional, IsString, Max, Min, ValidateNested } from 'class-validator'; +import { Type } from 'class-transformer'; +import { ActorType, UserPromptKind, UserPromptScope, UserPromptStatus } from '@prisma/client'; +import { UserPromptResolutionService } from '../services/user-prompt-resolution.service'; +import { UserPromptService } from '../services/user-prompt.service'; +import type { Request } from 'express'; + +class PromptActorDto { + @IsEnum(ActorType) + type!: ActorType; + + @IsOptional() + @IsString() + id?: string; + + @IsOptional() + @IsString() + email?: string; + + @IsOptional() + @IsString() + name?: string; + + @IsOptional() + @IsObject() + authContext?: Record; +} + +class ResolveUserPromptDto { + @ValidateNested() + @Type(() => PromptActorDto) + actor!: PromptActorDto; + + @IsObject() + answers!: Record; +} + +class ListUserPromptsQueryDto { + @IsOptional() + @IsString() + goalRunId?: string; + + @IsOptional() + @IsEnum(UserPromptStatus) + status?: UserPromptStatus; + + @IsOptional() + @IsEnum(UserPromptKind) + kind?: UserPromptKind; + + @IsOptional() + @IsEnum(UserPromptScope) + scope?: UserPromptScope; + + @IsOptional() + @Type(() => Number) + @IsInt() + @Min(1) + @Max(200) + limit?: number; +} + +@Controller() +export class UserPromptController { + constructor( + private readonly userPromptResolutionService: UserPromptResolutionService, + private readonly userPromptService: UserPromptService, + ) {} + + /** + * GET /api/v1/user-prompts + * List prompts for a tenant (default limit=50). Answers are not returned. + */ + @Get('user-prompts') + @HttpCode(HttpStatus.OK) + async listPrompts( + @Query() query: ListUserPromptsQueryDto, + @Headers('x-tenant-id') tenantId?: string, + ) { + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + + const prompts = await this.userPromptService.listUserPrompts({ + tenantId, + goalRunId: query.goalRunId, + status: query.status, + kind: query.kind, + scope: query.scope, + limit: query.limit, + }); + + return { + success: true, + data: { + prompts, + }, + }; + } + + /** + * POST /api/v1/user-prompts/:promptId/resolve + * Resolve an OPEN prompt (idempotent). + */ + @Post('user-prompts/:promptId/resolve') + @HttpCode(HttpStatus.OK) + async resolvePrompt( + @Param('promptId') promptId: string, + @Body() dto: ResolveUserPromptDto, + @Headers('x-tenant-id') tenantId?: string, + @Req() req?: Request, + ) { + if (!promptId) { + throw new BadRequestException('promptId is required'); + } + if (!tenantId) { + throw new BadRequestException('x-tenant-id header is required'); + } + if (!dto?.actor || typeof dto.actor !== 'object') { + throw new BadRequestException('actor is required'); + } + if (!dto?.answers || typeof dto.answers !== 'object') { + throw new BadRequestException('answers must be an object'); + } + + const result = await this.userPromptResolutionService.resolvePrompt({ + promptId, + tenantId, + actor: dto.actor, + answers: dto.answers, + requestId: (req?.headers?.['x-request-id'] as string | undefined) ?? undefined, + clientRequestId: (req?.headers?.['x-client-request-id'] as string | undefined) ?? undefined, + idempotencyKey: + (req?.headers?.['idempotency-key'] as string | undefined) ?? + (req?.headers?.['x-idempotency-key'] as string | undefined) ?? + undefined, + ipAddress: req?.ip, + userAgent: (req?.headers?.['user-agent'] as string | undefined) ?? undefined, + }); + + return { + success: true, + data: result, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/webhook.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/webhook.controller.ts new file mode 100644 index 000000000..b3fdb1814 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/webhook.controller.ts @@ -0,0 +1,457 @@ +/** + * Webhook Controller + * Post-M5 Enhancement: API endpoints for managing webhook configurations + * + * Endpoints: + * - GET /api/v1/webhooks List webhooks for tenant + * - POST /api/v1/webhooks Create webhook configuration + * - GET /api/v1/webhooks/:id Get webhook details + * - PUT /api/v1/webhooks/:id Update webhook configuration + * - DELETE /api/v1/webhooks/:id Delete webhook + * - POST /api/v1/webhooks/:id/test Test webhook delivery + * - POST /api/v1/webhooks/:id/rotate-secret Rotate webhook secret + * - GET /api/v1/webhooks/:id/deliveries Get delivery history + */ + +import { + Controller, + Get, + Post, + Put, + Delete, + Param, + Body, + Query, + HttpException, + HttpStatus, + Logger, + Headers, +} from '@nestjs/common'; +import { WebhookService, WebhookEventType } from '../services/webhook.service'; + +/** + * DTOs for webhook endpoints + */ +interface CreateWebhookDto { + url: string; + events: string[]; + secret?: string; +} + +interface UpdateWebhookDto { + url?: string; + events?: string[]; + enabled?: boolean; +} + +interface ListWebhooksQuery { + limit?: string; + offset?: string; +} + +@Controller('webhooks') +export class WebhookController { + private readonly logger = new Logger(WebhookController.name); + + constructor(private readonly webhookService: WebhookService) {} + + /** + * GET /api/v1/webhooks + * List webhooks for a tenant + */ + @Get() + async listWebhooks( + @Query() query: ListWebhooksQuery, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const limit = parseInt(query.limit || '20', 10); + const offset = parseInt(query.offset || '0', 10); + + // Get all webhooks for tenant (we'll implement pagination in the service if needed) + const allEvents = Object.values(WebhookEventType); + const webhooks: any[] = []; + + for (const eventType of allEvents) { + const eventWebhooks = await this.webhookService.getActiveWebhooks(tenantId, eventType); + for (const webhook of eventWebhooks) { + if (!webhooks.find((w) => w.id === webhook.id)) { + webhooks.push(webhook); + } + } + } + + // Apply pagination + const paginated = webhooks.slice(offset, offset + limit); + + return { + success: true, + webhooks: paginated.map((w) => this.formatWebhook(w)), + pagination: { + total: webhooks.length, + limit, + offset, + hasMore: offset + paginated.length < webhooks.length, + }, + }; + } + + /** + * POST /api/v1/webhooks + * Create a new webhook configuration + */ + @Post() + async createWebhook( + @Body() body: CreateWebhookDto, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + if (!body.url) { + throw new HttpException('url is required', HttpStatus.BAD_REQUEST); + } + + if (!body.events || body.events.length === 0) { + throw new HttpException('events array is required', HttpStatus.BAD_REQUEST); + } + + // Validate URL + try { + new URL(body.url); + } catch { + throw new HttpException('Invalid URL format', HttpStatus.BAD_REQUEST); + } + + // Validate event types + const validEvents = Object.values(WebhookEventType); + const invalidEvents = body.events.filter((e) => !validEvents.includes(e as WebhookEventType)); + if (invalidEvents.length > 0) { + throw new HttpException( + `Invalid event types: ${invalidEvents.join(', ')}. Valid types: ${validEvents.join(', ')}`, + HttpStatus.BAD_REQUEST, + ); + } + + try { + const webhook = await this.webhookService.createWebhook({ + tenantId, + url: body.url, + events: body.events as WebhookEventType[], + secret: body.secret, + }); + + this.logger.log(`Created webhook ${webhook.id} for tenant ${tenantId}`); + + return { + success: true, + webhook: this.formatWebhookWithSecret(webhook), + message: 'Webhook created successfully. Save the secret - it will not be shown again.', + }; + } catch (error: any) { + this.logger.error(`Failed to create webhook: ${error.message}`); + throw new HttpException( + `Failed to create webhook: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/webhooks/:id + * Get webhook details + */ + @Get(':id') + async getWebhook( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // For now, just return basic info - we'd need a getById method in the service + throw new HttpException( + 'Get by ID not yet implemented - use list endpoint', + HttpStatus.NOT_IMPLEMENTED, + ); + } + + /** + * PUT /api/v1/webhooks/:id + * Update webhook configuration + */ + @Put(':id') + async updateWebhook( + @Param('id') id: string, + @Body() body: UpdateWebhookDto, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + // Validate URL if provided + if (body.url) { + try { + new URL(body.url); + } catch { + throw new HttpException('Invalid URL format', HttpStatus.BAD_REQUEST); + } + } + + // Validate event types if provided + if (body.events) { + const validEvents = Object.values(WebhookEventType); + const invalidEvents = body.events.filter((e) => !validEvents.includes(e as WebhookEventType)); + if (invalidEvents.length > 0) { + throw new HttpException( + `Invalid event types: ${invalidEvents.join(', ')}`, + HttpStatus.BAD_REQUEST, + ); + } + } + + try { + const webhook = await this.webhookService.updateWebhook(id, { + url: body.url, + events: body.events as WebhookEventType[] | undefined, + enabled: body.enabled, + }); + + this.logger.log(`Updated webhook ${id}`); + + return { + success: true, + webhook: this.formatWebhook(webhook), + message: 'Webhook updated successfully', + }; + } catch (error: any) { + this.logger.error(`Failed to update webhook ${id}: ${error.message}`); + + if (error.code === 'P2025') { + throw new HttpException('Webhook not found', HttpStatus.NOT_FOUND); + } + + throw new HttpException( + `Failed to update webhook: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * DELETE /api/v1/webhooks/:id + * Delete webhook configuration + */ + @Delete(':id') + async deleteWebhook( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + try { + await this.webhookService.deleteWebhook(id); + + this.logger.log(`Deleted webhook ${id}`); + + return { + success: true, + message: 'Webhook deleted successfully', + }; + } catch (error: any) { + this.logger.error(`Failed to delete webhook ${id}: ${error.message}`); + + if (error.code === 'P2025') { + throw new HttpException('Webhook not found', HttpStatus.NOT_FOUND); + } + + throw new HttpException( + `Failed to delete webhook: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/webhooks/:id/test + * Test webhook delivery + */ + @Post(':id/test') + async testWebhook( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + try { + const result = await this.webhookService.testWebhook(id); + + if (result.success) { + return { + success: true, + message: 'Test webhook delivered successfully', + statusCode: result.statusCode, + attempts: result.attempts, + }; + } else { + return { + success: false, + message: 'Test webhook delivery failed', + error: result.error, + statusCode: result.statusCode, + attempts: result.attempts, + }; + } + } catch (error: any) { + this.logger.error(`Failed to test webhook ${id}: ${error.message}`); + + if (error.message === 'Webhook not found') { + throw new HttpException('Webhook not found', HttpStatus.NOT_FOUND); + } + + throw new HttpException( + `Failed to test webhook: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/webhooks/:id/rotate-secret + * Rotate webhook secret + */ + @Post(':id/rotate-secret') + async rotateSecret( + @Param('id') id: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + try { + const result = await this.webhookService.rotateSecret(id); + + this.logger.log(`Rotated secret for webhook ${id}`); + + return { + success: true, + secret: result.secret, + message: 'Secret rotated successfully. Save the new secret - it will not be shown again.', + }; + } catch (error: any) { + this.logger.error(`Failed to rotate secret for webhook ${id}: ${error.message}`); + + if (error.code === 'P2025') { + throw new HttpException('Webhook not found', HttpStatus.NOT_FOUND); + } + + throw new HttpException( + `Failed to rotate secret: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/webhooks/:id/deliveries + * Get webhook delivery history + */ + @Get(':id/deliveries') + async getDeliveries( + @Param('id') id: string, + @Query('limit') limit?: string, + @Headers('X-Tenant-Id') tenantId?: string, + ) { + if (!tenantId) { + throw new HttpException('X-Tenant-Id header required', HttpStatus.BAD_REQUEST); + } + + const deliveryLimit = parseInt(limit || '50', 10); + + try { + const deliveries = await this.webhookService.getDeliveryHistory(id, deliveryLimit); + + return { + success: true, + deliveries: deliveries.map((d: any) => ({ + id: d.id, + eventId: d.eventId, + success: d.success, + statusCode: d.statusCode, + error: d.error, + attempts: d.attempts, + deliveredAt: d.deliveredAt, + createdAt: d.createdAt, + })), + }; + } catch (error: any) { + throw new HttpException( + `Failed to get delivery history: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/webhooks/events + * Get available webhook event types + */ + @Get('events/types') + getEventTypes() { + return { + success: true, + eventTypes: Object.values(WebhookEventType).map((type) => ({ + type, + description: this.getEventDescription(type), + })), + }; + } + + /** + * Format webhook for response (without secret) + */ + private formatWebhook(webhook: any) { + return { + id: webhook.id, + tenantId: webhook.tenantId, + url: webhook.url, + events: webhook.events, + enabled: webhook.enabled, + createdAt: webhook.createdAt, + }; + } + + /** + * Format webhook with secret (only for create) + */ + private formatWebhookWithSecret(webhook: any) { + return { + ...this.formatWebhook(webhook), + secret: webhook.secret, + }; + } + + /** + * Get human-readable event description + */ + private getEventDescription(eventType: WebhookEventType): string { + const descriptions: Record = { + [WebhookEventType.APPROVAL_REQUESTED]: 'Sent when a high-risk action requires approval', + [WebhookEventType.APPROVAL_APPROVED]: 'Sent when an action is approved', + [WebhookEventType.APPROVAL_REJECTED]: 'Sent when an action is rejected', + [WebhookEventType.APPROVAL_EXPIRED]: 'Sent when an approval request expires', + }; + return descriptions[eventType] || 'Unknown event type'; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/workflow.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/workflow.controller.ts new file mode 100644 index 000000000..0f034f852 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/workflow.controller.ts @@ -0,0 +1,159 @@ +/** + * Workflow Controller + * v1.0.0: REST API for workflow management + * + * Endpoints: + * - POST /workflows - Create a new workflow + * - GET /workflows/:id - Get workflow status + * - POST /workflows/:id/start - Start workflow execution + * - POST /workflows/:id/cancel - Cancel workflow + * - GET /workflows/:id/nodes - List workflow nodes + * - GET /workflows/:id/nodes/:nodeId - Get node details + */ + +import { + Controller, + Get, + Post, + Param, + Body, + HttpException, + HttpStatus, + Logger, +} from '@nestjs/common'; +import { IsString, IsOptional, IsArray, IsObject, MinLength } from 'class-validator'; +import { WorkflowService, CreateWorkflowInput } from '../services/workflow.service'; + +// ============================================================================ +// DTOs with class-validator decorators for ValidationPipe compatibility +// ============================================================================ + +/** + * DTO for creating a workflow + */ +class CreateWorkflowDto { + @IsString() + tenantId!: string; + + @IsString() + @MinLength(3) + name!: string; + + @IsOptional() + @IsString() + description?: string; + + @IsArray() + nodes!: any[]; + + @IsOptional() + @IsObject() + metadata?: Record; +} + +/** + * DTO for cancelling a workflow + */ +class CancelWorkflowDto { + @IsOptional() + @IsString() + reason?: string; +} + +@Controller('workflows') +export class WorkflowController { + private readonly logger = new Logger(WorkflowController.name); + + constructor(private workflowService: WorkflowService) {} + + /** + * POST /api/v1/workflows + * Create a new workflow + */ + @Post() + async createWorkflow(@Body() dto: CreateWorkflowDto) { + this.logger.log(`Creating workflow for tenant ${dto.tenantId}`); + + try { + const input: CreateWorkflowInput = dto; + const result = await this.workflowService.createWorkflow(input); + return { + success: true, + workflow: result, + }; + } catch (error: any) { + this.logger.error(`Failed to create workflow: ${error.message}`); + throw new HttpException( + `Failed to create workflow: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * GET /api/v1/workflows/:id + * Get workflow details + */ + @Get(':id') + async getWorkflow(@Param('id') id: string) { + const workflow = await this.workflowService.getWorkflow(id); + + if (!workflow) { + throw new HttpException('Workflow not found', HttpStatus.NOT_FOUND); + } + + return { + success: true, + workflow, + }; + } + + /** + * POST /api/v1/workflows/:id/start + * Start workflow execution + */ + @Post(':id/start') + async startWorkflow(@Param('id') id: string) { + this.logger.log(`Starting workflow ${id}`); + + try { + await this.workflowService.startWorkflow(id); + return { + success: true, + message: 'Workflow started', + }; + } catch (error: any) { + this.logger.error(`Failed to start workflow: ${error.message}`); + throw new HttpException( + `Failed to start workflow: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/workflows/:id/cancel + * Cancel workflow execution + */ + @Post(':id/cancel') + async cancelWorkflow( + @Param('id') id: string, + @Body() dto: CancelWorkflowDto, + ) { + this.logger.log(`Cancelling workflow ${id}`); + + try { + await this.workflowService.cancelWorkflow(id, dto.reason); + return { + success: true, + message: 'Workflow cancelled', + }; + } catch (error: any) { + this.logger.error(`Failed to cancel workflow: ${error.message}`); + throw new HttpException( + `Failed to cancel workflow: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/controllers/workspace-proxy.controller.ts b/packages/bytebot-workflow-orchestrator/src/controllers/workspace-proxy.controller.ts new file mode 100644 index 000000000..d97997cc4 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/controllers/workspace-proxy.controller.ts @@ -0,0 +1,337 @@ +/** + * Workspace Proxy Controller + * v1.0.0: Proxy for workspace operations + * + * This controller provides a convenient API for accessing workspace + * information without direct access to task-controller. + * It's primarily used by UI components and monitoring. + */ + +import { + Controller, + Get, + Post, + Delete, + Param, + Body, + HttpException, + HttpStatus, + Logger, +} from '@nestjs/common'; +import { WorkspaceService } from '../services/workspace.service'; +import { PrismaService } from '../services/prisma.service'; + +/** + * v2.3.0 M4: Lock request DTOs + */ +interface AcquireLockDto { + nodeRunId: string; + leaseSeconds?: number; +} + +interface RenewLockDto { + nodeRunId: string; + leaseSeconds?: number; +} + +interface ReleaseLockDto { + nodeRunId: string; +} + +@Controller('workspaces') +export class WorkspaceProxyController { + private readonly logger = new Logger(WorkspaceProxyController.name); + + constructor( + private workspaceService: WorkspaceService, + private prisma: PrismaService, + ) {} + + /** + * GET /api/v1/workspaces/:workspaceId + * Get workspace details including DB record and desktop status + */ + @Get(':workspaceId') + async getWorkspace(@Param('workspaceId') workspaceId: string) { + // Get DB record + const workspace = await this.prisma.workspace.findUnique({ + where: { id: workspaceId }, + include: { + workflowRun: { + select: { + id: true, + name: true, + status: true, + }, + }, + }, + }); + + if (!workspace) { + throw new HttpException('Workspace not found', HttpStatus.NOT_FOUND); + } + + // Get desktop status from task-controller + const desktopStatus = await this.workspaceService.getWorkspaceDesktopStatus( + workspaceId, + ); + + return { + success: true, + workspace: { + ...workspace, + desktop: desktopStatus, + }, + }; + } + + /** + * GET /api/v1/workspaces/:workspaceId/desktop + * Get just the desktop status + */ + @Get(':workspaceId/desktop') + async getDesktopStatus(@Param('workspaceId') workspaceId: string) { + const status = await this.workspaceService.getWorkspaceDesktopStatus( + workspaceId, + ); + + return { + success: true, + desktop: status, + }; + } + + /** + * POST /api/v1/workspaces/:workspaceId/wake + * Wake a hibernated workspace + */ + @Post(':workspaceId/wake') + async wakeWorkspace(@Param('workspaceId') workspaceId: string) { + this.logger.log(`Waking workspace ${workspaceId}`); + + // Get workspace from DB + const workspace = await this.prisma.workspace.findUnique({ + where: { id: workspaceId }, + }); + + if (!workspace) { + throw new HttpException('Workspace not found', HttpStatus.NOT_FOUND); + } + + try { + const status = await this.workspaceService.ensureWorkspaceDesktop( + workspaceId, + workspace.tenantId, + { + enabled: workspace.persistenceEnabled, + storageClass: workspace.storageClass || undefined, + size: workspace.storageSize || undefined, + }, + ); + + // Update DB record + await this.prisma.workspace.update({ + where: { id: workspaceId }, + data: { status: status.status === 'READY' ? 'READY' : 'CREATING' }, + }); + + return { + success: true, + desktop: status, + }; + } catch (error: any) { + this.logger.error(`Failed to wake workspace: ${error.message}`); + throw new HttpException( + `Failed to wake workspace: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/workspaces/:workspaceId/hibernate + * Hibernate workspace (delete pod, keep PVC) + */ + @Post(':workspaceId/hibernate') + async hibernateWorkspace(@Param('workspaceId') workspaceId: string) { + this.logger.log(`Hibernating workspace ${workspaceId}`); + + try { + const status = await this.workspaceService.hibernateWorkspace(workspaceId); + + // Update DB record + await this.prisma.workspace.update({ + where: { id: workspaceId }, + data: { status: 'HIBERNATED' }, + }); + + return { + success: true, + desktop: status, + }; + } catch (error: any) { + this.logger.error(`Failed to hibernate workspace: ${error.message}`); + throw new HttpException( + `Failed to hibernate workspace: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + /** + * POST /api/v1/workspaces/:workspaceId/terminate + * Fully terminate workspace (delete pod and PVC) + */ + @Post(':workspaceId/terminate') + async terminateWorkspace(@Param('workspaceId') workspaceId: string) { + this.logger.log(`Terminating workspace ${workspaceId}`); + + try { + const status = await this.workspaceService.terminateWorkspace( + workspaceId, + true, + ); + + // Update DB record + await this.prisma.workspace.update({ + where: { id: workspaceId }, + data: { status: 'TERMINATED' }, + }); + + return { + success: true, + desktop: status, + }; + } catch (error: any) { + this.logger.error(`Failed to terminate workspace: ${error.message}`); + throw new HttpException( + `Failed to terminate workspace: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + + // ============================================================================ + // v2.3.0 M4: Workspace Lock Endpoints + // Granular locking for desktop tool execution (30-60 second leases) + // ============================================================================ + + /** + * POST /api/v1/workspaces/:workspaceId/lock + * Acquire a granular lock on the workspace for desktop tool execution + */ + @Post(':workspaceId/lock') + async acquireLock( + @Param('workspaceId') workspaceId: string, + @Body() body: AcquireLockDto, + ) { + if (!body.nodeRunId) { + throw new HttpException('nodeRunId is required', HttpStatus.BAD_REQUEST); + } + + this.logger.log( + `Lock request on workspace ${workspaceId} by nodeRun ${body.nodeRunId}`, + ); + + const result = await this.workspaceService.acquireLock( + workspaceId, + body.nodeRunId, + body.leaseSeconds || 30, + ); + + if (!result.acquired) { + throw new HttpException( + { + success: false, + message: result.message, + retryAfterMs: result.retryAfterMs, + currentOwner: result.currentOwner, + }, + HttpStatus.CONFLICT, + ); + } + + return { + success: true, + acquired: true, + lockExpiresAt: result.lockExpiresAt, + message: result.message, + }; + } + + /** + * POST /api/v1/workspaces/:workspaceId/lock/renew + * Renew an existing lock + */ + @Post(':workspaceId/lock/renew') + async renewLock( + @Param('workspaceId') workspaceId: string, + @Body() body: RenewLockDto, + ) { + if (!body.nodeRunId) { + throw new HttpException('nodeRunId is required', HttpStatus.BAD_REQUEST); + } + + const result = await this.workspaceService.renewLock( + workspaceId, + body.nodeRunId, + body.leaseSeconds || 30, + ); + + if (!result.renewed) { + throw new HttpException( + { + success: false, + message: result.message, + }, + HttpStatus.CONFLICT, + ); + } + + return { + success: true, + renewed: true, + lockExpiresAt: result.lockExpiresAt, + message: result.message, + }; + } + + /** + * DELETE /api/v1/workspaces/:workspaceId/lock + * Release a lock on the workspace + */ + @Delete(':workspaceId/lock') + async releaseLock( + @Param('workspaceId') workspaceId: string, + @Body() body: ReleaseLockDto, + ) { + if (!body.nodeRunId) { + throw new HttpException('nodeRunId is required', HttpStatus.BAD_REQUEST); + } + + const result = await this.workspaceService.releaseLock( + workspaceId, + body.nodeRunId, + ); + + return { + success: true, + released: result.released, + message: result.message, + }; + } + + /** + * GET /api/v1/workspaces/:workspaceId/lock + * Get current lock status for a workspace + */ + @Get(':workspaceId/lock') + async getLockStatus(@Param('workspaceId') workspaceId: string) { + const status = await this.workspaceService.getLockStatus(workspaceId); + + return { + success: true, + ...status, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/gateways/run-events.gateway.ts b/packages/bytebot-workflow-orchestrator/src/gateways/run-events.gateway.ts new file mode 100644 index 000000000..a82129296 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/gateways/run-events.gateway.ts @@ -0,0 +1,410 @@ +/** + * Run Events Gateway + * Phase 5: Real-Time Event System + * + * WebSocket gateway for broadcasting goal run events in real-time: + * - Run status changes + * - Step progress updates + * - Activity events + * - Desktop status changes + * - Metrics updates + */ + +import { + WebSocketGateway, + WebSocketServer, + SubscribeMessage, + OnGatewayConnection, + OnGatewayDisconnect, + OnGatewayInit, + ConnectedSocket, + MessageBody, +} from '@nestjs/websockets'; +import { Server, Socket } from 'socket.io'; +import { Injectable, Logger, UseFilters } from '@nestjs/common'; +import { OnEvent } from '@nestjs/event-emitter'; +import { JwtService } from '@nestjs/jwt'; + +// Event types for type safety +export type RunEventType = + | 'run:started' + | 'run:paused' + | 'run:resumed' + | 'run:completed' + | 'run:failed' + | 'run:cancelled' + | 'run:phase_changed' + | 'run:intervened' + | 'run:control_returned' + | 'step:started' + | 'step:completed' + | 'step:failed' + | 'step:approved' + | 'step:rejected' + | 'approval:requested' + | 'activity:logged' + | 'desktop:status_changed' + | 'desktop:waking' + | 'screenshot:captured' + | 'metrics:updated'; + +export interface RunEvent { + type: RunEventType; + runId: string; + timestamp: Date; + data: T; +} + +// Extended Socket interface with authentication data +// Socket already has id, join, leave, emit, etc. from socket.io +interface AuthenticatedSocket extends Socket { + userId?: string; + tenantId?: string; + email?: string; + // Re-declare id to ensure TypeScript recognizes it + id: string; +} + +@Injectable() +@WebSocketGateway({ + namespace: '/ws/runs', + cors: { + origin: process.env.CORS_ORIGIN || '*', + credentials: true, + }, + transports: ['websocket', 'polling'], + pingInterval: 25000, + pingTimeout: 60000, +}) +export class RunEventsGateway + implements OnGatewayInit, OnGatewayConnection, OnGatewayDisconnect +{ + @WebSocketServer() + server: Server; + + private readonly logger = new Logger(RunEventsGateway.name); + + // Track run subscriptions: runId -> Set + private runSubscriptions = new Map>(); + + // Track client metadata: clientId -> { tenantId, userId } + private clientMetadata = new Map(); + + constructor(private jwtService: JwtService) {} + + afterInit(server: Server) { + this.logger.log('Run Events Gateway initialized'); + + // Authentication middleware + server.use(async (socket: AuthenticatedSocket, next) => { + try { + const token = socket.handshake.auth?.token; + + if (token) { + const decoded = this.jwtService.verify(token); + socket.userId = decoded.userId || decoded.sub; + socket.tenantId = decoded.tenantId || decoded.tenant_id; + socket.email = decoded.email; + } + + next(); + } catch (error: any) { + this.logger.warn(`Authentication failed: ${error.message}`); + // Allow connection but mark as unauthenticated + next(); + } + }); + } + + handleConnection(@ConnectedSocket() client: AuthenticatedSocket) { + this.logger.log( + `Client ${client.id} connected (tenant: ${client.tenantId}, user: ${client.userId})`, + ); + + if (client.tenantId && client.userId) { + this.clientMetadata.set(client.id, { + tenantId: client.tenantId, + userId: client.userId, + }); + + // Join tenant room + client.join(`tenant:${client.tenantId}`); + + // Join user room + client.join(`user:${client.userId}`); + } + + // Send connection confirmation + client.emit('connection:established', { + clientId: client.id, + authenticated: !!client.tenantId, + timestamp: new Date(), + }); + } + + handleDisconnect(@ConnectedSocket() client: AuthenticatedSocket) { + this.logger.log(`Client ${client.id} disconnected`); + + // Clean up subscriptions + this.runSubscriptions.forEach((subscribers, runId) => { + if (subscribers.has(client.id)) { + subscribers.delete(client.id); + if (subscribers.size === 0) { + this.runSubscriptions.delete(runId); + } + } + }); + + // Clean up metadata + this.clientMetadata.delete(client.id); + } + + /** + * Client subscribes to a specific run's events + */ + @SubscribeMessage('subscribe:run') + handleSubscribeToRun( + @ConnectedSocket() client: AuthenticatedSocket, + @MessageBody() data: { runId: string }, + ) { + const { runId } = data; + const roomName = `run:${runId}`; + + // TODO: Verify tenant access to this run + + client.join(roomName); + + if (!this.runSubscriptions.has(runId)) { + this.runSubscriptions.set(runId, new Set()); + } + this.runSubscriptions.get(runId)?.add(client.id); + + this.logger.debug( + `Client ${client.id} subscribed to run ${runId} (${this.runSubscriptions.get(runId)?.size} subscribers)`, + ); + + client.emit('subscription:confirmed', { + runId, + subscribed: true, + subscriberCount: this.runSubscriptions.get(runId)?.size || 0, + }); + } + + /** + * Client unsubscribes from a run + */ + @SubscribeMessage('unsubscribe:run') + handleUnsubscribeFromRun( + @ConnectedSocket() client: AuthenticatedSocket, + @MessageBody() data: { runId: string }, + ) { + const { runId } = data; + const roomName = `run:${runId}`; + + client.leave(roomName); + + this.runSubscriptions.get(runId)?.delete(client.id); + if (this.runSubscriptions.get(runId)?.size === 0) { + this.runSubscriptions.delete(runId); + } + + this.logger.debug(`Client ${client.id} unsubscribed from run ${runId}`); + + client.emit('subscription:cancelled', { runId }); + } + + /** + * Broadcast event to all subscribers of a run + */ + broadcastToRun(runId: string, event: RunEvent) { + this.server.to(`run:${runId}`).emit(event.type, event); + this.logger.debug(`Broadcast ${event.type} to run ${runId}`); + } + + /** + * Broadcast event to all clients in a tenant + */ + broadcastToTenant(tenantId: string, event: RunEvent) { + this.server.to(`tenant:${tenantId}`).emit(event.type, event); + } + + /** + * Get subscriber count for a run + */ + getRunSubscriberCount(runId: string): number { + return this.runSubscriptions.get(runId)?.size || 0; + } + + // ========================================== + // Event Handlers - Listen to internal events + // ========================================== + + @OnEvent('goal-run.started') + handleRunStarted(payload: { goalRunId: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:started', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.paused') + handleRunPaused(payload: { goalRunId: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:paused', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.resumed') + handleRunResumed(payload: { goalRunId: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:resumed', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.completed') + handleRunCompleted(payload: { goalRunId: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:completed', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.failed') + handleRunFailed(payload: { goalRunId: string; error: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:failed', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.cancelled') + handleRunCancelled(payload: { goalRunId: string; reason?: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:cancelled', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.phase-changed') + handlePhaseChanged(payload: { + goalRunId: string; + previousPhase: string; + newPhase: string; + }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:phase_changed', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.intervened') + handleIntervened(payload: { goalRunId: string; userId?: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:intervened', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('goal-run.control-returned') + handleControlReturned(payload: { goalRunId: string; userId?: string }) { + this.broadcastToRun(payload.goalRunId, { + type: 'run:control_returned', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('step.approved') + handleStepApproved(payload: { + goalRunId: string; + stepId: string; + userId?: string; + }) { + this.broadcastToRun(payload.goalRunId, { + type: 'step:approved', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('step.rejected') + handleStepRejected(payload: { + goalRunId: string; + stepId: string; + reason: string; + userId?: string; + }) { + this.broadcastToRun(payload.goalRunId, { + type: 'step:rejected', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('activity-event.created') + handleActivityEvent(payload: { + goalRunId: string; + eventType: string; + title: string; + description?: string; + severity?: string; + details?: Record; + }) { + this.broadcastToRun(payload.goalRunId, { + type: 'activity:logged', + runId: payload.goalRunId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('desktop.waking') + handleDesktopWaking(payload: { runId: string }) { + this.broadcastToRun(payload.runId, { + type: 'desktop:waking', + runId: payload.runId, + timestamp: new Date(), + data: payload, + }); + } + + @OnEvent('screenshot.captured') + handleScreenshotCaptured(payload: { + runId: string; + screenshot: { + id: string; + url: string; + timestamp: Date; + stepId?: string; + stepDescription?: string; + }; + }) { + this.broadcastToRun(payload.runId, { + type: 'screenshot:captured', + runId: payload.runId, + timestamp: new Date(), + data: payload.screenshot, + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/main.ts b/packages/bytebot-workflow-orchestrator/src/main.ts new file mode 100644 index 000000000..85f0f3ba1 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/main.ts @@ -0,0 +1,135 @@ +/** + * ByteBot Workflow Orchestrator + * v5.2.0: Phase 7 Enhanced Features with Swagger API Documentation + * + * This service is the brain of the ByteBot Workflows product (Product 2). + * It manages: + * - Workflow lifecycle (create, execute, complete, fail) + * - Node scheduling using DB-driven FOR UPDATE SKIP LOCKED pattern + * - Workspace lifecycle coordination with task-controller + * - Agent task dispatch and result collection + * - Goal-first orchestration (Manus-style) + * - Goal templates and batch execution (Phase 7) + */ + +import { NestFactory } from '@nestjs/core'; +import { ValidationPipe, Logger } from '@nestjs/common'; +import { SwaggerModule, DocumentBuilder } from '@nestjs/swagger'; +import { AppModule } from './app.module'; + +async function bootstrap() { + const logger = new Logger('Bootstrap'); + const app = await NestFactory.create(AppModule, { rawBody: true }); + + // Enable validation pipes + app.useGlobalPipes( + new ValidationPipe({ + whitelist: true, + transform: true, + forbidNonWhitelisted: true, + }), + ); + + // Enable CORS for internal services + app.enableCors({ + origin: process.env.CORS_ORIGIN || '*', + methods: ['GET', 'POST', 'PUT', 'DELETE', 'PATCH'], + credentials: true, + }); + + // Set global prefix + app.setGlobalPrefix('api/v1'); + + // Configure Swagger API Documentation + const config = new DocumentBuilder() + .setTitle('ByteBot Workflow Orchestrator API') + .setDescription( + ` +## Overview + +The ByteBot Workflow Orchestrator API provides goal-first workflow orchestration capabilities. +Users can define natural language goals, and the system will autonomously plan and execute +workflows to achieve those goals. + +## Key Features + +- **Goal Runs**: Create and manage goal-based workflow executions +- **Templates**: Reusable goal templates with parameterization +- **Batch Execution**: Execute multiple goals in parallel or sequence +- **Analytics**: Comprehensive execution insights and metrics +- **Approvals**: Human-in-the-loop approval for high-risk actions + +## Authentication + +All API endpoints require tenant identification via the \`x-tenant-id\` header. +Some endpoints may require additional authentication headers. + +## Rate Limiting + +The API implements rate limiting: +- **Goal Creation**: 5 requests per minute +- **Batch Creation**: 3 requests per minute +- **General**: 100 requests per minute + +## Error Responses + +All errors follow a standard format: +\`\`\`json +{ + "statusCode": 400, + "message": "Error description", + "error": "Bad Request" +} +\`\`\` + `, + ) + .setVersion('5.2.0') + .addTag('goal-runs', 'Goal-first workflow orchestration') + .addTag('templates', 'Reusable goal templates') + .addTag('batches', 'Batch goal execution') + .addTag('analytics', 'Execution analytics and insights') + .addTag('approvals', 'High-risk action approvals') + .addTag('workflows', 'Low-level workflow management') + .addTag('agents', 'Multi-agent orchestration') + .addTag('webhooks', 'Webhook notifications') + .addTag('health', 'Health checks') + .addApiKey( + { + type: 'apiKey', + name: 'x-tenant-id', + in: 'header', + description: 'Tenant identifier', + }, + 'x-tenant-id', + ) + .addApiKey( + { + type: 'apiKey', + name: 'x-user-id', + in: 'header', + description: 'User identifier (optional)', + }, + 'x-user-id', + ) + .build(); + + const document = SwaggerModule.createDocument(app, config); + SwaggerModule.setup('docs', app, document, { + customSiteTitle: 'ByteBot Orchestrator API', + customfavIcon: '/favicon.ico', + swaggerOptions: { + persistAuthorization: true, + tagsSorter: 'alpha', + operationsSorter: 'alpha', + }, + }); + + const port = process.env.PORT || 8080; + await app.listen(port); + + logger.log(`Workflow Orchestrator listening on port ${port}`); + logger.log(`Health check: http://localhost:${port}/api/v1/health`); + logger.log(`API Documentation: http://localhost:${port}/docs`); +} + +bootstrap(); diff --git a/packages/bytebot-workflow-orchestrator/src/modules/goal-run.module.ts b/packages/bytebot-workflow-orchestrator/src/modules/goal-run.module.ts new file mode 100644 index 000000000..3cfea9b6a --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/modules/goal-run.module.ts @@ -0,0 +1,47 @@ +/** + * Goal Run Module + * v1.0.0: NestJS module for Manus-style goal-first orchestration + * + * Provides: + * - GoalRunService - Goal run lifecycle management + * - PlannerService - LLM-powered plan generation + * - OrchestratorLoopService - PEVR loop execution + * - GoalRunController - REST API endpoints + */ + +import { Module } from '@nestjs/common'; +import { ConfigModule } from '@nestjs/config'; +import { EventEmitterModule } from '@nestjs/event-emitter'; + +// Services +import { GoalRunService } from '../services/goal-run.service'; +import { PlannerService } from '../services/planner.service'; +import { OrchestratorLoopService } from '../services/orchestrator-loop.service'; +import { PrismaService } from '../services/prisma.service'; +import { WorkflowService } from '../services/workflow.service'; +import { WorkspaceService } from '../services/workspace.service'; + +// Controllers +import { GoalRunController } from '../controllers/goal-run.controller'; + +@Module({ + imports: [ + ConfigModule, + EventEmitterModule.forRoot(), + ], + controllers: [GoalRunController], + providers: [ + GoalRunService, + PlannerService, + OrchestratorLoopService, + PrismaService, + WorkflowService, + WorkspaceService, + ], + exports: [ + GoalRunService, + PlannerService, + OrchestratorLoopService, + ], +}) +export class GoalRunModule {} diff --git a/packages/bytebot-workflow-orchestrator/src/modules/metrics.module.ts b/packages/bytebot-workflow-orchestrator/src/modules/metrics.module.ts new file mode 100644 index 000000000..11535cdbe --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/modules/metrics.module.ts @@ -0,0 +1,529 @@ +/** + * Metrics Module + * Prometheus metrics for workflow orchestrator + * + * Post-M5 Enhancement: Added approval flow metrics + * + * Metric Types: + * - Counters: Total counts (approvals_total, webhooks_total) + * - Gauges: Current values (approvals_pending) + * - Histograms: Latency distributions (approval_latency_seconds) + * + * Best Practices Applied: + * - Low cardinality labels (status, risk_level, tool_category) + * - Meaningful bucket boundaries for histograms + * - Clear metric naming (domain_metric_unit) + */ + +import { Module, Global } from '@nestjs/common'; +import { PrometheusModule, makeCounterProvider, makeGaugeProvider, makeHistogramProvider } from '@willsoto/nestjs-prometheus'; + +@Global() +@Module({ + imports: [ + PrometheusModule.register({ + defaultMetrics: { + enabled: true, + }, + }), + ], + providers: [ + // ========================================================================= + // Workflow metrics + // ========================================================================= + makeCounterProvider({ + name: 'workflow_runs_total', + help: 'Total number of workflow runs', + labelNames: ['status', 'tenant_id'], + }), + makeGaugeProvider({ + name: 'workflow_runs_active', + help: 'Number of currently active workflow runs', + labelNames: ['tenant_id'], + }), + makeHistogramProvider({ + name: 'workflow_duration_seconds', + help: 'Workflow execution duration in seconds', + labelNames: ['status'], + buckets: [60, 300, 600, 1800, 3600, 7200], + }), + + // ========================================================================= + // Node metrics + // ========================================================================= + makeCounterProvider({ + name: 'workflow_nodes_total', + help: 'Total number of workflow nodes executed', + labelNames: ['type', 'status'], + }), + makeHistogramProvider({ + name: 'workflow_node_duration_seconds', + help: 'Node execution duration in seconds', + labelNames: ['type', 'status'], + buckets: [1, 5, 10, 30, 60, 120, 300], + }), + + // ========================================================================= + // Workspace metrics + // ========================================================================= + makeGaugeProvider({ + name: 'workspaces_active', + help: 'Number of active workspaces', + labelNames: ['status'], + }), + makeCounterProvider({ + name: 'workspace_lock_acquisitions_total', + help: 'Total workspace lock acquisition attempts', + labelNames: ['success'], + }), + + // ========================================================================= + // Scheduler metrics + // ========================================================================= + makeGaugeProvider({ + name: 'scheduler_queue_depth', + help: 'Number of nodes waiting to be scheduled', + }), + makeHistogramProvider({ + name: 'scheduler_loop_duration_seconds', + help: 'Scheduler loop execution time', + buckets: [0.01, 0.05, 0.1, 0.5, 1, 5], + }), + + // ========================================================================= + // Post-M5: Approval flow metrics + // ========================================================================= + + // Counter: Total approval requests by status and risk level + makeCounterProvider({ + name: 'approvals_total', + help: 'Total number of approval requests', + labelNames: ['status', 'risk_level', 'tool_category'], + }), + + // Gauge: Currently pending approvals + makeGaugeProvider({ + name: 'approvals_pending', + help: 'Number of pending approval requests', + labelNames: ['risk_level'], + }), + + // Histogram: Time from request to decision (approval latency) + makeHistogramProvider({ + name: 'approval_latency_seconds', + help: 'Time from approval request to decision in seconds', + labelNames: ['status', 'risk_level'], + // Buckets: 1min, 5min, 15min, 30min, 1hr, 2hr, 6hr, 12hr, 24hr + buckets: [60, 300, 900, 1800, 3600, 7200, 21600, 43200, 86400], + }), + + // Counter: Expired approvals (subset of total, but useful for alerting) + makeCounterProvider({ + name: 'approvals_expired_total', + help: 'Total number of expired approval requests', + labelNames: ['risk_level', 'tool_category'], + }), + + // ========================================================================= + // Post-M5: Idempotency metrics + // ========================================================================= + + // Counter: Idempotency checks + makeCounterProvider({ + name: 'idempotency_checks_total', + help: 'Total idempotency checks performed', + labelNames: ['result'], // 'new', 'cached', 'processing', 'failed' + }), + + // Gauge: Active processing records + makeGaugeProvider({ + name: 'idempotency_processing', + help: 'Number of actions currently being processed', + }), + + // Counter: Cache hits (returns cached result) + makeCounterProvider({ + name: 'idempotency_cache_hits_total', + help: 'Total cached results returned', + }), + + // ========================================================================= + // Post-M5: Webhook notification metrics + // ========================================================================= + + // Counter: Webhook delivery attempts + makeCounterProvider({ + name: 'webhooks_total', + help: 'Total webhook delivery attempts', + labelNames: ['event_type', 'success'], + }), + + // Histogram: Webhook delivery latency + makeHistogramProvider({ + name: 'webhook_delivery_seconds', + help: 'Webhook delivery latency in seconds', + labelNames: ['event_type'], + // Buckets: 100ms, 500ms, 1s, 2s, 5s, 10s, 30s + buckets: [0.1, 0.5, 1, 2, 5, 10, 30], + }), + + // Counter: Webhook retries + makeCounterProvider({ + name: 'webhook_retries_total', + help: 'Total webhook retry attempts', + labelNames: ['event_type'], + }), + + // ========================================================================= + // Post-M5: Audit logging metrics + // ========================================================================= + + // Counter: Audit log entries created + makeCounterProvider({ + name: 'audit_logs_total', + help: 'Total audit log entries created', + labelNames: ['event_type', 'resource_type'], + }), + + // Counter: Audit logs cleaned up + makeCounterProvider({ + name: 'audit_logs_cleaned_total', + help: 'Total expired audit logs cleaned up', + }), + + // ========================================================================= + // Post-M5: High-risk action metrics + // ========================================================================= + + // Counter: High-risk actions detected + makeCounterProvider({ + name: 'high_risk_actions_total', + help: 'Total high-risk actions detected', + labelNames: ['tool_name', 'risk_level'], + }), + + // Counter: High-risk actions by outcome + makeCounterProvider({ + name: 'high_risk_outcomes_total', + help: 'High-risk action outcomes', + labelNames: ['outcome'], // 'approved', 'rejected', 'expired', 'executed' + }), + + // ========================================================================= + // Phase 6: Goal Run metrics (Manus-style orchestration) + // ========================================================================= + + // Counter: Total goal runs by status + makeCounterProvider({ + name: 'goal_runs_total', + help: 'Total number of goal runs created', + labelNames: ['status', 'tenant_id'], + }), + + // Gauge: Currently active goal runs by phase + makeGaugeProvider({ + name: 'goal_runs_active', + help: 'Number of currently active goal runs', + labelNames: ['phase', 'tenant_id'], + }), + + // Histogram: Goal run total duration in seconds + makeHistogramProvider({ + name: 'goal_run_duration_seconds', + help: 'Total goal run duration from creation to completion', + labelNames: ['status'], + // Buckets: 1min, 5min, 15min, 30min, 1hr, 2hr, 4hr, 8hr + buckets: [60, 300, 900, 1800, 3600, 7200, 14400, 28800], + }), + + // Counter: Plan versions created (replanning events) + makeCounterProvider({ + name: 'goal_run_replans_total', + help: 'Total number of plan revisions (replanning events)', + labelNames: ['tenant_id'], + }), + + // Counter: Checklist items by status + makeCounterProvider({ + name: 'checklist_items_total', + help: 'Total checklist items processed', + labelNames: ['status'], // PENDING, IN_PROGRESS, COMPLETED, FAILED, SKIPPED + }), + + // Histogram: Individual checklist item execution duration + makeHistogramProvider({ + name: 'checklist_item_duration_seconds', + help: 'Individual checklist item execution duration', + labelNames: ['status'], + // Buckets: 10s, 30s, 1min, 2min, 5min, 10min, 30min + buckets: [10, 30, 60, 120, 300, 600, 1800], + }), + + // Counter: Steering messages received + makeCounterProvider({ + name: 'steering_messages_total', + help: 'Total steering messages received from users', + labelNames: ['type'], // PAUSE, RESUME, CANCEL, MODIFY_PLAN, APPROVE, REJECT, INSTRUCTION + }), + + // Counter: Activity events by type + makeCounterProvider({ + name: 'activity_events_total', + help: 'Total activity events recorded', + labelNames: ['event_type', 'severity'], + }), + + // ========================================================================= + // Capacity Hardening Metrics (v5.5.13) + // ========================================================================= + + // Gauge: Running workspace pods + makeGaugeProvider({ + name: 'workspace_pods_running', + help: 'Number of workspace pods currently running', + }), + + // Gauge: Workspace capacity utilization (0.0-1.0) + makeGaugeProvider({ + name: 'workspace_capacity_utilization_ratio', + help: 'Workspace capacity utilization ratio (running/max)', + labelNames: ['status'], // 'normal', 'warning', 'critical' + }), + + // Counter: Capacity exhaustion events + makeCounterProvider({ + name: 'workspace_capacity_exhausted_total', + help: 'Total times capacity was exhausted', + }), + + // ========================================================================= + // Storage Hardening Metrics (v5.5.13) + // ========================================================================= + + // Gauge: Total workspace PVCs + makeGaugeProvider({ + name: 'workspace_pvcs_total', + help: 'Total number of workspace PVCs', + labelNames: ['status'], // 'active', 'hibernated', 'retained' + }), + + // Gauge: Total PVC storage in bytes + makeGaugeProvider({ + name: 'workspace_pvcs_storage_bytes', + help: 'Total workspace PVC storage in bytes', + }), + + // ========================================================================= + // Task Dispatch Reliability Metrics (v5.5.13) + // ========================================================================= + + // Counter: Task dispatch errors by type + makeCounterProvider({ + name: 'task_dispatch_errors_total', + help: 'Total task dispatch errors', + labelNames: ['error_type'], // '404', 'timeout', 'network', 'infra', 'semantic' + }), + + // Counter: Infrastructure retries + makeCounterProvider({ + name: 'task_dispatch_infra_retries_total', + help: 'Total infrastructure retry attempts', + }), + + // Histogram: Task dispatch latency + makeHistogramProvider({ + name: 'task_dispatch_latency_seconds', + help: 'Task dispatch API call latency', + labelNames: ['operation'], // 'dispatch', 'status_check', 'cancel' + buckets: [0.1, 0.5, 1, 2, 5, 10, 30], + }), + + // ========================================================================= + // Garbage Collection Metrics (v5.5.13) + // ========================================================================= + + // Counter: Workspaces hibernated by GC + makeCounterProvider({ + name: 'workspace_gc_hibernated_total', + help: 'Total workspaces hibernated by GC', + labelNames: ['reason'], // 'orphan', 'completed', 'idle', 'retry' + }), + + // Gauge: Idle workspaces pending hibernation + makeGaugeProvider({ + name: 'workspace_gc_idle_pending', + help: 'Number of idle workspaces pending hibernation', + }), + + // Counter: GC cycle runs + makeCounterProvider({ + name: 'workspace_gc_cycles_total', + help: 'Total GC cycles executed', + labelNames: ['result'], // 'success', 'partial', 'failed' + }), + + // ========================================================================= + // Database Metrics (v5.5.13) + // ========================================================================= + + // Histogram: Orchestrator DB query latency + makeHistogramProvider({ + name: 'orchestrator_db_query_duration_seconds', + help: 'Orchestrator database query duration', + labelNames: ['operation'], // 'goal_run_read', 'checklist_update', 'workspace_read' + buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5], + }), + + // ========================================================================= + // Outbox + User Prompt Ops Metrics (PR 3) + // ========================================================================= + + makeGaugeProvider({ + name: 'outbox_pending_total', + help: 'Number of pending outbox rows (processed_at IS NULL)', + labelNames: ['event_type'], + }), + makeGaugeProvider({ + name: 'outbox_oldest_pending_age_seconds', + help: 'Age in seconds of the oldest pending outbox row', + }), + makeCounterProvider({ + name: 'outbox_publish_attempts_total', + help: 'Outbox publish attempts (success/failure)', + labelNames: ['event_type', 'result'], + }), + + makeGaugeProvider({ + name: 'user_prompts_open_total', + help: 'Number of OPEN user prompts', + labelNames: ['kind'], + }), + makeHistogramProvider({ + name: 'user_prompt_time_to_resolve_seconds', + help: 'Time from prompt creation to resolution in seconds', + labelNames: ['kind'], + buckets: [30, 60, 300, 900, 1800, 3600, 7200, 21600, 86400], + }), + + // ========================================================================= + // Stark Interaction SLIs (P1) + // ========================================================================= + + // Counter: Goal intake started (gate vs planner safety-net) + makeCounterProvider({ + name: 'goal_intake_started_total', + help: 'Total number of goal intake gates triggered', + labelNames: ['source'], // 'gate' | 'planner_error' + }), + + // Counter: Goal intake completed (GoalSpec marked COMPLETE) + makeCounterProvider({ + name: 'goal_intake_completed_total', + help: 'Total number of goal intake completions', + }), + + // Counter: Prompt resolutions by actor + kind + makeCounterProvider({ + name: 'prompt_resolved_total', + help: 'Total prompt resolutions by actorType and kind', + labelNames: ['actor_type', 'kind'], + }), + + // Counter: Prompt resolution failures (fail-closed) + makeCounterProvider({ + name: 'user_prompt_resolution_validation_fail_total', + help: 'Total prompt resolution attempts rejected by JSON schema validation', + labelNames: ['kind', 'scope'], + }), + makeCounterProvider({ + name: 'user_prompt_resolution_unauthorized_total', + help: 'Total prompt resolution attempts rejected by authorization policy', + labelNames: ['kind', 'actor_type'], + }), + makeCounterProvider({ + name: 'user_prompt_resolution_incomplete_after_apply_total', + help: 'Total prompt resolution attempts where answers applied but derived completeness still failed', + labelNames: ['kind'], + }), + + // Counter: Resume outbox enqueued (resolution vs reconciler repair) + makeCounterProvider({ + name: 'resume_outbox_enqueued_total', + help: 'Total resume outbox events enqueued', + labelNames: ['source'], // 'resolution' | 'reconciler' + }), + + // Counter: Temporal resume Update outcomes (idempotent by updateId) + makeCounterProvider({ + name: 'resume_update_success_total', + help: 'Total successful Temporal resume Updates', + }), + makeCounterProvider({ + name: 'resume_update_failed_total', + help: 'Total failed Temporal resume Updates', + }), + + // Gauge: Runs stuck in WAITING_USER_INPUT by age bucket + makeGaugeProvider({ + name: 'runs_stuck_waiting_user_input_total', + help: 'Number of goal runs in WAITING_USER_INPUT by age bucket', + labelNames: ['age_bucket'], // 'lt_5m' | '5m_15m' | '15m_1h' | '1h_24h' | 'gt_24h' + }), + makeGaugeProvider({ + name: 'runs_stuck_waiting_provider_total', + help: 'Number of goal runs in WAITING_PROVIDER by age bucket', + labelNames: ['age_bucket'], // 'lt_5m' | '5m_15m' | '15m_1h' | '1h_24h' | 'gt_24h' + }), + makeGaugeProvider({ + name: 'runs_stuck_waiting_capacity_total', + help: 'Number of goal runs in WAITING_CAPACITY by age bucket', + labelNames: ['age_bucket'], // 'lt_5m' | '5m_15m' | '15m_1h' | '1h_24h' | 'gt_24h' + }), + + // Gauge: RESOLVED prompts without durable resume acknowledgement + makeGaugeProvider({ + name: 'prompts_resolved_without_resume_ack_total', + help: 'Number of resolved prompts missing resume_acknowledged_at', + }), + + // ========================================================================= + // Temporal Rollout Guardrails (Capability Probe) + // ========================================================================= + + makeGaugeProvider({ + name: 'temporal_capability_probe_ok', + help: '1 if orchestrator can reach Temporal service APIs; 0 otherwise', + }), + makeCounterProvider({ + name: 'temporal_capability_probe_failures_total', + help: 'Total Temporal capability probe failures', + }), + ], + exports: [ + PrometheusModule, + // Export metric providers used via `@InjectMetric(...)` + 'PROM_METRIC_OUTBOX_PENDING_TOTAL', + 'PROM_METRIC_OUTBOX_OLDEST_PENDING_AGE_SECONDS', + 'PROM_METRIC_OUTBOX_PUBLISH_ATTEMPTS_TOTAL', + 'PROM_METRIC_USER_PROMPTS_OPEN_TOTAL', + 'PROM_METRIC_USER_PROMPT_TIME_TO_RESOLVE_SECONDS', + + 'PROM_METRIC_GOAL_INTAKE_STARTED_TOTAL', + 'PROM_METRIC_GOAL_INTAKE_COMPLETED_TOTAL', + 'PROM_METRIC_PROMPT_RESOLVED_TOTAL', + 'PROM_METRIC_USER_PROMPT_RESOLUTION_VALIDATION_FAIL_TOTAL', + 'PROM_METRIC_USER_PROMPT_RESOLUTION_UNAUTHORIZED_TOTAL', + 'PROM_METRIC_USER_PROMPT_RESOLUTION_INCOMPLETE_AFTER_APPLY_TOTAL', + 'PROM_METRIC_RESUME_OUTBOX_ENQUEUED_TOTAL', + 'PROM_METRIC_RESUME_UPDATE_SUCCESS_TOTAL', + 'PROM_METRIC_RESUME_UPDATE_FAILED_TOTAL', + 'PROM_METRIC_RUNS_STUCK_WAITING_USER_INPUT_TOTAL', + 'PROM_METRIC_RUNS_STUCK_WAITING_PROVIDER_TOTAL', + 'PROM_METRIC_RUNS_STUCK_WAITING_CAPACITY_TOTAL', + 'PROM_METRIC_PROMPTS_RESOLVED_WITHOUT_RESUME_ACK_TOTAL', + + 'PROM_METRIC_TEMPORAL_CAPABILITY_PROBE_OK', + 'PROM_METRIC_TEMPORAL_CAPABILITY_PROBE_FAILURES_TOTAL', + ], +}) +export class MetricsModule {} diff --git a/packages/bytebot-workflow-orchestrator/src/services/agent-health.service.ts b/packages/bytebot-workflow-orchestrator/src/services/agent-health.service.ts new file mode 100644 index 000000000..7782a5a13 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/agent-health.service.ts @@ -0,0 +1,461 @@ +/** + * Agent Health Service + * v1.0.0: Phase 7 Multi-Agent Orchestration + * + * Actively monitors agent health by performing periodic health checks. + * This complements the passive heartbeat tracking in AgentRegistryService. + * + * Key features: + * - Active health probing (HTTP requests to agent health endpoints) + * - Health check history for trend analysis + * - Automatic agent status updates based on check results + * - Kubernetes integration for pod health correlation + * + * Based on best practices for distributed system health monitoring. + * Reference: https://aws.amazon.com/solutions/guidance/multi-agent-orchestration-on-aws/ + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { Cron } from '@nestjs/schedule'; +import axios, { AxiosInstance } from 'axios'; +import { PrismaService } from './prisma.service'; +import { LeaderElectionService } from './leader-election.service'; +import { + AgentRegistryService, + AgentStatus, + AGENT_STATUS_CHANGED_EVENT, +} from './agent-registry.service'; + +// Health check result +export interface HealthCheckResult { + agentId: string; + endpoint: string; + success: boolean; + statusCode?: number; + latencyMs: number; + error?: string; + timestamp: Date; +} + +// Events emitted by this service +export const AGENT_HEALTH_CHECK_COMPLETED = 'agent.health.check.completed'; + +@Injectable() +export class AgentHealthService implements OnModuleInit { + private readonly logger = new Logger(AgentHealthService.name); + private readonly httpClient: AxiosInstance; + + // Configuration + private readonly checkIntervalMs: number; + private readonly checkTimeoutMs: number; + private readonly unhealthyThreshold: number; + private readonly healthyThreshold: number; + private readonly historyRetentionHours: number; + + // Track consecutive failures for each agent + private consecutiveFailures = new Map(); + private consecutiveSuccesses = new Map(); + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly leaderElection: LeaderElectionService, + private readonly agentRegistry: AgentRegistryService, + ) { + // Health check interval (default: 15 seconds) + this.checkIntervalMs = parseInt( + this.configService.get('AGENT_HEALTH_CHECK_INTERVAL_MS', '15000'), + 10, + ); + + // Health check timeout (default: 5 seconds) + this.checkTimeoutMs = parseInt( + this.configService.get('AGENT_HEALTH_CHECK_TIMEOUT_MS', '5000'), + 10, + ); + + // Number of consecutive failures before marking unhealthy (default: 3) + this.unhealthyThreshold = parseInt( + this.configService.get('AGENT_UNHEALTHY_THRESHOLD', '3'), + 10, + ); + + // Number of consecutive successes before marking healthy (default: 2) + this.healthyThreshold = parseInt( + this.configService.get('AGENT_HEALTHY_THRESHOLD', '2'), + 10, + ); + + // Health check history retention (default: 24 hours) + this.historyRetentionHours = parseInt( + this.configService.get( + 'AGENT_HEALTH_HISTORY_RETENTION_HOURS', + '24', + ), + 10, + ); + + this.httpClient = axios.create({ + timeout: this.checkTimeoutMs, + }); + } + + async onModuleInit(): Promise { + this.logger.log( + `Agent Health Service initialized (interval: ${this.checkIntervalMs}ms, ` + + `timeout: ${this.checkTimeoutMs}ms, unhealthyThreshold: ${this.unhealthyThreshold})`, + ); + } + + /** + * Periodic health check for all registered agents + * Runs only on leader to avoid duplicate checks + */ + @Cron('*/15 * * * * *') // Every 15 seconds + async performHealthChecks(): Promise { + if (!this.leaderElection.isLeader) { + return; + } + + const agents = await this.agentRegistry.getAgents({ + status: [ + AgentStatus.HEALTHY, + AgentStatus.UNHEALTHY, + AgentStatus.STARTING, + ], + }); + + this.logger.debug(`Performing health checks on ${agents.length} agents`); + + // Check agents in parallel (but with concurrency limit) + const results = await Promise.all( + agents.map((agent) => this.checkAgentHealth(agent.id, agent.endpoint)), + ); + + // Process results + for (const result of results) { + await this.processHealthCheckResult(result); + } + } + + /** + * Check health of a single agent + */ + async checkAgentHealth( + agentId: string, + agentEndpoint: string, + ): Promise { + const healthEndpoint = `${agentEndpoint}/health/live`; + const startTime = Date.now(); + + try { + const response = await this.httpClient.get(healthEndpoint); + + return { + agentId, + endpoint: healthEndpoint, + success: response.status >= 200 && response.status < 300, + statusCode: response.status, + latencyMs: Date.now() - startTime, + timestamp: new Date(), + }; + } catch (error: any) { + return { + agentId, + endpoint: healthEndpoint, + success: false, + statusCode: error.response?.status, + latencyMs: Date.now() - startTime, + error: error.message, + timestamp: new Date(), + }; + } + } + + /** + * Process a health check result + */ + private async processHealthCheckResult( + result: HealthCheckResult, + ): Promise { + // Record the health check in history + await this.recordHealthCheck(result); + + // Update consecutive counters + if (result.success) { + const successes = (this.consecutiveSuccesses.get(result.agentId) || 0) + 1; + this.consecutiveSuccesses.set(result.agentId, successes); + this.consecutiveFailures.set(result.agentId, 0); + + // Update heartbeat + await this.agentRegistry.updateHeartbeat(result.agentId); + + // Check if agent should become healthy + if (successes >= this.healthyThreshold) { + await this.tryMarkHealthy(result.agentId); + } + } else { + const failures = (this.consecutiveFailures.get(result.agentId) || 0) + 1; + this.consecutiveFailures.set(result.agentId, failures); + this.consecutiveSuccesses.set(result.agentId, 0); + + this.logger.warn( + `Health check failed for agent ${result.agentId}: ${result.error} ` + + `(${failures}/${this.unhealthyThreshold} failures)`, + ); + + // Check if agent should become unhealthy + if (failures >= this.unhealthyThreshold) { + await this.tryMarkUnhealthy(result.agentId, result.error); + } + } + + // Emit event + this.eventEmitter.emit(AGENT_HEALTH_CHECK_COMPLETED, result); + } + + /** + * Record health check in history + */ + private async recordHealthCheck(result: HealthCheckResult): Promise { + await this.prisma.agentHealthCheck.create({ + data: { + agentId: result.agentId, + success: result.success, + statusCode: result.statusCode, + latencyMs: result.latencyMs, + error: result.error, + endpoint: result.endpoint, + checkedAt: result.timestamp, + }, + }); + } + + /** + * Try to mark an agent as healthy + */ + private async tryMarkHealthy(agentId: string): Promise { + const agent = await this.agentRegistry.getAgent(agentId); + if (!agent) return; + + if (agent.status !== AgentStatus.HEALTHY) { + this.logger.log( + `Agent ${agent.name} (${agentId}) recovered and is now HEALTHY`, + ); + + await this.prisma.agent.update({ + where: { id: agentId }, + data: { status: AgentStatus.HEALTHY }, + }); + + this.eventEmitter.emit(AGENT_STATUS_CHANGED_EVENT, { + agentId, + previousStatus: agent.status, + newStatus: AgentStatus.HEALTHY, + }); + } + } + + /** + * Try to mark an agent as unhealthy + */ + private async tryMarkUnhealthy( + agentId: string, + error?: string, + ): Promise { + const agent = await this.agentRegistry.getAgent(agentId); + if (!agent) return; + + if (agent.status === AgentStatus.HEALTHY) { + this.logger.warn( + `Agent ${agent.name} (${agentId}) is now UNHEALTHY: ${error}`, + ); + + await this.prisma.agent.update({ + where: { id: agentId }, + data: { status: AgentStatus.UNHEALTHY }, + }); + + this.eventEmitter.emit(AGENT_STATUS_CHANGED_EVENT, { + agentId, + previousStatus: agent.status, + newStatus: AgentStatus.UNHEALTHY, + }); + } + } + + /** + * Get recent health check history for an agent + */ + async getHealthHistory( + agentId: string, + limit: number = 100, + ): Promise { + const checks = await this.prisma.agentHealthCheck.findMany({ + where: { agentId }, + orderBy: { checkedAt: 'desc' }, + take: limit, + }); + + return checks.map((check) => ({ + agentId: check.agentId, + endpoint: check.endpoint, + success: check.success, + statusCode: check.statusCode || undefined, + latencyMs: check.latencyMs, + error: check.error || undefined, + timestamp: check.checkedAt, + })); + } + + /** + * Get health statistics for an agent + */ + async getHealthStats(agentId: string): Promise<{ + successRate: number; + avgLatencyMs: number; + p95LatencyMs: number; + checksInLastHour: number; + failuresInLastHour: number; + consecutiveSuccesses: number; + consecutiveFailures: number; + }> { + const oneHourAgo = new Date(Date.now() - 60 * 60 * 1000); + + const recentChecks = await this.prisma.agentHealthCheck.findMany({ + where: { + agentId, + checkedAt: { gte: oneHourAgo }, + }, + orderBy: { latencyMs: 'asc' }, + }); + + if (recentChecks.length === 0) { + return { + successRate: 0, + avgLatencyMs: 0, + p95LatencyMs: 0, + checksInLastHour: 0, + failuresInLastHour: 0, + consecutiveSuccesses: this.consecutiveSuccesses.get(agentId) || 0, + consecutiveFailures: this.consecutiveFailures.get(agentId) || 0, + }; + } + + const successCount = recentChecks.filter((c) => c.success).length; + const totalLatency = recentChecks.reduce((sum, c) => sum + c.latencyMs, 0); + const p95Index = Math.floor(recentChecks.length * 0.95); + + return { + successRate: successCount / recentChecks.length, + avgLatencyMs: Math.round(totalLatency / recentChecks.length), + p95LatencyMs: recentChecks[p95Index]?.latencyMs || 0, + checksInLastHour: recentChecks.length, + failuresInLastHour: recentChecks.length - successCount, + consecutiveSuccesses: this.consecutiveSuccesses.get(agentId) || 0, + consecutiveFailures: this.consecutiveFailures.get(agentId) || 0, + }; + } + + /** + * Periodic cleanup of old health check records + */ + @Cron('0 * * * *') // Every hour + async cleanupOldHealthChecks(): Promise { + if (!this.leaderElection.isLeader) { + return; + } + + const cutoffTime = new Date( + Date.now() - this.historyRetentionHours * 60 * 60 * 1000, + ); + + const result = await this.prisma.agentHealthCheck.deleteMany({ + where: { + checkedAt: { lt: cutoffTime }, + }, + }); + + if (result.count > 0) { + this.logger.debug(`Cleaned up ${result.count} old health check records`); + } + } + + /** + * Get overall health summary for all agents + */ + async getOverallHealthSummary(): Promise<{ + totalAgents: number; + healthyAgents: number; + unhealthyAgents: number; + avgSuccessRate: number; + avgLatencyMs: number; + agentsSummary: Array<{ + agentId: string; + name: string; + status: string; + successRate: number; + avgLatencyMs: number; + }>; + }> { + const agents = await this.agentRegistry.getAgents(); + const agentsSummary: Array<{ + agentId: string; + name: string; + status: string; + successRate: number; + avgLatencyMs: number; + }> = []; + + let totalSuccessRate = 0; + let totalLatency = 0; + let agentsWithStats = 0; + + for (const agent of agents) { + const stats = await this.getHealthStats(agent.id); + if (stats.checksInLastHour > 0) { + totalSuccessRate += stats.successRate; + totalLatency += stats.avgLatencyMs; + agentsWithStats++; + } + + agentsSummary.push({ + agentId: agent.id, + name: agent.name, + status: agent.status, + successRate: stats.successRate, + avgLatencyMs: stats.avgLatencyMs, + }); + } + + return { + totalAgents: agents.length, + healthyAgents: agents.filter((a) => a.status === AgentStatus.HEALTHY) + .length, + unhealthyAgents: agents.filter((a) => a.status === AgentStatus.UNHEALTHY) + .length, + avgSuccessRate: + agentsWithStats > 0 ? totalSuccessRate / agentsWithStats : 0, + avgLatencyMs: + agentsWithStats > 0 ? Math.round(totalLatency / agentsWithStats) : 0, + agentsSummary, + }; + } + + /** + * Force a health check on a specific agent + */ + async forceHealthCheck(agentId: string): Promise { + const agent = await this.agentRegistry.getAgent(agentId); + if (!agent) { + throw new Error(`Agent not found: ${agentId}`); + } + + const result = await this.checkAgentHealth(agent.id, agent.endpoint); + await this.processHealthCheckResult(result); + return result; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/agent-registry.service.ts b/packages/bytebot-workflow-orchestrator/src/services/agent-registry.service.ts new file mode 100644 index 000000000..075698e4c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/agent-registry.service.ts @@ -0,0 +1,560 @@ +/** + * Agent Registry Service + * v1.0.0: Phase 7 Multi-Agent Orchestration + * + * Manages registration, discovery, and lifecycle of agent instances. + * Implements the Supervisor pattern where the orchestrator acts as a + * central coordinator for multiple worker agents. + * + * Key features: + * - Agent registration and deregistration + * - Agent discovery by status, capability, and availability + * - Heartbeat tracking for health monitoring + * - Capacity management (max concurrent tasks per agent) + * - Kubernetes pod discovery integration + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { PrismaService } from './prisma.service'; +import { LeaderElectionService } from './leader-election.service'; + +// Agent status enum +export enum AgentStatus { + STARTING = 'STARTING', + HEALTHY = 'HEALTHY', + UNHEALTHY = 'UNHEALTHY', + DRAINING = 'DRAINING', // No new tasks, completing existing + OFFLINE = 'OFFLINE', +} + +// Events emitted by this service +export const AGENT_REGISTERED_EVENT = 'agent.registered'; +export const AGENT_DEREGISTERED_EVENT = 'agent.deregistered'; +export const AGENT_STATUS_CHANGED_EVENT = 'agent.status.changed'; +export const AGENT_UNHEALTHY_EVENT = 'agent.unhealthy'; + +export interface AgentInfo { + id: string; + name: string; + endpoint: string; + podName?: string; + nodeIp?: string; + namespace: string; + status: AgentStatus; + lastHeartbeatAt: Date; + maxConcurrentTasks: number; + currentTaskCount: number; + weight: number; + version: string; + metadata: Record; + capabilities: AgentCapabilityInfo[]; +} + +export interface AgentCapabilityInfo { + name: string; + toolPattern: string; + priority: number; + costMultiplier: number; + requiresExclusiveWorkspace: boolean; +} + +export interface RegisterAgentInput { + name: string; + endpoint: string; + podName?: string; + nodeIp?: string; + namespace?: string; + maxConcurrentTasks?: number; + weight?: number; + version?: string; + metadata?: Record; + capabilities?: Array<{ + name: string; + toolPattern: string; + priority?: number; + costMultiplier?: number; + requiresExclusiveWorkspace?: boolean; + }>; +} + +@Injectable() +export class AgentRegistryService implements OnModuleInit { + private readonly logger = new Logger(AgentRegistryService.name); + + // Configuration + private readonly heartbeatTimeoutMs: number; + private readonly staleAgentTimeoutMs: number; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly leaderElection: LeaderElectionService, + ) { + // Heartbeat timeout: how long before an agent is marked unhealthy + this.heartbeatTimeoutMs = parseInt( + this.configService.get('AGENT_HEARTBEAT_TIMEOUT_MS', '30000'), + 10, + ); + + // Stale agent timeout: how long before an offline agent is removed + this.staleAgentTimeoutMs = parseInt( + this.configService.get('AGENT_STALE_TIMEOUT_MS', '3600000'), // 1 hour + 10, + ); + } + + async onModuleInit(): Promise { + this.logger.log('Agent Registry Service initialized'); + + // Bootstrap with default agent if configured (for backward compatibility) + const defaultAgentUrl = this.configService.get('AGENT_SERVICE_URL'); + if (defaultAgentUrl) { + await this.ensureDefaultAgent(defaultAgentUrl); + } + } + + /** + * Ensure a default agent exists (backward compatibility with single-agent mode) + */ + private async ensureDefaultAgent(endpoint: string): Promise { + const existing = await this.prisma.agent.findUnique({ + where: { endpoint }, + }); + + if (!existing) { + this.logger.log(`Registering default agent at ${endpoint}`); + await this.registerAgent({ + name: 'default-agent', + endpoint, + capabilities: [ + { + name: 'all', + toolPattern: '*', + priority: 100, + }, + ], + }); + } + } + + /** + * Register a new agent or update existing agent + */ + async registerAgent(input: RegisterAgentInput): Promise { + const { + name, + endpoint, + podName, + nodeIp, + namespace = 'bytebot', + maxConcurrentTasks = 3, + weight = 100, + version = '1.0.0', + metadata = {}, + capabilities = [], + } = input; + + this.logger.log(`Registering agent: ${name} at ${endpoint}`); + + // Upsert agent + const agent = await this.prisma.agent.upsert({ + where: { endpoint }, + create: { + name, + endpoint, + podName, + nodeIp, + namespace, + maxConcurrentTasks, + weight, + version, + metadata, + status: AgentStatus.STARTING, + lastHeartbeatAt: new Date(), + }, + update: { + name, + podName, + nodeIp, + namespace, + maxConcurrentTasks, + weight, + version, + metadata, + status: AgentStatus.HEALTHY, + lastHeartbeatAt: new Date(), + }, + include: { + capabilities: true, + }, + }); + + // Update capabilities (delete and recreate for simplicity) + await this.prisma.agentCapability.deleteMany({ + where: { agentId: agent.id }, + }); + + if (capabilities.length > 0) { + await this.prisma.agentCapability.createMany({ + data: capabilities.map((cap) => ({ + agentId: agent.id, + name: cap.name, + toolPattern: cap.toolPattern, + priority: cap.priority ?? 100, + costMultiplier: cap.costMultiplier ?? 1.0, + requiresExclusiveWorkspace: cap.requiresExclusiveWorkspace ?? false, + })), + }); + } + + // Fetch updated agent with capabilities + const updatedAgent = await this.prisma.agent.findUnique({ + where: { id: agent.id }, + include: { capabilities: true }, + }); + + const agentInfo = this.toAgentInfo(updatedAgent!); + + this.eventEmitter.emit(AGENT_REGISTERED_EVENT, { + agentId: agent.id, + name, + endpoint, + }); + + this.logger.log(`Agent registered: ${name} (${agent.id})`); + + return agentInfo; + } + + /** + * Deregister an agent (mark as offline) + */ + async deregisterAgent(agentId: string): Promise { + const agent = await this.prisma.agent.findUnique({ + where: { id: agentId }, + }); + + if (!agent) { + this.logger.warn(`Agent not found for deregistration: ${agentId}`); + return; + } + + await this.prisma.agent.update({ + where: { id: agentId }, + data: { + status: AgentStatus.OFFLINE, + }, + }); + + this.eventEmitter.emit(AGENT_DEREGISTERED_EVENT, { + agentId, + name: agent.name, + endpoint: agent.endpoint, + }); + + this.logger.log(`Agent deregistered: ${agent.name} (${agentId})`); + } + + /** + * Update agent heartbeat (called by agent health checks) + */ + async updateHeartbeat( + agentId: string, + currentTaskCount?: number, + ): Promise { + const updateData: any = { + lastHeartbeatAt: new Date(), + status: AgentStatus.HEALTHY, + }; + + if (currentTaskCount !== undefined) { + updateData.currentTaskCount = currentTaskCount; + } + + await this.prisma.agent.update({ + where: { id: agentId }, + data: updateData, + }); + } + + /** + * Get agent by ID + */ + async getAgent(agentId: string): Promise { + const agent = await this.prisma.agent.findUnique({ + where: { id: agentId }, + include: { capabilities: true }, + }); + + return agent ? this.toAgentInfo(agent) : null; + } + + /** + * Get agent by endpoint + */ + async getAgentByEndpoint(endpoint: string): Promise { + const agent = await this.prisma.agent.findUnique({ + where: { endpoint }, + include: { capabilities: true }, + }); + + return agent ? this.toAgentInfo(agent) : null; + } + + /** + * Get all agents with optional filtering + */ + async getAgents(filters?: { + status?: AgentStatus | AgentStatus[]; + namespace?: string; + hasCapacity?: boolean; + }): Promise { + const where: any = {}; + + if (filters?.status) { + where.status = Array.isArray(filters.status) + ? { in: filters.status } + : filters.status; + } + + if (filters?.namespace) { + where.namespace = filters.namespace; + } + + const agents = await this.prisma.agent.findMany({ + where, + include: { capabilities: true }, + orderBy: { registeredAt: 'asc' }, + }); + + let result = agents.map((a) => this.toAgentInfo(a)); + + // Filter by capacity if requested + if (filters?.hasCapacity) { + result = result.filter((a) => a.currentTaskCount < a.maxConcurrentTasks); + } + + return result; + } + + /** + * Get available agents (healthy with capacity) + */ + async getAvailableAgents(): Promise { + return this.getAgents({ + status: AgentStatus.HEALTHY, + hasCapacity: true, + }); + } + + /** + * Increment task count when a task is assigned + */ + async incrementTaskCount(agentId: string): Promise { + await this.prisma.agent.update({ + where: { id: agentId }, + data: { + currentTaskCount: { increment: 1 }, + }, + }); + } + + /** + * Decrement task count when a task completes + */ + async decrementTaskCount(agentId: string): Promise { + await this.prisma.agent.update({ + where: { id: agentId }, + data: { + currentTaskCount: { decrement: 1 }, + }, + }); + + // Ensure task count doesn't go negative + await this.prisma.$executeRaw` + UPDATE workflow_orchestrator.agents + SET current_task_count = 0 + WHERE id = ${agentId} AND current_task_count < 0 + `; + } + + /** + * Mark agent as draining (no new tasks) + */ + async drainAgent(agentId: string): Promise { + const previous = await this.prisma.agent.findUnique({ + where: { id: agentId }, + }); + + if (!previous) return; + + await this.prisma.agent.update({ + where: { id: agentId }, + data: { status: AgentStatus.DRAINING }, + }); + + this.eventEmitter.emit(AGENT_STATUS_CHANGED_EVENT, { + agentId, + previousStatus: previous.status, + newStatus: AgentStatus.DRAINING, + }); + + this.logger.log(`Agent ${agentId} is now draining`); + } + + /** + * Periodic health check for all agents + * Runs only on leader to avoid duplicate checks + */ + @Cron(CronExpression.EVERY_10_SECONDS) + async checkAgentHealth(): Promise { + if (!this.leaderElection.isLeader) { + return; + } + + const cutoffTime = new Date(Date.now() - this.heartbeatTimeoutMs); + + // Find agents that haven't sent heartbeat recently + const unhealthyAgents = await this.prisma.agent.findMany({ + where: { + status: { in: [AgentStatus.HEALTHY, AgentStatus.STARTING] }, + lastHeartbeatAt: { lt: cutoffTime }, + }, + }); + + for (const agent of unhealthyAgents) { + this.logger.warn( + `Agent ${agent.name} (${agent.id}) is unhealthy - no heartbeat since ${agent.lastHeartbeatAt}`, + ); + + await this.prisma.agent.update({ + where: { id: agent.id }, + data: { status: AgentStatus.UNHEALTHY }, + }); + + this.eventEmitter.emit(AGENT_UNHEALTHY_EVENT, { + agentId: agent.id, + name: agent.name, + lastHeartbeat: agent.lastHeartbeatAt, + }); + + this.eventEmitter.emit(AGENT_STATUS_CHANGED_EVENT, { + agentId: agent.id, + previousStatus: agent.status, + newStatus: AgentStatus.UNHEALTHY, + }); + } + } + + /** + * Periodic cleanup of stale agents + * Runs only on leader + */ + @Cron(CronExpression.EVERY_HOUR) + async cleanupStaleAgents(): Promise { + if (!this.leaderElection.isLeader) { + return; + } + + const cutoffTime = new Date(Date.now() - this.staleAgentTimeoutMs); + + // Delete agents that have been offline for too long + const result = await this.prisma.agent.deleteMany({ + where: { + status: AgentStatus.OFFLINE, + updatedAt: { lt: cutoffTime }, + }, + }); + + if (result.count > 0) { + this.logger.log(`Cleaned up ${result.count} stale agents`); + } + } + + /** + * Get registry statistics for monitoring + */ + async getStats(): Promise<{ + total: number; + healthy: number; + unhealthy: number; + draining: number; + offline: number; + totalCapacity: number; + usedCapacity: number; + utilizationPercent: number; + }> { + const agents = await this.prisma.agent.findMany(); + + const stats = { + total: agents.length, + healthy: 0, + unhealthy: 0, + draining: 0, + offline: 0, + totalCapacity: 0, + usedCapacity: 0, + utilizationPercent: 0, + }; + + for (const agent of agents) { + switch (agent.status) { + case AgentStatus.HEALTHY: + stats.healthy++; + stats.totalCapacity += agent.maxConcurrentTasks; + stats.usedCapacity += agent.currentTaskCount; + break; + case AgentStatus.UNHEALTHY: + stats.unhealthy++; + break; + case AgentStatus.DRAINING: + stats.draining++; + stats.usedCapacity += agent.currentTaskCount; + break; + case AgentStatus.OFFLINE: + stats.offline++; + break; + } + } + + if (stats.totalCapacity > 0) { + stats.utilizationPercent = Math.round( + (stats.usedCapacity / stats.totalCapacity) * 100, + ); + } + + return stats; + } + + /** + * Convert Prisma agent to AgentInfo + */ + private toAgentInfo(agent: any): AgentInfo { + return { + id: agent.id, + name: agent.name, + endpoint: agent.endpoint, + podName: agent.podName, + nodeIp: agent.nodeIp, + namespace: agent.namespace, + status: agent.status as AgentStatus, + lastHeartbeatAt: agent.lastHeartbeatAt, + maxConcurrentTasks: agent.maxConcurrentTasks, + currentTaskCount: agent.currentTaskCount, + weight: agent.weight, + version: agent.version, + metadata: agent.metadata as Record, + capabilities: (agent.capabilities || []).map((cap: any) => ({ + name: cap.name, + toolPattern: cap.toolPattern, + priority: cap.priority, + costMultiplier: cap.costMultiplier, + requiresExclusiveWorkspace: cap.requiresExclusiveWorkspace, + })), + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/agent-router.service.ts b/packages/bytebot-workflow-orchestrator/src/services/agent-router.service.ts new file mode 100644 index 000000000..ed3be0282 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/agent-router.service.ts @@ -0,0 +1,539 @@ +/** + * Agent Router Service + * v1.0.0: Phase 7 Multi-Agent Orchestration + * + * Implements intelligent task routing to select the best agent for each task. + * Uses multiple routing strategies: + * - Capability-based routing (match tools to agent capabilities) + * - Load-balanced routing (distribute work evenly) + * - Affinity routing (prefer agents that handled related tasks) + * - Cost-aware routing (consider cost multipliers) + * + * Based on the Supervisor pattern from multi-agent orchestration best practices. + * Reference: https://blog.langchain.com/langgraph-multi-agent-workflows/ + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import { + AgentRegistryService, + AgentInfo, + AgentStatus, +} from './agent-registry.service'; +import { minimatch } from 'minimatch'; + +// Routing strategies +export enum RoutingStrategy { + ROUND_ROBIN = 'round_robin', + LEAST_LOADED = 'least_loaded', + CAPABILITY_MATCH = 'capability_match', + WEIGHTED = 'weighted', + AFFINITY = 'affinity', +} + +// Routing result +export interface RoutingResult { + agent: AgentInfo; + reason: string; + score: number; + alternativeAgents: Array<{ agent: AgentInfo; score: number }>; +} + +// Task routing request +export interface RoutingRequest { + nodeId: string; + workflowId: string; + workspaceId: string; + requiredTools: string[]; + preferredAgentId?: string; + affinityNodeIds?: string[]; + requiresExclusiveWorkspace?: boolean; + maxCost?: number; +} + +@Injectable() +export class AgentRouterService { + private readonly logger = new Logger(AgentRouterService.name); + private roundRobinIndex = 0; + private readonly defaultStrategy: RoutingStrategy; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly agentRegistry: AgentRegistryService, + ) { + this.defaultStrategy = this.configService.get( + 'AGENT_ROUTING_STRATEGY', + RoutingStrategy.CAPABILITY_MATCH, + ); + } + + /** + * Route a task to the best available agent + * + * This is the main entry point for task routing. + * It evaluates all available agents and selects the best one based on: + * 1. Capability matching (can the agent handle the required tools?) + * 2. Availability (is the agent healthy and has capacity?) + * 3. Load balancing (distribute work evenly across agents) + * 4. Affinity (prefer agents that handled related tasks) + * 5. Cost (consider cost multipliers if specified) + */ + async routeTask(request: RoutingRequest): Promise { + this.logger.debug(`Routing task for node ${request.nodeId}`); + + // Get all available agents + const agents = await this.agentRegistry.getAvailableAgents(); + + if (agents.length === 0) { + this.logger.warn('No available agents for task routing'); + return null; + } + + // If preferred agent is specified and available, use it + if (request.preferredAgentId) { + const preferredAgent = agents.find( + (a) => a.id === request.preferredAgentId, + ); + if (preferredAgent) { + return { + agent: preferredAgent, + reason: 'preferred_agent', + score: 1.0, + alternativeAgents: [], + }; + } + } + + // Score each agent + const scoredAgents = await Promise.all( + agents.map(async (agent) => { + const score = await this.scoreAgent(agent, request); + return { agent, score }; + }), + ); + + // Filter out agents that can't handle the task (score = 0) + const eligibleAgents = scoredAgents.filter((sa) => sa.score > 0); + + if (eligibleAgents.length === 0) { + this.logger.warn( + `No agents capable of handling task with tools: ${request.requiredTools.join(', ')}`, + ); + return null; + } + + // Sort by score (descending) + eligibleAgents.sort((a, b) => b.score - a.score); + + const selectedAgent = eligibleAgents[0]; + const alternativeAgents = eligibleAgents.slice(1, 4); // Top 3 alternatives + + this.logger.log( + `Routed task ${request.nodeId} to agent ${selectedAgent.agent.name} ` + + `(score: ${selectedAgent.score.toFixed(2)}, reason: capability_match)`, + ); + + return { + agent: selectedAgent.agent, + reason: this.determineReason(selectedAgent, request), + score: selectedAgent.score, + alternativeAgents, + }; + } + + /** + * Score an agent for a given task + * + * Returns a score from 0.0 (cannot handle) to 1.0 (perfect match) + */ + private async scoreAgent( + agent: AgentInfo, + request: RoutingRequest, + ): Promise { + let score = 1.0; + + // 1. Capability matching (most important) + const capabilityScore = this.scoreCapabilities( + agent, + request.requiredTools, + ); + if (capabilityScore === 0) { + return 0; // Agent cannot handle required tools + } + score *= capabilityScore; + + // 2. Load factor (prefer less loaded agents) + const loadScore = this.scoreLoad(agent); + score *= loadScore; + + // 3. Weight factor (agent-level priority) + const weightScore = agent.weight / 100; + score *= weightScore; + + // 4. Affinity bonus (prefer agents that handled related tasks) + if (request.affinityNodeIds && request.affinityNodeIds.length > 0) { + const affinityScore = await this.scoreAffinity( + agent, + request.affinityNodeIds, + ); + score *= 1 + affinityScore * 0.2; // Up to 20% bonus + } + + // 5. Cost consideration + if (request.maxCost !== undefined) { + const costScore = this.scoreCost(agent, request.requiredTools); + if (costScore === 0) { + return 0; // Agent is too expensive + } + score *= costScore; + } + + // 6. Exclusive workspace check + if (request.requiresExclusiveWorkspace) { + const hasExclusiveCapability = agent.capabilities.some( + (cap) => + cap.requiresExclusiveWorkspace && + this.matchesTools(cap.toolPattern, request.requiredTools), + ); + if (!hasExclusiveCapability) { + return 0; + } + } + + return Math.min(score, 1.0); + } + + /** + * Score agent capabilities against required tools + */ + private scoreCapabilities( + agent: AgentInfo, + requiredTools: string[], + ): number { + if (requiredTools.length === 0) { + return 1.0; // No specific tools required + } + + // Check if agent can handle all required tools + let matchedCount = 0; + let totalPriority = 0; + + for (const tool of requiredTools) { + const matchingCapability = agent.capabilities.find((cap) => + this.matchTool(tool, cap.toolPattern), + ); + + if (matchingCapability) { + matchedCount++; + totalPriority += matchingCapability.priority; + } + } + + if (matchedCount < requiredTools.length) { + return 0; // Cannot handle all required tools + } + + // Score based on capability priority + const avgPriority = totalPriority / requiredTools.length; + return avgPriority / 100; // Normalize to 0-1 + } + + /** + * Score agent load (prefer less loaded agents) + */ + private scoreLoad(agent: AgentInfo): number { + if (agent.maxConcurrentTasks === 0) { + return 0; + } + + const utilization = agent.currentTaskCount / agent.maxConcurrentTasks; + // Exponential decay: heavily penalize near-capacity agents + return Math.exp(-2 * utilization); + } + + /** + * Score affinity (preference for agents that handled related tasks) + */ + private async scoreAffinity( + agent: AgentInfo, + affinityNodeIds: string[], + ): Promise { + // Check how many of the affinity nodes were handled by this agent + const assignments = await this.prisma.taskAssignment.findMany({ + where: { + nodeRunId: { in: affinityNodeIds }, + agentId: agent.id, + status: { in: ['COMPLETED', 'RUNNING'] }, + }, + }); + + return assignments.length / affinityNodeIds.length; + } + + /** + * Score cost (prefer lower cost agents) + */ + private scoreCost(agent: AgentInfo, requiredTools: string[]): number { + let totalCost = 0; + + for (const tool of requiredTools) { + const matchingCapability = agent.capabilities.find((cap) => + this.matchTool(tool, cap.toolPattern), + ); + + if (matchingCapability) { + totalCost += matchingCapability.costMultiplier; + } else { + totalCost += 1.0; // Default cost + } + } + + const avgCost = totalCost / Math.max(requiredTools.length, 1); + // Inverse: lower cost = higher score + return 1 / avgCost; + } + + /** + * Match a tool against a pattern (glob-style) + */ + private matchTool(tool: string, pattern: string): boolean { + if (pattern === '*') { + return true; + } + return minimatch(tool, pattern); + } + + /** + * Check if any required tools match the pattern + */ + private matchesTools(pattern: string, tools: string[]): boolean { + if (pattern === '*') { + return true; + } + return tools.some((tool) => minimatch(tool, pattern)); + } + + /** + * Determine the primary routing reason + */ + private determineReason( + selectedAgent: { agent: AgentInfo; score: number }, + request: RoutingRequest, + ): string { + if (request.preferredAgentId === selectedAgent.agent.id) { + return 'preferred_agent'; + } + if (request.affinityNodeIds && request.affinityNodeIds.length > 0) { + return 'affinity'; + } + if (request.requiredTools.length > 0) { + return 'capability_match'; + } + return 'load_balance'; + } + + /** + * Simple round-robin routing (fallback) + */ + async routeRoundRobin(): Promise { + const agents = await this.agentRegistry.getAvailableAgents(); + if (agents.length === 0) { + return null; + } + + const agent = agents[this.roundRobinIndex % agents.length]; + this.roundRobinIndex++; + + return agent; + } + + /** + * Get agent with least current load + */ + async routeLeastLoaded(): Promise { + const agents = await this.agentRegistry.getAvailableAgents(); + if (agents.length === 0) { + return null; + } + + // Sort by utilization (ascending) + agents.sort((a, b) => { + const utilizationA = a.currentTaskCount / a.maxConcurrentTasks; + const utilizationB = b.currentTaskCount / b.maxConcurrentTasks; + return utilizationA - utilizationB; + }); + + return agents[0]; + } + + /** + * Record a task assignment + */ + async recordAssignment( + nodeRunId: string, + agentId: string, + routingReason: string, + ): Promise { + // Get the attempt number + const existingAssignments = await this.prisma.taskAssignment.count({ + where: { nodeRunId }, + }); + + await this.prisma.taskAssignment.create({ + data: { + nodeRunId, + agentId, + routingReason, + status: 'ASSIGNED', + attempt: existingAssignments + 1, + dispatchedAt: new Date(), + }, + }); + + // Increment agent task count + await this.agentRegistry.incrementTaskCount(agentId); + } + + /** + * Complete a task assignment + */ + async completeAssignment( + nodeRunId: string, + agentId: string, + success: boolean, + result?: any, + error?: string, + ): Promise { + await this.prisma.taskAssignment.updateMany({ + where: { + nodeRunId, + agentId, + status: { in: ['ASSIGNED', 'RUNNING'] }, + }, + data: { + status: success ? 'COMPLETED' : 'FAILED', + completedAt: new Date(), + result, + error, + }, + }); + + // Decrement agent task count + await this.agentRegistry.decrementTaskCount(agentId); + } + + /** + * Reassign a task to a different agent + */ + async reassignTask( + nodeRunId: string, + fromAgentId: string, + toAgentId: string, + reason: string, + ): Promise { + // Get the current assignment + const currentAssignment = await this.prisma.taskAssignment.findFirst({ + where: { + nodeRunId, + agentId: fromAgentId, + status: { in: ['ASSIGNED', 'RUNNING'] }, + }, + }); + + if (currentAssignment) { + // Mark current assignment as reassigned + await this.prisma.taskAssignment.update({ + where: { id: currentAssignment.id }, + data: { status: 'REASSIGNED' }, + }); + + // Create new assignment + await this.prisma.taskAssignment.create({ + data: { + nodeRunId, + agentId: toAgentId, + routingReason: reason, + status: 'ASSIGNED', + attempt: currentAssignment.attempt + 1, + previousAssignmentId: currentAssignment.id, + dispatchedAt: new Date(), + }, + }); + + // Update task counts + await this.agentRegistry.decrementTaskCount(fromAgentId); + await this.agentRegistry.incrementTaskCount(toAgentId); + + this.logger.log( + `Reassigned task ${nodeRunId} from agent ${fromAgentId} to ${toAgentId}: ${reason}`, + ); + } + } + + /** + * Get routing statistics for monitoring + */ + async getStats(): Promise<{ + totalAssignments: number; + completedAssignments: number; + failedAssignments: number; + reassignments: number; + avgCompletionTimeMs: number; + routingReasons: Record; + }> { + const assignments = await this.prisma.taskAssignment.findMany({ + select: { + status: true, + routingReason: true, + dispatchedAt: true, + completedAt: true, + }, + }); + + const stats = { + totalAssignments: assignments.length, + completedAssignments: 0, + failedAssignments: 0, + reassignments: 0, + avgCompletionTimeMs: 0, + routingReasons: {} as Record, + }; + + let totalCompletionTime = 0; + let completionCount = 0; + + for (const assignment of assignments) { + switch (assignment.status) { + case 'COMPLETED': + stats.completedAssignments++; + if (assignment.dispatchedAt && assignment.completedAt) { + totalCompletionTime += + assignment.completedAt.getTime() - + assignment.dispatchedAt.getTime(); + completionCount++; + } + break; + case 'FAILED': + stats.failedAssignments++; + break; + case 'REASSIGNED': + stats.reassignments++; + break; + } + + // Count routing reasons + const reason = assignment.routingReason || 'unknown'; + stats.routingReasons[reason] = (stats.routingReasons[reason] || 0) + 1; + } + + if (completionCount > 0) { + stats.avgCompletionTimeMs = Math.round( + totalCompletionTime / completionCount, + ); + } + + return stats; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/analytics-query.service.ts b/packages/bytebot-workflow-orchestrator/src/services/analytics-query.service.ts new file mode 100644 index 000000000..165a4377c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/analytics-query.service.ts @@ -0,0 +1,1035 @@ +/** + * Analytics Query Service + * v2.0.0: Phase 7 Enhanced Features - Goal Run Analytics + * + * Provides optimized queries for dashboard visualization and analytics. + * Uses pre-aggregated metrics snapshots for fast queries at scale. + * + * Key responsibilities: + * - Time-series data retrieval for charts + * - KPI calculations + * - Comparative analysis (period-over-period) + * - Top-N rankings (workflows, agents, etc.) + * - Goal run execution insights (Phase 7) + * - Template analytics (Phase 7) + * - Batch execution analytics (Phase 7) + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { PrismaService } from './prisma.service'; +import { AggregationPeriod } from './metrics-aggregation.service'; + +// Time range presets +export enum TimeRange { + LAST_HOUR = '1h', + LAST_6_HOURS = '6h', + LAST_24_HOURS = '24h', + LAST_7_DAYS = '7d', + LAST_30_DAYS = '30d', + CUSTOM = 'custom', +} + +// KPI summary result +export interface KPISummary { + totalExecutions: number; + completedExecutions: number; + failedExecutions: number; + successRate: number; + avgDurationMs: number; + p50DurationMs: number; + p95DurationMs: number; + p99DurationMs: number; + totalSteps: number; + avgStepsPerWorkflow: number; + activeAgents: number; +} + +// Time-series data point +export interface TimeSeriesPoint { + timestamp: Date; + value: number; + metadata?: Record; +} + +// Dashboard summary +export interface DashboardSummary { + kpis: KPISummary; + trends: { + executionsTrend: TimeSeriesPoint[]; + successRateTrend: TimeSeriesPoint[]; + durationTrend: TimeSeriesPoint[]; + }; + topWorkflows: Array<{ + workflowName: string; + count: number; + successRate: number; + avgDurationMs: number; + }>; + topAgents: Array<{ + agentId: string; + agentName: string; + tasksCompleted: number; + avgDurationMs: number; + }>; + recentErrors: Array<{ + workflowRunId: string; + workflowName: string; + errorType: string | null; + errorMessage: string | null; + timestamp: Date; + }>; +} + +@Injectable() +export class AnalyticsQueryService { + private readonly logger = new Logger(AnalyticsQueryService.name); + + constructor(private readonly prisma: PrismaService) {} + + /** + * Get KPI summary for a tenant + */ + async getKPISummary( + tenantId: string, + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): Promise { + const { start, end } = this.getTimeRangeDates( + timeRange, + customStart, + customEnd, + ); + + // Get workflow execution aggregates + const workflowStats = await this.prisma.workflowExecutionMetric.aggregate({ + where: { + tenantId, + timestamp: { gte: start, lte: end }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + _count: true, + _avg: { durationMs: true, nodeCount: true }, + }); + + // Get success/failure counts + const statusCounts = await this.prisma.workflowExecutionMetric.groupBy({ + by: ['status'], + where: { + tenantId, + timestamp: { gte: start, lte: end }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + _count: true, + }); + + // Get percentiles from recent snapshot + const latestSnapshot = await this.prisma.metricsSnapshot.findFirst({ + where: { + tenantId, + metricName: 'workflow_execution_duration', + bucketStart: { gte: start }, + }, + orderBy: { bucketStart: 'desc' }, + }); + + // Get step counts + const stepStats = await this.prisma.workflowStepMetric.aggregate({ + where: { + tenantId, + timestamp: { gte: start, lte: end }, + }, + _count: true, + }); + + // Get active agents count + const activeAgents = await this.prisma.agent.count({ + where: { + status: { in: ['HEALTHY', 'STARTING'] }, + }, + }); + + const completed = + statusCounts.find((s) => s.status === 'COMPLETED')?._count ?? 0; + const failed = statusCounts.find((s) => s.status === 'FAILED')?._count ?? 0; + const total = completed + failed; + + return { + totalExecutions: total, + completedExecutions: completed, + failedExecutions: failed, + successRate: total > 0 ? (completed / total) * 100 : 0, + avgDurationMs: workflowStats._avg?.durationMs ?? 0, + p50DurationMs: latestSnapshot?.percentile50 ?? 0, + p95DurationMs: latestSnapshot?.percentile95 ?? 0, + p99DurationMs: latestSnapshot?.percentile99 ?? 0, + totalSteps: stepStats._count ?? 0, + avgStepsPerWorkflow: workflowStats._avg?.nodeCount ?? 0, + activeAgents, + }; + } + + /** + * Get time-series data for a metric + */ + async getTimeSeries( + tenantId: string, + metricName: string, + timeRange: TimeRange, + period: AggregationPeriod = '5m', + customStart?: Date, + customEnd?: Date, + ): Promise { + const { start, end } = this.getTimeRangeDates( + timeRange, + customStart, + customEnd, + ); + + const snapshots = await this.prisma.metricsSnapshot.findMany({ + where: { + tenantId, + metricName, + period, + bucketStart: { gte: start, lte: end }, + }, + orderBy: { bucketStart: 'asc' }, + select: { + bucketStart: true, + avg: true, + count: true, + successCount: true, + failureCount: true, + }, + }); + + return snapshots.map((s) => ({ + timestamp: s.bucketStart, + value: s.avg, + metadata: { + count: s.count, + successCount: s.successCount, + failureCount: s.failureCount, + }, + })); + } + + /** + * Get execution trends (count over time) + */ + async getExecutionTrends( + tenantId: string, + timeRange: TimeRange, + period: AggregationPeriod = '1h', + customStart?: Date, + customEnd?: Date, + ): Promise { + const { start, end } = this.getTimeRangeDates( + timeRange, + customStart, + customEnd, + ); + + const snapshots = await this.prisma.metricsSnapshot.findMany({ + where: { + tenantId, + metricName: 'workflow_execution_duration', + period, + bucketStart: { gte: start, lte: end }, + }, + orderBy: { bucketStart: 'asc' }, + select: { + bucketStart: true, + count: true, + successCount: true, + failureCount: true, + }, + }); + + return snapshots.map((s) => ({ + timestamp: s.bucketStart, + value: s.count, + metadata: { + successCount: s.successCount, + failureCount: s.failureCount, + }, + })); + } + + /** + * Get top workflows by execution count + */ + async getTopWorkflows( + tenantId: string, + timeRange: TimeRange, + limit: number = 10, + customStart?: Date, + customEnd?: Date, + ): Promise< + Array<{ + workflowName: string; + templateId: string | null; + count: number; + successCount: number; + failureCount: number; + successRate: number; + avgDurationMs: number; + }> + > { + const { start, end } = this.getTimeRangeDates( + timeRange, + customStart, + customEnd, + ); + + const workflows = await this.prisma.workflowExecutionMetric.groupBy({ + by: ['workflowName', 'templateId'], + where: { + tenantId, + timestamp: { gte: start, lte: end }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + _count: true, + _avg: { durationMs: true }, + }); + + // Get success/failure breakdowns + const results = await Promise.all( + workflows.slice(0, limit * 2).map(async (w) => { + const statusCounts = + await this.prisma.workflowExecutionMetric.groupBy({ + by: ['status'], + where: { + tenantId, + workflowName: w.workflowName, + timestamp: { gte: start, lte: end }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + _count: true, + }); + + const successCount = + statusCounts.find((s) => s.status === 'COMPLETED')?._count ?? 0; + const failureCount = + statusCounts.find((s) => s.status === 'FAILED')?._count ?? 0; + const total = successCount + failureCount; + + return { + workflowName: w.workflowName, + templateId: w.templateId, + count: w._count, + successCount, + failureCount, + successRate: total > 0 ? (successCount / total) * 100 : 0, + avgDurationMs: w._avg?.durationMs ?? 0, + }; + }), + ); + + return results.sort((a, b) => b.count - a.count).slice(0, limit); + } + + /** + * Get top agents by task completion + */ + async getTopAgents( + tenantId: string, + timeRange: TimeRange, + limit: number = 10, + customStart?: Date, + customEnd?: Date, + ): Promise< + Array<{ + agentId: string; + agentName: string; + tasksCompleted: number; + tasksFailed: number; + avgDurationMs: number; + }> + > { + const { start, end } = this.getTimeRangeDates( + timeRange, + customStart, + customEnd, + ); + + const agents = await this.prisma.workflowStepMetric.groupBy({ + by: ['agentId', 'agentName'], + where: { + tenantId, + timestamp: { gte: start, lte: end }, + agentId: { not: null }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + _count: true, + _avg: { durationMs: true }, + }); + + const results = await Promise.all( + agents.slice(0, limit * 2).map(async (a) => { + const statusCounts = await this.prisma.workflowStepMetric.groupBy({ + by: ['status'], + where: { + tenantId, + agentId: a.agentId, + timestamp: { gte: start, lte: end }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + _count: true, + }); + + const completed = + statusCounts.find((s) => s.status === 'COMPLETED')?._count ?? 0; + const failed = + statusCounts.find((s) => s.status === 'FAILED')?._count ?? 0; + + return { + agentId: a.agentId!, + agentName: a.agentName ?? 'Unknown', + tasksCompleted: completed, + tasksFailed: failed, + avgDurationMs: a._avg?.durationMs ?? 0, + }; + }), + ); + + return results.sort((a, b) => b.tasksCompleted - a.tasksCompleted).slice(0, limit); + } + + /** + * Get recent errors + */ + async getRecentErrors( + tenantId: string, + limit: number = 10, + ): Promise< + Array<{ + workflowRunId: string; + workflowName: string; + errorType: string | null; + errorMessage: string | null; + timestamp: Date; + }> + > { + const errors = await this.prisma.workflowExecutionMetric.findMany({ + where: { + tenantId, + status: 'FAILED', + errorMessage: { not: null }, + }, + orderBy: { timestamp: 'desc' }, + take: limit, + select: { + workflowRunId: true, + workflowName: true, + errorType: true, + errorMessage: true, + timestamp: true, + }, + }); + + return errors; + } + + /** + * Get complete dashboard summary + */ + async getDashboardSummary( + tenantId: string, + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): Promise { + const [ + kpis, + executionsTrend, + successRateTrend, + durationTrend, + topWorkflows, + topAgents, + recentErrors, + ] = await Promise.all([ + this.getKPISummary(tenantId, timeRange, customStart, customEnd), + this.getExecutionTrends( + tenantId, + timeRange, + this.getOptimalPeriod(timeRange), + customStart, + customEnd, + ), + this.getTimeSeries( + tenantId, + 'workflow_success_rate', + timeRange, + this.getOptimalPeriod(timeRange), + customStart, + customEnd, + ), + this.getTimeSeries( + tenantId, + 'workflow_execution_duration', + timeRange, + this.getOptimalPeriod(timeRange), + customStart, + customEnd, + ), + this.getTopWorkflows(tenantId, timeRange, 5, customStart, customEnd), + this.getTopAgents(tenantId, timeRange, 5, customStart, customEnd), + this.getRecentErrors(tenantId, 5), + ]); + + return { + kpis, + trends: { + executionsTrend, + successRateTrend, + durationTrend, + }, + topWorkflows, + topAgents, + recentErrors, + }; + } + + /** + * Get period-over-period comparison + */ + async getComparison( + tenantId: string, + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): Promise<{ + current: KPISummary; + previous: KPISummary; + changes: { + executionsChange: number; + successRateChange: number; + durationChange: number; + }; + }> { + const { start, end } = this.getTimeRangeDates( + timeRange, + customStart, + customEnd, + ); + const periodMs = end.getTime() - start.getTime(); + + const previousStart = new Date(start.getTime() - periodMs); + const previousEnd = new Date(start.getTime()); + + const [current, previous] = await Promise.all([ + this.getKPISummary(tenantId, TimeRange.CUSTOM, start, end), + this.getKPISummary(tenantId, TimeRange.CUSTOM, previousStart, previousEnd), + ]); + + return { + current, + previous, + changes: { + executionsChange: + previous.totalExecutions > 0 + ? ((current.totalExecutions - previous.totalExecutions) / + previous.totalExecutions) * + 100 + : 0, + successRateChange: current.successRate - previous.successRate, + durationChange: + previous.avgDurationMs > 0 + ? ((current.avgDurationMs - previous.avgDurationMs) / + previous.avgDurationMs) * + 100 + : 0, + }, + }; + } + + // ========================================================================= + // Utility Methods + // ========================================================================= + + private getTimeRangeDates( + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): { start: Date; end: Date } { + const end = customEnd ?? new Date(); + let start: Date; + + switch (timeRange) { + case TimeRange.LAST_HOUR: + start = new Date(end.getTime() - 60 * 60 * 1000); + break; + case TimeRange.LAST_6_HOURS: + start = new Date(end.getTime() - 6 * 60 * 60 * 1000); + break; + case TimeRange.LAST_24_HOURS: + start = new Date(end.getTime() - 24 * 60 * 60 * 1000); + break; + case TimeRange.LAST_7_DAYS: + start = new Date(end.getTime() - 7 * 24 * 60 * 60 * 1000); + break; + case TimeRange.LAST_30_DAYS: + start = new Date(end.getTime() - 30 * 24 * 60 * 60 * 1000); + break; + case TimeRange.CUSTOM: + start = customStart ?? new Date(end.getTime() - 24 * 60 * 60 * 1000); + break; + default: + start = new Date(end.getTime() - 24 * 60 * 60 * 1000); + } + + return { start, end }; + } + + private getOptimalPeriod(timeRange: TimeRange): AggregationPeriod { + switch (timeRange) { + case TimeRange.LAST_HOUR: + return '1m'; + case TimeRange.LAST_6_HOURS: + return '5m'; + case TimeRange.LAST_24_HOURS: + return '15m'; + case TimeRange.LAST_7_DAYS: + return '1h'; + case TimeRange.LAST_30_DAYS: + return '1d'; + default: + return '1h'; + } + } + + // ========================================================================= + // Phase 7: Goal Run Analytics + // ========================================================================= + + /** + * Get goal run KPI summary + */ + async getGoalRunKPIs( + tenantId: string, + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): Promise<{ + totalGoalRuns: number; + completedGoalRuns: number; + failedGoalRuns: number; + cancelledGoalRuns: number; + successRate: number; + avgDurationMs: number; + avgStepsPerGoal: number; + avgReplanCount: number; + templateUsageRate: number; + }> { + const { start, end } = this.getTimeRangeDates(timeRange, customStart, customEnd); + + // Get goal run counts by status + const statusCounts = await this.prisma.goalRun.groupBy({ + by: ['status'], + where: { + tenantId, + createdAt: { gte: start, lte: end }, + }, + _count: true, + }); + + const completed = statusCounts.find((s) => s.status === 'COMPLETED')?._count ?? 0; + const failed = statusCounts.find((s) => s.status === 'FAILED')?._count ?? 0; + const cancelled = statusCounts.find((s) => s.status === 'CANCELLED')?._count ?? 0; + const total = statusCounts.reduce((acc, s) => acc + s._count, 0); + + // Get duration metrics for completed goal runs + const completedRuns = await this.prisma.goalRun.findMany({ + where: { + tenantId, + status: 'COMPLETED', + createdAt: { gte: start, lte: end }, + completedAt: { not: null }, + startedAt: { not: null }, + }, + select: { + startedAt: true, + completedAt: true, + currentPlanVersion: true, + }, + }); + + let avgDurationMs = 0; + let avgReplanCount = 0; + + if (completedRuns.length > 0) { + const durations = completedRuns.map( + (r) => r.completedAt!.getTime() - r.startedAt!.getTime(), + ); + avgDurationMs = durations.reduce((a, b) => a + b, 0) / durations.length; + avgReplanCount = + completedRuns.reduce((a, r) => a + (r.currentPlanVersion - 1), 0) / + completedRuns.length; + } + + // Get average steps per goal + const stepStats = await this.prisma.checklistItem.groupBy({ + by: ['planVersionId'], + _count: true, + }); + + const avgStepsPerGoal = + stepStats.length > 0 + ? stepStats.reduce((a, s) => a + s._count, 0) / stepStats.length + : 0; + + // Get template usage rate + const templateUsageCount = await this.prisma.goalRunFromTemplate.count({ + where: { + createdAt: { gte: start, lte: end }, + }, + }); + + return { + totalGoalRuns: total, + completedGoalRuns: completed, + failedGoalRuns: failed, + cancelledGoalRuns: cancelled, + successRate: total > 0 ? (completed / total) * 100 : 0, + avgDurationMs, + avgStepsPerGoal, + avgReplanCount, + templateUsageRate: total > 0 ? (templateUsageCount / total) * 100 : 0, + }; + } + + /** + * Get goal run trends over time + */ + async getGoalRunTrends( + tenantId: string, + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): Promise { + const { start, end } = this.getTimeRangeDates(timeRange, customStart, customEnd); + const period = this.getOptimalPeriod(timeRange); + + // Get goal runs grouped by time bucket + const goalRuns = await this.prisma.goalRun.findMany({ + where: { + tenantId, + createdAt: { gte: start, lte: end }, + }, + select: { + createdAt: true, + status: true, + }, + orderBy: { createdAt: 'asc' }, + }); + + // Group by time bucket + const buckets = new Map(); + const periodMs = this.getPeriodMs(period); + + for (const run of goalRuns) { + const bucketTime = new Date( + Math.floor(run.createdAt.getTime() / periodMs) * periodMs, + ).toISOString(); + + if (!buckets.has(bucketTime)) { + buckets.set(bucketTime, { total: 0, completed: 0, failed: 0 }); + } + + const bucket = buckets.get(bucketTime)!; + bucket.total++; + if (run.status === 'COMPLETED') bucket.completed++; + if (run.status === 'FAILED') bucket.failed++; + } + + return Array.from(buckets.entries()).map(([timestamp, data]) => ({ + timestamp: new Date(timestamp), + value: data.total, + metadata: { + completed: data.completed, + failed: data.failed, + successRate: data.total > 0 ? (data.completed / data.total) * 100 : 0, + }, + })); + } + + /** + * Get top templates by usage + */ + async getTopTemplates( + tenantId: string, + timeRange: TimeRange, + limit: number = 10, + customStart?: Date, + customEnd?: Date, + ): Promise< + Array<{ + templateId: string; + templateName: string; + usageCount: number; + successRate: number; + avgDurationMs: number; + }> + > { + const { start, end } = this.getTimeRangeDates(timeRange, customStart, customEnd); + + // Get template usage from junction table + const templateUsage = await this.prisma.goalRunFromTemplate.groupBy({ + by: ['templateId'], + where: { + createdAt: { gte: start, lte: end }, + }, + _count: true, + }); + + // Get template details and success rates + const results = await Promise.all( + templateUsage.slice(0, limit * 2).map(async (t) => { + const template = await this.prisma.goalTemplate.findUnique({ + where: { id: t.templateId }, + select: { name: true }, + }); + + // Get goal run IDs from this template + const goalRunIds = await this.prisma.goalRunFromTemplate.findMany({ + where: { + templateId: t.templateId, + createdAt: { gte: start, lte: end }, + }, + select: { goalRunId: true }, + }); + + // Get success rate + const statusCounts = await this.prisma.goalRun.groupBy({ + by: ['status'], + where: { + id: { in: goalRunIds.map((g) => g.goalRunId) }, + }, + _count: true, + }); + + const completed = statusCounts.find((s) => s.status === 'COMPLETED')?._count ?? 0; + const total = statusCounts.reduce((a, s) => a + s._count, 0); + + // Calculate average duration from completed goal runs + const completedRuns = await this.prisma.goalRun.findMany({ + where: { + id: { in: goalRunIds.map((g) => g.goalRunId) }, + status: 'COMPLETED', + startedAt: { not: null }, + completedAt: { not: null }, + }, + select: { startedAt: true, completedAt: true }, + }); + + let avgDurationMs = 0; + if (completedRuns.length > 0) { + const totalMs = completedRuns.reduce((sum, run) => { + if (run.startedAt && run.completedAt) { + return sum + (run.completedAt.getTime() - run.startedAt.getTime()); + } + return sum; + }, 0); + avgDurationMs = Math.round(totalMs / completedRuns.length); + } + + return { + templateId: t.templateId, + templateName: template?.name ?? 'Unknown', + usageCount: t._count, + successRate: total > 0 ? (completed / total) * 100 : 0, + avgDurationMs, + }; + }), + ); + + return results.sort((a, b) => b.usageCount - a.usageCount).slice(0, limit); + } + + /** + * Get batch execution statistics + */ + async getBatchStats( + tenantId: string, + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): Promise<{ + totalBatches: number; + completedBatches: number; + partiallyCompletedBatches: number; + failedBatches: number; + totalGoalsInBatches: number; + avgGoalsPerBatch: number; + avgBatchDurationMs: number; + }> { + const { start, end } = this.getTimeRangeDates(timeRange, customStart, customEnd); + + const statusCounts = await this.prisma.goalRunBatch.groupBy({ + by: ['status'], + where: { + tenantId, + createdAt: { gte: start, lte: end }, + }, + _count: true, + }); + + const completed = statusCounts.find((s) => s.status === 'COMPLETED')?._count ?? 0; + const partial = + statusCounts.find((s) => s.status === 'PARTIALLY_COMPLETED')?._count ?? 0; + const failed = statusCounts.find((s) => s.status === 'FAILED')?._count ?? 0; + const total = statusCounts.reduce((a, s) => a + s._count, 0); + + // Get batch size stats + const batchStats = await this.prisma.goalRunBatch.aggregate({ + where: { + tenantId, + createdAt: { gte: start, lte: end }, + }, + _avg: { totalGoals: true }, + _sum: { totalGoals: true }, + }); + + // Calculate average batch duration from completed batches + const completedBatches = await this.prisma.goalRunBatch.findMany({ + where: { + tenantId, + createdAt: { gte: start, lte: end }, + status: { in: ['COMPLETED', 'PARTIALLY_COMPLETED'] }, + startedAt: { not: null }, + completedAt: { not: null }, + }, + select: { startedAt: true, completedAt: true }, + }); + + let avgBatchDurationMs = 0; + if (completedBatches.length > 0) { + const totalMs = completedBatches.reduce((sum, batch) => { + if (batch.startedAt && batch.completedAt) { + return sum + (batch.completedAt.getTime() - batch.startedAt.getTime()); + } + return sum; + }, 0); + avgBatchDurationMs = Math.round(totalMs / completedBatches.length); + } + + return { + totalBatches: total, + completedBatches: completed, + partiallyCompletedBatches: partial, + failedBatches: failed, + totalGoalsInBatches: batchStats._sum?.totalGoals ?? 0, + avgGoalsPerBatch: batchStats._avg?.totalGoals ?? 0, + avgBatchDurationMs, + }; + } + + /** + * Get goal run phase distribution + */ + async getPhaseDistribution( + tenantId: string, + ): Promise> { + const phaseCounts = await this.prisma.goalRun.groupBy({ + by: ['phase'], + where: { + tenantId, + status: 'RUNNING', + }, + _count: true, + }); + + const total = phaseCounts.reduce((a, p) => a + p._count, 0); + + return phaseCounts.map((p) => ({ + phase: p.phase, + count: p._count, + percentage: total > 0 ? (p._count / total) * 100 : 0, + })); + } + + /** + * Get execution insights dashboard + */ + async getExecutionInsights( + tenantId: string, + timeRange: TimeRange, + customStart?: Date, + customEnd?: Date, + ): Promise<{ + goalRunKPIs: Awaited>; + goalRunTrends: TimeSeriesPoint[]; + topTemplates: Awaited>; + batchStats: Awaited>; + phaseDistribution: Awaited>; + recentFailures: Array<{ + goalRunId: string; + goal: string; + error: string | null; + failedAt: Date; + }>; + }> { + const [ + goalRunKPIs, + goalRunTrends, + topTemplates, + batchStats, + phaseDistribution, + recentFailures, + ] = await Promise.all([ + this.getGoalRunKPIs(tenantId, timeRange, customStart, customEnd), + this.getGoalRunTrends(tenantId, timeRange, customStart, customEnd), + this.getTopTemplates(tenantId, timeRange, 5, customStart, customEnd), + this.getBatchStats(tenantId, timeRange, customStart, customEnd), + this.getPhaseDistribution(tenantId), + this.prisma.goalRun.findMany({ + where: { + tenantId, + status: 'FAILED', + }, + orderBy: { completedAt: 'desc' }, + take: 5, + select: { + id: true, + goal: true, + error: true, + completedAt: true, + }, + }).then((runs) => + runs.map((r) => ({ + goalRunId: r.id, + goal: r.goal.substring(0, 100), + error: r.error, + failedAt: r.completedAt ?? new Date(), + })), + ), + ]); + + return { + goalRunKPIs, + goalRunTrends, + topTemplates, + batchStats, + phaseDistribution, + recentFailures, + }; + } + + private getPeriodMs(period: AggregationPeriod): number { + switch (period) { + case '1m': + return 60 * 1000; + case '5m': + return 5 * 60 * 1000; + case '15m': + return 15 * 60 * 1000; + case '1h': + return 60 * 60 * 1000; + case '1d': + return 24 * 60 * 60 * 1000; + default: + return 60 * 60 * 1000; + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/approval.service.ts b/packages/bytebot-workflow-orchestrator/src/services/approval.service.ts new file mode 100644 index 000000000..1b99eb138 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/approval.service.ts @@ -0,0 +1,400 @@ +/** + * Approval Service + * v1.0.0 M5: Manages approval requests for high-risk actions + * + * Best Practices Applied: + * - Clear approval context with AI reasoning preserved + * - Workflow checkpointing for state persistence + * - Time-bounded approvals with expiry + * - Audit trail for all approval decisions + * + * References: + * - https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/multi-agent-workflow-with-human-approval-using-agent-framework/4465927 + * - https://www.permit.io/blog/human-in-the-loop-for-ai-agents-best-practices + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import { HighRiskService, ActionContext, ActionClassification, ActionPreview } from './high-risk.service'; + +/** + * Approval request status + */ +export enum ApprovalStatus { + PENDING = 'PENDING', + APPROVED = 'APPROVED', + REJECTED = 'REJECTED', + EXPIRED = 'EXPIRED', +} + +/** + * Approval request details + */ +export interface ApprovalRequestDetails { + id: string; + nodeRunId: string; + actionHash: string; + toolName: string; + toolParams: Record; + previewData: ActionPreview; + status: ApprovalStatus; + expiresAt: Date; + reason?: string; + approvedBy?: string; + approvedAt?: Date; + rejectedBy?: string; + rejectedAt?: Date; + createdAt: Date; +} + +/** + * Create approval request input + */ +export interface CreateApprovalInput { + nodeRunId: string; + actionContext: ActionContext; + classification: ActionClassification; + aiReasoning?: string; + confidenceScore?: number; +} + +/** + * Approval decision input + */ +export interface ApprovalDecision { + approvalId: string; + approved: boolean; + reviewerId: string; + reason?: string; +} + +/** + * Default approval expiry in minutes + */ +const DEFAULT_APPROVAL_EXPIRY_MINUTES = 60; + +@Injectable() +export class ApprovalService { + private readonly logger = new Logger(ApprovalService.name); + private readonly approvalExpiryMinutes: number; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly highRiskService: HighRiskService, + ) { + this.approvalExpiryMinutes = parseInt( + this.configService.get( + 'HIGH_RISK_APPROVAL_EXPIRY_MINUTES', + String(DEFAULT_APPROVAL_EXPIRY_MINUTES), + ), + 10, + ); + + this.logger.log(`Approval expiry: ${this.approvalExpiryMinutes} minutes`); + } + + /** + * Create an approval request for a high-risk action + */ + async createApprovalRequest(input: CreateApprovalInput): Promise { + const { nodeRunId, actionContext, classification, aiReasoning, confidenceScore } = input; + const { actionHash, previewData } = classification; + + // Check if approval already exists + const existing = await this.prisma.approvalRequest.findUnique({ + where: { + nodeRunId_actionHash: { nodeRunId, actionHash }, + }, + }); + + if (existing) { + this.logger.debug(`Approval request already exists: ${existing.id}`); + return this.mapToDetails(existing); + } + + // Calculate expiry time + const expiresAt = new Date(Date.now() + this.approvalExpiryMinutes * 60 * 1000); + + // Create approval request with full context + const approvalRequest = await this.prisma.approvalRequest.create({ + data: { + nodeRunId, + actionHash, + toolName: actionContext.toolName, + toolParams: actionContext.toolParams, + previewData: { + ...previewData, + aiReasoning, + confidenceScore, + riskLevel: classification.riskLevel, + riskReason: classification.reason, + workspaceId: actionContext.workspaceId, + tenantId: actionContext.tenantId, + currentUrl: actionContext.currentUrl, + }, + status: ApprovalStatus.PENDING, + expiresAt, + }, + }); + + this.logger.log( + `Created approval request ${approvalRequest.id} for ${actionContext.toolName} (expires: ${expiresAt.toISOString()})`, + ); + + // Update node run status to WAITING_FOR_APPROVAL + await this.updateNodeRunStatus(nodeRunId, 'WAITING_FOR_APPROVAL'); + + return this.mapToDetails(approvalRequest); + } + + /** + * Check if an action has been approved + */ + async checkApprovalStatus( + nodeRunId: string, + actionHash: string, + ): Promise<{ status: ApprovalStatus; approvalId?: string; reason?: string }> { + const approval = await this.prisma.approvalRequest.findUnique({ + where: { + nodeRunId_actionHash: { nodeRunId, actionHash }, + }, + }); + + if (!approval) { + return { status: ApprovalStatus.PENDING }; + } + + // Check for expiry + if (approval.status === ApprovalStatus.PENDING && approval.expiresAt < new Date()) { + await this.expireApproval(approval.id); + return { status: ApprovalStatus.EXPIRED, approvalId: approval.id }; + } + + return { + status: approval.status as ApprovalStatus, + approvalId: approval.id, + reason: approval.reason || undefined, + }; + } + + /** + * Get approval request by ID + */ + async getApprovalById(approvalId: string): Promise { + const approval = await this.prisma.approvalRequest.findUnique({ + where: { id: approvalId }, + }); + + if (!approval) { + return null; + } + + // Check for expiry + if (approval.status === ApprovalStatus.PENDING && approval.expiresAt < new Date()) { + await this.expireApproval(approval.id); + const updated = await this.prisma.approvalRequest.findUnique({ + where: { id: approvalId }, + }); + return updated ? this.mapToDetails(updated) : null; + } + + return this.mapToDetails(approval); + } + + /** + * Process approval decision + */ + async processDecision(decision: ApprovalDecision): Promise { + const { approvalId, approved, reviewerId, reason } = decision; + + const approval = await this.prisma.approvalRequest.findUnique({ + where: { id: approvalId }, + }); + + if (!approval) { + throw new Error(`Approval request ${approvalId} not found`); + } + + if (approval.status !== ApprovalStatus.PENDING) { + throw new Error(`Approval request ${approvalId} is not pending (status: ${approval.status})`); + } + + if (approval.expiresAt < new Date()) { + await this.expireApproval(approvalId); + throw new Error(`Approval request ${approvalId} has expired`); + } + + const now = new Date(); + + const updated = await this.prisma.approvalRequest.update({ + where: { id: approvalId }, + data: { + status: approved ? ApprovalStatus.APPROVED : ApprovalStatus.REJECTED, + ...(approved + ? { approvedBy: reviewerId, approvedAt: now } + : { rejectedBy: reviewerId, rejectedAt: now }), + reason, + }, + }); + + this.logger.log( + `Approval ${approvalId} ${approved ? 'APPROVED' : 'REJECTED'} by ${reviewerId}${reason ? `: ${reason}` : ''}`, + ); + + // Update node run status based on decision + if (approved) { + await this.updateNodeRunStatus(approval.nodeRunId, 'RUNNING'); + } else { + await this.updateNodeRunStatus(approval.nodeRunId, 'FAILED'); + } + + return this.mapToDetails(updated); + } + + /** + * Get pending approvals for a tenant + */ + async getPendingApprovals(tenantId: string, options?: { + limit?: number; + offset?: number; + }): Promise<{ approvals: ApprovalRequestDetails[]; total: number }> { + const { limit = 20, offset = 0 } = options || {}; + + // First, expire any old approvals + await this.expireOldApprovals(); + + const where = { + status: ApprovalStatus.PENDING, + nodeRun: { + node: { + workflowRun: { + tenantId, + }, + }, + }, + }; + + const [approvals, total] = await Promise.all([ + this.prisma.approvalRequest.findMany({ + where, + orderBy: { createdAt: 'desc' }, + take: limit, + skip: offset, + }), + this.prisma.approvalRequest.count({ where }), + ]); + + return { + approvals: approvals.map((a) => this.mapToDetails(a)), + total, + }; + } + + /** + * Get count of approvals by status + * v1.0.1: Added to support frontend badge display + */ + async getApprovalCount( + status: string, + tenantId?: string, + ): Promise { + // First, expire any old approvals + await this.expireOldApprovals(); + + const where: any = { status }; + + // Filter by tenant if provided + if (tenantId) { + where.nodeRun = { + node: { + workflowRun: { + tenantId, + }, + }, + }; + } + + return this.prisma.approvalRequest.count({ where }); + } + + /** + * Get approvals for a specific node run + */ + async getApprovalsForNodeRun(nodeRunId: string): Promise { + const approvals = await this.prisma.approvalRequest.findMany({ + where: { nodeRunId }, + orderBy: { createdAt: 'desc' }, + }); + + return approvals.map((a) => this.mapToDetails(a)); + } + + /** + * Expire a single approval + */ + private async expireApproval(approvalId: string): Promise { + await this.prisma.approvalRequest.update({ + where: { id: approvalId }, + data: { status: ApprovalStatus.EXPIRED }, + }); + + this.logger.log(`Approval ${approvalId} expired`); + } + + /** + * Expire all old pending approvals + */ + async expireOldApprovals(): Promise { + const result = await this.prisma.approvalRequest.updateMany({ + where: { + status: ApprovalStatus.PENDING, + expiresAt: { lt: new Date() }, + }, + data: { status: ApprovalStatus.EXPIRED }, + }); + + if (result.count > 0) { + this.logger.log(`Expired ${result.count} old approval requests`); + } + + return result.count; + } + + /** + * Update node run status + */ + private async updateNodeRunStatus(nodeRunId: string, status: string): Promise { + try { + await this.prisma.workflowNodeRun.update({ + where: { id: nodeRunId }, + data: { status }, + }); + } catch (error: any) { + this.logger.warn(`Failed to update node run ${nodeRunId} status: ${error.message}`); + } + } + + /** + * Map Prisma model to details interface + */ + private mapToDetails(approval: any): ApprovalRequestDetails { + return { + id: approval.id, + nodeRunId: approval.nodeRunId, + actionHash: approval.actionHash, + toolName: approval.toolName, + toolParams: approval.toolParams, + previewData: approval.previewData as ActionPreview, + status: approval.status as ApprovalStatus, + expiresAt: approval.expiresAt, + reason: approval.reason || undefined, + approvedBy: approval.approvedBy || undefined, + approvedAt: approval.approvedAt || undefined, + rejectedBy: approval.rejectedBy || undefined, + rejectedAt: approval.rejectedAt || undefined, + createdAt: approval.createdAt, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/audit-export.service.ts b/packages/bytebot-workflow-orchestrator/src/services/audit-export.service.ts new file mode 100644 index 000000000..04d7c23dc --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/audit-export.service.ts @@ -0,0 +1,521 @@ +/** + * Audit Export Service + * Phase 10 (v5.5.0): Enterprise Features - Enhanced Audit Log Export + * + * Provides enterprise-grade audit log export capabilities: + * - Streaming export for large datasets + * - Multiple formats (CSV, JSON, NDJSON for SIEM) + * - Scheduled export jobs + * - Retention policy enforcement + * - SIEM integration formats (Splunk, Elasticsearch) + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { Readable } from 'stream'; + +// ============================================================================ +// Types and Interfaces +// ============================================================================ + +export enum ExportFormat { + JSON = 'json', + CSV = 'csv', + NDJSON = 'ndjson', // Newline-delimited JSON for SIEM + SPLUNK = 'splunk', // Splunk HEC format + ELASTICSEARCH = 'elasticsearch', // Elasticsearch bulk format +} + +export interface ExportOptions { + tenantId: string; + format: ExportFormat; + startDate?: Date; + endDate?: Date; + eventTypes?: string[]; + resourceTypes?: string[]; + actorIds?: string[]; + includeMetadata?: boolean; + batchSize?: number; +} + +export interface ScheduledExportConfig { + tenantId: string; + name: string; + format: ExportFormat; + schedule: string; // Cron expression + filters: { + eventTypes?: string[]; + resourceTypes?: string[]; + }; + destination: { + type: 'email' | 'webhook' | 's3' | 'azure_blob' | 'gcs'; + config: Record; + }; + retentionDays?: number; + enabled: boolean; +} + +export interface ExportResult { + success: boolean; + recordCount: number; + format: ExportFormat; + exportedAt: Date; + sizeBytes?: number; + downloadUrl?: string; +} + +// ============================================================================ +// Audit Export Service +// ============================================================================ + +@Injectable() +export class AuditExportService { + private readonly logger = new Logger(AuditExportService.name); + private readonly defaultBatchSize: number; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.defaultBatchSize = parseInt( + this.configService.get('AUDIT_EXPORT_BATCH_SIZE', '1000'), + 10, + ); + this.logger.log('AuditExportService initialized'); + } + + // ========================================================================== + // Streaming Export + // ========================================================================== + + /** + * Create a readable stream for audit log export + * Handles large datasets efficiently with cursor-based pagination + */ + createExportStream(options: ExportOptions): Readable { + const self = this; + const batchSize = options.batchSize || this.defaultBatchSize; + let cursor: string | null = null; + let isFirst = true; + let recordCount = 0; + + const stream = new Readable({ + objectMode: options.format !== ExportFormat.CSV, + async read() { + try { + const records = await self.fetchBatch(options, batchSize, cursor); + + if (records.length === 0) { + // End of data + if (options.format === ExportFormat.JSON) { + this.push(']'); + } + this.push(null); + self.logger.log(`Export complete: ${recordCount} records`); + return; + } + + recordCount += records.length; + cursor = records[records.length - 1].id; + + // Format and push records + const formatted = self.formatRecords(records, options.format, isFirst); + this.push(formatted); + isFirst = false; + } catch (error: any) { + self.logger.error(`Export stream error: ${error.message}`); + this.destroy(error); + } + }, + }); + + // Write format header + if (options.format === ExportFormat.JSON) { + stream.push('['); + } else if (options.format === ExportFormat.CSV) { + stream.push(this.getCSVHeader() + '\n'); + } + + return stream; + } + + /** + * Export audit logs to a string (for smaller exports) + */ + async exportToString(options: ExportOptions): Promise<{ data: string; count: number }> { + const allRecords: any[] = []; + let cursor: string | null = null; + const batchSize = options.batchSize || this.defaultBatchSize; + + // Fetch all records + while (true) { + const records = await this.fetchBatch(options, batchSize, cursor); + if (records.length === 0) break; + + allRecords.push(...records); + cursor = records[records.length - 1].id; + + // Safety limit + if (allRecords.length > 100000) { + this.logger.warn('Export limit reached (100k records), consider using streaming'); + break; + } + } + + // Format all records + const formatted = this.formatAllRecords(allRecords, options.format); + + return { + data: formatted, + count: allRecords.length, + }; + } + + // ========================================================================== + // Format-Specific Methods + // ========================================================================== + + /** + * Format records based on export format + */ + private formatRecords(records: any[], format: ExportFormat, isFirst: boolean): string { + switch (format) { + case ExportFormat.JSON: + const prefix = isFirst ? '' : ','; + return prefix + records.map(r => JSON.stringify(this.transformRecord(r))).join(','); + + case ExportFormat.CSV: + return records.map(r => this.toCSVRow(r)).join('\n') + '\n'; + + case ExportFormat.NDJSON: + return records.map(r => JSON.stringify(this.transformRecord(r))).join('\n') + '\n'; + + case ExportFormat.SPLUNK: + return records.map(r => this.toSplunkFormat(r)).join('\n') + '\n'; + + case ExportFormat.ELASTICSEARCH: + return records.map(r => this.toElasticsearchFormat(r)).join('\n') + '\n'; + + default: + return JSON.stringify(records); + } + } + + /** + * Format all records at once (for smaller exports) + */ + private formatAllRecords(records: any[], format: ExportFormat): string { + switch (format) { + case ExportFormat.JSON: + return JSON.stringify(records.map(r => this.transformRecord(r)), null, 2); + + case ExportFormat.CSV: + const header = this.getCSVHeader(); + const rows = records.map(r => this.toCSVRow(r)); + return [header, ...rows].join('\n'); + + case ExportFormat.NDJSON: + return records.map(r => JSON.stringify(this.transformRecord(r))).join('\n'); + + case ExportFormat.SPLUNK: + return records.map(r => this.toSplunkFormat(r)).join('\n'); + + case ExportFormat.ELASTICSEARCH: + return records.map(r => this.toElasticsearchFormat(r)).join('\n'); + + default: + return JSON.stringify(records); + } + } + + /** + * Transform database record to export format + */ + private transformRecord(record: any): any { + return { + id: record.id, + timestamp: record.timestamp.toISOString(), + eventType: record.eventType, + actor: { + type: record.actorType, + id: record.actorId, + email: record.actorEmail, + name: record.actorName, + ipAddress: record.actorIpAddress, + }, + resource: { + type: record.resourceType, + id: record.resourceId, + name: record.resourceName, + }, + action: { + type: record.actionType, + reason: record.actionReason, + previousState: record.previousState, + newState: record.newState, + }, + context: { + tenantId: record.tenantId, + workspaceId: record.workspaceId, + workflowRunId: record.workflowRunId, + nodeRunId: record.nodeRunId, + }, + metadata: record.metadata, + }; + } + + /** + * Get CSV header row + */ + private getCSVHeader(): string { + return [ + 'id', + 'timestamp', + 'eventType', + 'actorType', + 'actorId', + 'actorEmail', + 'actorName', + 'actorIpAddress', + 'resourceType', + 'resourceId', + 'resourceName', + 'actionType', + 'actionReason', + 'previousState', + 'newState', + 'tenantId', + 'workspaceId', + 'workflowRunId', + ].join(','); + } + + /** + * Convert record to CSV row + */ + private toCSVRow(record: any): string { + const escape = (val: any): string => { + if (val === null || val === undefined) return ''; + const str = String(val); + if (str.includes(',') || str.includes('"') || str.includes('\n')) { + return `"${str.replace(/"/g, '""')}"`; + } + return str; + }; + + return [ + record.id, + record.timestamp?.toISOString(), + record.eventType, + record.actorType, + record.actorId, + record.actorEmail, + record.actorName, + record.actorIpAddress, + record.resourceType, + record.resourceId, + record.resourceName, + record.actionType, + record.actionReason, + record.previousState, + record.newState, + record.tenantId, + record.workspaceId, + record.workflowRunId, + ].map(escape).join(','); + } + + /** + * Convert record to Splunk HEC format + */ + private toSplunkFormat(record: any): string { + const event = { + time: Math.floor(record.timestamp.getTime() / 1000), + host: 'bytebot-orchestrator', + source: 'audit-log', + sourcetype: 'bytebot:audit', + event: this.transformRecord(record), + }; + return JSON.stringify(event); + } + + /** + * Convert record to Elasticsearch bulk format + */ + private toElasticsearchFormat(record: any): string { + const index = `bytebot-audit-${record.timestamp.toISOString().slice(0, 7)}`; // Monthly index + const action = JSON.stringify({ index: { _index: index, _id: record.id } }); + const doc = JSON.stringify({ + '@timestamp': record.timestamp.toISOString(), + ...this.transformRecord(record), + }); + return `${action}\n${doc}`; + } + + // ========================================================================== + // Database Operations + // ========================================================================== + + /** + * Fetch a batch of audit records with cursor pagination + */ + private async fetchBatch( + options: ExportOptions, + limit: number, + cursor: string | null, + ): Promise { + const where: any = { + tenantId: options.tenantId, + }; + + if (options.startDate || options.endDate) { + where.timestamp = {}; + if (options.startDate) { + where.timestamp.gte = options.startDate; + } + if (options.endDate) { + where.timestamp.lte = options.endDate; + } + } + + if (options.eventTypes?.length) { + where.eventType = { in: options.eventTypes }; + } + + if (options.resourceTypes?.length) { + where.resourceType = { in: options.resourceTypes }; + } + + if (options.actorIds?.length) { + where.actorId = { in: options.actorIds }; + } + + const queryOptions: any = { + where, + take: limit, + orderBy: { id: 'asc' }, + }; + + if (cursor) { + queryOptions.cursor = { id: cursor }; + queryOptions.skip = 1; // Skip the cursor record + } + + return this.prisma.auditLog.findMany(queryOptions); + } + + // ========================================================================== + // Export Statistics + // ========================================================================== + + /** + * Get export statistics for a tenant + */ + async getExportStats(tenantId: string, days: number = 30): Promise<{ + totalRecords: number; + recordsByEventType: Record; + recordsByDay: Array<{ date: string; count: number }>; + estimatedExportSize: { csv: number; json: number }; + }> { + const startDate = new Date(); + startDate.setDate(startDate.getDate() - days); + + const [total, byEventType, byDay] = await Promise.all([ + this.prisma.auditLog.count({ + where: { tenantId, timestamp: { gte: startDate } }, + }), + this.prisma.auditLog.groupBy({ + by: ['eventType'], + where: { tenantId, timestamp: { gte: startDate } }, + _count: true, + }), + this.prisma.$queryRaw` + SELECT DATE(timestamp) as date, COUNT(*)::int as count + FROM workflow_orchestrator.audit_logs + WHERE tenant_id = ${tenantId} AND timestamp >= ${startDate} + GROUP BY DATE(timestamp) + ORDER BY date DESC + ` as Promise>, + ]); + + // Estimate export sizes (rough estimates) + const avgRecordSizeCSV = 300; // bytes + const avgRecordSizeJSON = 500; // bytes + + return { + totalRecords: total, + recordsByEventType: byEventType.reduce( + (acc, item) => ({ ...acc, [item.eventType]: item._count }), + {}, + ), + recordsByDay: byDay.map(d => ({ + date: d.date.toISOString().split('T')[0], + count: d.count, + })), + estimatedExportSize: { + csv: total * avgRecordSizeCSV, + json: total * avgRecordSizeJSON, + }, + }; + } + + // ========================================================================== + // Retention Management + // ========================================================================== + + /** + * Apply retention policy to audit logs + */ + async applyRetentionPolicy(tenantId: string, retentionDays: number): Promise { + const cutoffDate = new Date(); + cutoffDate.setDate(cutoffDate.getDate() - retentionDays); + + const result = await this.prisma.auditLog.deleteMany({ + where: { + tenantId, + timestamp: { lt: cutoffDate }, + }, + }); + + if (result.count > 0) { + this.logger.log(`Deleted ${result.count} audit logs for tenant ${tenantId} (retention: ${retentionDays} days)`); + this.eventEmitter.emit('audit.retention.applied', { + tenantId, + deletedCount: result.count, + retentionDays, + }); + } + + return result.count; + } + + /** + * Archive old audit logs before deletion + */ + async archiveBeforeDelete( + tenantId: string, + retentionDays: number, + archiveFormat: ExportFormat = ExportFormat.NDJSON, + ): Promise<{ archived: number; archiveData: string }> { + const cutoffDate = new Date(); + cutoffDate.setDate(cutoffDate.getDate() - retentionDays); + + // Export records that will be deleted + const exportResult = await this.exportToString({ + tenantId, + format: archiveFormat, + endDate: cutoffDate, + }); + + // Delete the records + await this.applyRetentionPolicy(tenantId, retentionDays); + + return { + archived: exportResult.count, + archiveData: exportResult.data, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/audit.service.ts b/packages/bytebot-workflow-orchestrator/src/services/audit.service.ts new file mode 100644 index 000000000..25c3645fb --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/audit.service.ts @@ -0,0 +1,636 @@ +/** + * Audit Logging Service + * Post-M5 Enhancement: Compliance-ready audit trail for approval actions + * + * Best Practices Applied: + * - Immutable audit records (no update/delete) + * - Capture: WHO, WHAT, WHEN, WHERE, WHY + * - Structured JSON format for searchability + * - Retention policy support + * - SOC2/GDPR compliance considerations + * + * Audit Event Types: + * - APPROVAL_REQUESTED: High-risk action awaiting approval + * - APPROVAL_VIEWED: Approver viewed the request + * - APPROVAL_APPROVED: Action was approved + * - APPROVAL_REJECTED: Action was rejected + * - APPROVAL_EXPIRED: Request expired without decision + * - APPROVAL_EXECUTED: Approved action was executed + * - USER_PROMPT_CREATED: External input request created + * - USER_PROMPT_RESOLVED: External input request resolved + * - USER_PROMPT_CANCELLED: External input request cancelled/superseded + * - USER_PROMPT_EXPIRED: External input request expired + * - WEBHOOK_SENT: Notification webhook delivered + * - WEBHOOK_FAILED: Notification webhook failed + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; + +/** + * Audit event types for approval workflow + */ +export enum AuditEventType { + // Approval lifecycle + APPROVAL_REQUESTED = 'APPROVAL_REQUESTED', + APPROVAL_VIEWED = 'APPROVAL_VIEWED', + APPROVAL_APPROVED = 'APPROVAL_APPROVED', + APPROVAL_REJECTED = 'APPROVAL_REJECTED', + APPROVAL_EXPIRED = 'APPROVAL_EXPIRED', + APPROVAL_EXECUTED = 'APPROVAL_EXECUTED', + + // Webhook events + WEBHOOK_SENT = 'WEBHOOK_SENT', + WEBHOOK_FAILED = 'WEBHOOK_FAILED', + + // External input requests (EIR / prompts) + USER_PROMPT_CREATED = 'USER_PROMPT_CREATED', + USER_PROMPT_RESOLVED = 'USER_PROMPT_RESOLVED', + USER_PROMPT_CANCELLED = 'USER_PROMPT_CANCELLED', + USER_PROMPT_EXPIRED = 'USER_PROMPT_EXPIRED', + + // Administrative events + WEBHOOK_CREATED = 'WEBHOOK_CREATED', + WEBHOOK_UPDATED = 'WEBHOOK_UPDATED', + WEBHOOK_DELETED = 'WEBHOOK_DELETED', + WEBHOOK_SECRET_ROTATED = 'WEBHOOK_SECRET_ROTATED', +} + +/** + * Audit log entry - immutable record + */ +export interface AuditLogEntry { + id: string; + timestamp: Date; + eventType: AuditEventType; + + // WHO - Actor information + actor: { + type: 'user' | 'system' | 'agent'; + id?: string; + email?: string; + name?: string; + ipAddress?: string; + userAgent?: string; + }; + + // WHAT - Resource being acted upon + resource: { + type: 'approval' | 'webhook' | 'workflow' | 'node' | 'prompt' | 'goal_spec' | 'desktop_lease'; + id: string; + name?: string; + }; + + // WHERE - Context + context: { + tenantId: string; + workspaceId?: string; + workflowRunId?: string; + nodeRunId?: string; + requestId?: string; + }; + + // WHY - Action details + action: { + type: string; + reason?: string; + previousState?: string; + newState?: string; + }; + + // Additional metadata + metadata?: Record; +} + +/** + * Input for creating audit log entry + */ +export interface CreateAuditLogInput { + eventType: AuditEventType; + actor: AuditLogEntry['actor']; + resource: AuditLogEntry['resource']; + context: AuditLogEntry['context']; + action: AuditLogEntry['action']; + metadata?: Record; +} + +/** + * Query options for audit logs + */ +export interface AuditLogQuery { + tenantId: string; + eventTypes?: AuditEventType[]; + resourceType?: string; + resourceId?: string; + actorId?: string; + startDate?: Date; + endDate?: Date; + limit?: number; + offset?: number; +} + +@Injectable() +export class AuditService { + private readonly logger = new Logger(AuditService.name); + private readonly retentionDays: number; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + ) { + this.retentionDays = parseInt( + this.configService.get('AUDIT_LOG_RETENTION_DAYS', '365'), + 10, + ); + + this.logger.log(`AuditService initialized (retention: ${this.retentionDays} days)`); + } + + /** + * Create an immutable audit log entry + */ + async log(input: CreateAuditLogInput): Promise { + const entry = await this.prisma.auditLog.create({ + data: { + eventType: input.eventType, + actorType: input.actor.type, + actorId: input.actor.id, + actorEmail: input.actor.email, + actorName: input.actor.name, + actorIpAddress: input.actor.ipAddress, + actorUserAgent: input.actor.userAgent, + resourceType: input.resource.type, + resourceId: input.resource.id, + resourceName: input.resource.name, + tenantId: input.context.tenantId, + workspaceId: input.context.workspaceId, + workflowRunId: input.context.workflowRunId, + nodeRunId: input.context.nodeRunId, + requestId: input.context.requestId, + actionType: input.action.type, + actionReason: input.action.reason, + previousState: input.action.previousState, + newState: input.action.newState, + metadata: input.metadata, + expiresAt: new Date(Date.now() + this.retentionDays * 24 * 60 * 60 * 1000), + }, + }); + + this.logger.debug( + `Audit log created: ${input.eventType} on ${input.resource.type}:${input.resource.id}`, + ); + + return this.mapToEntry(entry); + } + + /** + * Log approval request created + */ + async logApprovalRequested( + approval: { + id: string; + nodeRunId: string; + toolName: string; + previewData?: any; + }, + context: { + tenantId: string; + workspaceId?: string; + workflowRunId?: string; + }, + ): Promise { + await this.log({ + eventType: AuditEventType.APPROVAL_REQUESTED, + actor: { type: 'agent', id: 'bytebot-agent' }, + resource: { type: 'approval', id: approval.id, name: approval.toolName }, + context: { + tenantId: context.tenantId, + workspaceId: context.workspaceId, + workflowRunId: context.workflowRunId, + nodeRunId: approval.nodeRunId, + }, + action: { + type: 'create', + newState: 'PENDING', + }, + metadata: { + toolName: approval.toolName, + riskLevel: approval.previewData?.riskLevel, + summary: approval.previewData?.summary, + recipient: approval.previewData?.recipient, + }, + }); + } + + /** + * Log approval decision (approved or rejected) + */ + async logApprovalDecision( + approval: { + id: string; + nodeRunId: string; + toolName: string; + status: string; + reason?: string; + }, + reviewer: { + id: string; + email?: string; + name?: string; + ipAddress?: string; + userAgent?: string; + }, + context: { + tenantId: string; + workspaceId?: string; + workflowRunId?: string; + }, + approved: boolean, + ): Promise { + await this.log({ + eventType: approved + ? AuditEventType.APPROVAL_APPROVED + : AuditEventType.APPROVAL_REJECTED, + actor: { + type: 'user', + id: reviewer.id, + email: reviewer.email, + name: reviewer.name, + ipAddress: reviewer.ipAddress, + userAgent: reviewer.userAgent, + }, + resource: { type: 'approval', id: approval.id, name: approval.toolName }, + context: { + tenantId: context.tenantId, + workspaceId: context.workspaceId, + workflowRunId: context.workflowRunId, + nodeRunId: approval.nodeRunId, + }, + action: { + type: approved ? 'approve' : 'reject', + reason: approval.reason, + previousState: 'PENDING', + newState: approval.status, + }, + }); + } + + /** + * Log approval expiration + */ + async logApprovalExpired( + approval: { + id: string; + nodeRunId: string; + toolName: string; + }, + context: { + tenantId: string; + workspaceId?: string; + workflowRunId?: string; + }, + ): Promise { + await this.log({ + eventType: AuditEventType.APPROVAL_EXPIRED, + actor: { type: 'system', id: 'cleanup-service' }, + resource: { type: 'approval', id: approval.id, name: approval.toolName }, + context: { + tenantId: context.tenantId, + workspaceId: context.workspaceId, + workflowRunId: context.workflowRunId, + nodeRunId: approval.nodeRunId, + }, + action: { + type: 'expire', + previousState: 'PENDING', + newState: 'EXPIRED', + }, + }); + } + + /** + * Log approved action execution + */ + async logApprovalExecuted( + approval: { + id: string; + nodeRunId: string; + toolName: string; + }, + context: { + tenantId: string; + workspaceId?: string; + workflowRunId?: string; + }, + executionResult: { + success: boolean; + error?: string; + }, + ): Promise { + await this.log({ + eventType: AuditEventType.APPROVAL_EXECUTED, + actor: { type: 'agent', id: 'bytebot-agent' }, + resource: { type: 'approval', id: approval.id, name: approval.toolName }, + context: { + tenantId: context.tenantId, + workspaceId: context.workspaceId, + workflowRunId: context.workflowRunId, + nodeRunId: approval.nodeRunId, + }, + action: { + type: 'execute', + previousState: 'APPROVED', + newState: executionResult.success ? 'EXECUTED' : 'FAILED', + }, + metadata: { + success: executionResult.success, + error: executionResult.error, + }, + }); + } + + /** + * Log webhook delivery + */ + async logWebhookDelivery( + webhook: { id: string; url: string }, + event: { id: string; type: string }, + result: { success: boolean; error?: string; statusCode?: number }, + context: { tenantId: string; approvalId?: string }, + ): Promise { + await this.log({ + eventType: result.success + ? AuditEventType.WEBHOOK_SENT + : AuditEventType.WEBHOOK_FAILED, + actor: { type: 'system', id: 'webhook-service' }, + resource: { type: 'webhook', id: webhook.id }, + context: { + tenantId: context.tenantId, + requestId: event.id, + }, + action: { + type: 'deliver', + newState: result.success ? 'delivered' : 'failed', + }, + metadata: { + eventType: event.type, + webhookUrl: this.maskUrl(webhook.url), + statusCode: result.statusCode, + error: result.error, + approvalId: context.approvalId, + }, + }); + } + + /** + * Query audit logs with filtering + */ + async query(options: AuditLogQuery): Promise<{ + logs: AuditLogEntry[]; + total: number; + }> { + const where: any = { + tenantId: options.tenantId, + }; + + if (options.eventTypes?.length) { + where.eventType = { in: options.eventTypes }; + } + + if (options.resourceType) { + where.resourceType = options.resourceType; + } + + if (options.resourceId) { + where.resourceId = options.resourceId; + } + + if (options.actorId) { + where.actorId = options.actorId; + } + + if (options.startDate || options.endDate) { + where.timestamp = {}; + if (options.startDate) { + where.timestamp.gte = options.startDate; + } + if (options.endDate) { + where.timestamp.lte = options.endDate; + } + } + + const [logs, total] = await Promise.all([ + this.prisma.auditLog.findMany({ + where, + orderBy: { timestamp: 'desc' }, + take: options.limit || 50, + skip: options.offset || 0, + }), + this.prisma.auditLog.count({ where }), + ]); + + return { + logs: logs.map((l) => this.mapToEntry(l)), + total, + }; + } + + /** + * Get audit trail for a specific approval + */ + async getApprovalAuditTrail( + approvalId: string, + tenantId: string, + ): Promise { + const logs = await this.prisma.auditLog.findMany({ + where: { + tenantId, + resourceType: 'approval', + resourceId: approvalId, + }, + orderBy: { timestamp: 'asc' }, + }); + + return logs.map((l) => this.mapToEntry(l)); + } + + /** + * Export audit logs for compliance reporting + * Returns CSV-formatted data + */ + async exportLogs( + options: AuditLogQuery & { format?: 'json' | 'csv' }, + ): Promise { + const { logs } = await this.query({ ...options, limit: 10000 }); + + if (options.format === 'csv') { + return this.toCSV(logs); + } + + return JSON.stringify(logs, null, 2); + } + + /** + * Cleanup expired audit logs + * Called by scheduled cleanup service + */ + async cleanupExpired(): Promise { + const result = await this.prisma.auditLog.deleteMany({ + where: { + expiresAt: { lt: new Date() }, + }, + }); + + if (result.count > 0) { + this.logger.log(`Cleaned up ${result.count} expired audit logs`); + } + + return result.count; + } + + /** + * Get audit log statistics + */ + async getStats( + tenantId: string, + startDate?: Date, + endDate?: Date, + ): Promise<{ + totalEvents: number; + byEventType: Record; + byResourceType: Record; + }> { + const where: any = { tenantId }; + + if (startDate || endDate) { + where.timestamp = {}; + if (startDate) where.timestamp.gte = startDate; + if (endDate) where.timestamp.lte = endDate; + } + + const [total, byEventType, byResourceType] = await Promise.all([ + this.prisma.auditLog.count({ where }), + this.prisma.auditLog.groupBy({ + by: ['eventType'], + where, + _count: true, + }), + this.prisma.auditLog.groupBy({ + by: ['resourceType'], + where, + _count: true, + }), + ]); + + return { + totalEvents: total, + byEventType: byEventType.reduce( + (acc, item) => ({ ...acc, [item.eventType]: item._count }), + {}, + ), + byResourceType: byResourceType.reduce( + (acc, item) => ({ ...acc, [item.resourceType]: item._count }), + {}, + ), + }; + } + + /** + * Map database record to audit log entry + */ + private mapToEntry(record: any): AuditLogEntry { + return { + id: record.id, + timestamp: record.timestamp, + eventType: record.eventType as AuditEventType, + actor: { + type: record.actorType, + id: record.actorId, + email: record.actorEmail, + name: record.actorName, + ipAddress: record.actorIpAddress, + userAgent: record.actorUserAgent, + }, + resource: { + type: record.resourceType, + id: record.resourceId, + name: record.resourceName, + }, + context: { + tenantId: record.tenantId, + workspaceId: record.workspaceId, + workflowRunId: record.workflowRunId, + nodeRunId: record.nodeRunId, + requestId: record.requestId, + }, + action: { + type: record.actionType, + reason: record.actionReason, + previousState: record.previousState, + newState: record.newState, + }, + metadata: record.metadata, + }; + } + + /** + * Convert logs to CSV format + */ + private toCSV(logs: AuditLogEntry[]): string { + const headers = [ + 'timestamp', + 'eventType', + 'actorType', + 'actorId', + 'actorEmail', + 'resourceType', + 'resourceId', + 'tenantId', + 'actionType', + 'actionReason', + 'previousState', + 'newState', + ]; + + const rows = logs.map((log) => [ + log.timestamp.toISOString(), + log.eventType, + log.actor.type, + log.actor.id || '', + log.actor.email || '', + log.resource.type, + log.resource.id, + log.context.tenantId, + log.action.type, + log.action.reason || '', + log.action.previousState || '', + log.action.newState || '', + ]); + + const csvContent = [ + headers.join(','), + ...rows.map((row) => row.map((cell) => `"${String(cell).replace(/"/g, '""')}"`).join(',')), + ].join('\n'); + + return csvContent; + } + + /** + * Mask URL for security (hide credentials) + */ + private maskUrl(url: string): string { + try { + const parsed = new URL(url); + if (parsed.password) { + parsed.password = '***'; + } + if (parsed.username) { + parsed.username = '***'; + } + return parsed.toString(); + } catch { + return url.replace(/:[^@]*@/, ':***@'); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/background-mode.service.ts b/packages/bytebot-workflow-orchestrator/src/services/background-mode.service.ts new file mode 100644 index 000000000..101ced5bd --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/background-mode.service.ts @@ -0,0 +1,634 @@ +/** + * Background Mode Service + * v1.0.0: OpenAI-Style Background Mode for Long-Running Operations + * + * Implements industry-standard patterns for asynchronous task execution: + * - OpenAI: Assistants API with async runs and polling + * - Anthropic: Claude batch API pattern + * - AWS: Step Functions async execution with callbacks + * + * Key Features: + * 1. Async task submission with immediate response + * 2. Progress polling and streaming updates + * 3. Webhook callbacks on completion + * 4. Timeout handling and graceful degradation + * 5. Task queuing with priority support + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { SchedulerRegistry } from '@nestjs/schedule'; + +// Task states following OpenAI Assistants API pattern +export type BackgroundTaskStatus = + | 'queued' // Task waiting to start + | 'in_progress' // Task actively running + | 'completed' // Task finished successfully + | 'failed' // Task failed with error + | 'cancelled' // Task was cancelled + | 'expired'; // Task timed out + +export interface BackgroundTask { + id: string; + goalRunId: string; + type: 'goal_execution' | 'batch_processing' | 'analysis' | 'export'; + status: BackgroundTaskStatus; + createdAt: Date; + startedAt?: Date; + completedAt?: Date; + expiresAt: Date; + progress: { + current: number; + total: number; + message: string; + }; + result?: { + success: boolean; + data?: any; + error?: string; + }; + metadata: { + priority: 'low' | 'normal' | 'high'; + callbackUrl?: string; + webhookSecret?: string; + estimatedDurationMs?: number; + }; +} + +export interface TaskSubmissionResult { + taskId: string; + status: BackgroundTaskStatus; + estimatedCompletionTime?: Date; + pollingUrl: string; + webhookConfigured: boolean; +} + +export interface TaskProgressUpdate { + taskId: string; + status: BackgroundTaskStatus; + progress: { + current: number; + total: number; + message: string; + percentComplete: number; + }; + estimatedTimeRemainingMs?: number; + result?: any; +} + +@Injectable() +export class BackgroundModeService { + private readonly logger = new Logger(BackgroundModeService.name); + private readonly enabled: boolean; + + // In-memory task storage (production would use Redis/database) + private tasks: Map = new Map(); + + // Task queue by priority + private taskQueue: { + high: string[]; + normal: string[]; + low: string[]; + } = { high: [], normal: [], low: [] }; + + // Configuration + private readonly maxConcurrentTasks: number; + private readonly defaultTimeoutMs: number; + private readonly maxTimeoutMs: number; + private readonly progressPollIntervalMs: number; + private readonly baseUrl: string; + + // Active task count + private activeTaskCount: number = 0; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly schedulerRegistry: SchedulerRegistry, + ) { + this.enabled = this.configService.get('BACKGROUND_MODE_ENABLED', 'true') === 'true'; + this.maxConcurrentTasks = parseInt(this.configService.get('MAX_CONCURRENT_BACKGROUND_TASKS', '10'), 10); + this.defaultTimeoutMs = parseInt(this.configService.get('BACKGROUND_TASK_TIMEOUT_MS', '3600000'), 10); // 1 hour + this.maxTimeoutMs = parseInt(this.configService.get('MAX_BACKGROUND_TASK_TIMEOUT_MS', '86400000'), 10); // 24 hours + this.progressPollIntervalMs = parseInt(this.configService.get('PROGRESS_POLL_INTERVAL_MS', '5000'), 10); + this.baseUrl = this.configService.get('API_BASE_URL', 'http://localhost:8080'); + + this.logger.log( + `Background mode ${this.enabled ? 'enabled' : 'disabled'} ` + + `(max concurrent: ${this.maxConcurrentTasks}, timeout: ${this.defaultTimeoutMs}ms)` + ); + + // Start the task processor + if (this.enabled) { + this.startTaskProcessor(); + this.startExpirationChecker(); + } + } + + /** + * Submit a task for background execution + */ + async submitTask( + goalRunId: string, + type: BackgroundTask['type'], + options: { + priority?: 'low' | 'normal' | 'high'; + timeoutMs?: number; + callbackUrl?: string; + webhookSecret?: string; + estimatedDurationMs?: number; + } = {}, + ): Promise { + if (!this.enabled) { + throw new Error('Background mode is disabled'); + } + + const taskId = this.generateTaskId(); + const now = new Date(); + const timeoutMs = Math.min(options.timeoutMs || this.defaultTimeoutMs, this.maxTimeoutMs); + const priority = options.priority || 'normal'; + + const task: BackgroundTask = { + id: taskId, + goalRunId, + type, + status: 'queued', + createdAt: now, + expiresAt: new Date(now.getTime() + timeoutMs), + progress: { + current: 0, + total: 100, + message: 'Task queued', + }, + metadata: { + priority, + callbackUrl: options.callbackUrl, + webhookSecret: options.webhookSecret, + estimatedDurationMs: options.estimatedDurationMs, + }, + }; + + // Store task + this.tasks.set(taskId, task); + + // Add to queue + this.taskQueue[priority].push(taskId); + + this.logger.log(`Task ${taskId} submitted for goal ${goalRunId} (priority: ${priority})`); + + // Emit event + this.eventEmitter.emit('background.task.submitted', { + taskId, + goalRunId, + type, + priority, + }); + + // Calculate estimated completion time + const queuePosition = this.calculateQueuePosition(taskId); + const estimatedWaitMs = queuePosition * (options.estimatedDurationMs || 60000); + const estimatedCompletionTime = new Date(now.getTime() + estimatedWaitMs); + + return { + taskId, + status: 'queued', + estimatedCompletionTime, + pollingUrl: `${this.baseUrl}/api/v1/background-tasks/${taskId}`, + webhookConfigured: !!options.callbackUrl, + }; + } + + /** + * Get task status and progress + */ + getTaskStatus(taskId: string): TaskProgressUpdate | null { + const task = this.tasks.get(taskId); + if (!task) { + return null; + } + + const percentComplete = Math.round((task.progress.current / task.progress.total) * 100); + + // Estimate remaining time based on progress + let estimatedTimeRemainingMs: number | undefined; + if (task.status === 'in_progress' && task.startedAt && task.progress.current > 0) { + const elapsedMs = Date.now() - task.startedAt.getTime(); + const progressPerMs = task.progress.current / elapsedMs; + const remaining = task.progress.total - task.progress.current; + estimatedTimeRemainingMs = Math.round(remaining / progressPerMs); + } + + return { + taskId, + status: task.status, + progress: { + ...task.progress, + percentComplete, + }, + estimatedTimeRemainingMs, + result: task.result, + }; + } + + /** + * Update task progress (called by executing service) + */ + updateProgress( + taskId: string, + current: number, + total: number, + message: string, + ): void { + const task = this.tasks.get(taskId); + if (!task) { + this.logger.warn(`Cannot update progress for unknown task ${taskId}`); + return; + } + + task.progress = { current, total, message }; + + // Emit progress event for streaming updates + this.eventEmitter.emit('background.task.progress', { + taskId, + goalRunId: task.goalRunId, + current, + total, + message, + percentComplete: Math.round((current / total) * 100), + }); + } + + /** + * Mark task as completed + */ + async completeTask(taskId: string, result: any): Promise { + const task = this.tasks.get(taskId); + if (!task) { + this.logger.warn(`Cannot complete unknown task ${taskId}`); + return; + } + + task.status = 'completed'; + task.completedAt = new Date(); + task.result = { success: true, data: result }; + task.progress = { + current: task.progress.total, + total: task.progress.total, + message: 'Task completed successfully', + }; + + this.activeTaskCount--; + + this.logger.log(`Task ${taskId} completed successfully`); + + // Emit completion event + this.eventEmitter.emit('background.task.completed', { + taskId, + goalRunId: task.goalRunId, + result, + }); + + // Send webhook callback if configured + if (task.metadata.callbackUrl) { + await this.sendWebhookCallback(task, 'completed'); + } + } + + /** + * Mark task as failed + */ + async failTask(taskId: string, error: string): Promise { + const task = this.tasks.get(taskId); + if (!task) { + this.logger.warn(`Cannot fail unknown task ${taskId}`); + return; + } + + task.status = 'failed'; + task.completedAt = new Date(); + task.result = { success: false, error }; + task.progress.message = `Failed: ${error}`; + + this.activeTaskCount--; + + this.logger.error(`Task ${taskId} failed: ${error}`); + + // Emit failure event + this.eventEmitter.emit('background.task.failed', { + taskId, + goalRunId: task.goalRunId, + error, + }); + + // Send webhook callback if configured + if (task.metadata.callbackUrl) { + await this.sendWebhookCallback(task, 'failed'); + } + } + + /** + * Cancel a queued or running task + */ + async cancelTask(taskId: string): Promise { + const task = this.tasks.get(taskId); + if (!task) { + return false; + } + + if (task.status === 'completed' || task.status === 'failed' || task.status === 'cancelled') { + return false; // Already terminal state + } + + task.status = 'cancelled'; + task.completedAt = new Date(); + task.result = { success: false, error: 'Task cancelled by user' }; + + // Remove from queue if queued + if (task.metadata.priority) { + const queue = this.taskQueue[task.metadata.priority]; + const index = queue.indexOf(taskId); + if (index !== -1) { + queue.splice(index, 1); + } + } + + if (task.startedAt) { + this.activeTaskCount--; + } + + this.logger.log(`Task ${taskId} cancelled`); + + // Emit cancellation event + this.eventEmitter.emit('background.task.cancelled', { + taskId, + goalRunId: task.goalRunId, + }); + + // Send webhook callback if configured + if (task.metadata.callbackUrl) { + await this.sendWebhookCallback(task, 'cancelled'); + } + + return true; + } + + /** + * Get all tasks for a goal + */ + getTasksForGoal(goalRunId: string): BackgroundTask[] { + return Array.from(this.tasks.values()) + .filter(t => t.goalRunId === goalRunId) + .sort((a, b) => b.createdAt.getTime() - a.createdAt.getTime()); + } + + /** + * Get queue statistics + */ + getQueueStats(): { + queuedTasks: number; + activeTasks: number; + completedTasks: number; + failedTasks: number; + queueByPriority: { high: number; normal: number; low: number }; + } { + const stats = { + queuedTasks: 0, + activeTasks: 0, + completedTasks: 0, + failedTasks: 0, + queueByPriority: { + high: this.taskQueue.high.length, + normal: this.taskQueue.normal.length, + low: this.taskQueue.low.length, + }, + }; + + for (const task of this.tasks.values()) { + switch (task.status) { + case 'queued': + stats.queuedTasks++; + break; + case 'in_progress': + stats.activeTasks++; + break; + case 'completed': + stats.completedTasks++; + break; + case 'failed': + case 'expired': + case 'cancelled': + stats.failedTasks++; + break; + } + } + + return stats; + } + + /** + * Start the task processor interval + */ + private startTaskProcessor(): void { + const callback = async () => { + await this.processQueue(); + }; + + const interval = setInterval(callback, 1000); // Check every second + this.schedulerRegistry.addInterval('background-task-processor', interval); + this.logger.debug('Background task processor started'); + } + + /** + * Start the expiration checker interval + */ + private startExpirationChecker(): void { + const callback = async () => { + await this.checkExpirations(); + }; + + const interval = setInterval(callback, 30000); // Check every 30 seconds + this.schedulerRegistry.addInterval('background-expiration-checker', interval); + this.logger.debug('Background expiration checker started'); + } + + /** + * Process task queue - start next task if capacity available + */ + private async processQueue(): Promise { + if (this.activeTaskCount >= this.maxConcurrentTasks) { + return; // At capacity + } + + // Get next task from queue (priority order: high > normal > low) + let nextTaskId: string | undefined; + + if (this.taskQueue.high.length > 0) { + nextTaskId = this.taskQueue.high.shift(); + } else if (this.taskQueue.normal.length > 0) { + nextTaskId = this.taskQueue.normal.shift(); + } else if (this.taskQueue.low.length > 0) { + nextTaskId = this.taskQueue.low.shift(); + } + + if (!nextTaskId) { + return; // No tasks in queue + } + + const task = this.tasks.get(nextTaskId); + if (!task || task.status !== 'queued') { + return; + } + + // Start the task + task.status = 'in_progress'; + task.startedAt = new Date(); + task.progress.message = 'Task started'; + this.activeTaskCount++; + + this.logger.log(`Starting background task ${nextTaskId}`); + + // Emit start event - the actual execution is handled by event listeners + this.eventEmitter.emit('background.task.started', { + taskId: nextTaskId, + goalRunId: task.goalRunId, + type: task.type, + }); + } + + /** + * Check for expired tasks + */ + private async checkExpirations(): Promise { + const now = new Date(); + + for (const task of this.tasks.values()) { + if ( + (task.status === 'queued' || task.status === 'in_progress') && + task.expiresAt < now + ) { + // Decrement active count before changing status + const wasInProgress = task.status === 'in_progress'; + + task.status = 'expired'; + task.completedAt = now; + task.result = { success: false, error: 'Task exceeded timeout' }; + + if (wasInProgress) { + this.activeTaskCount--; + } + + this.logger.warn(`Task ${task.id} expired`); + + // Emit expiration event + this.eventEmitter.emit('background.task.expired', { + taskId: task.id, + goalRunId: task.goalRunId, + }); + + // Send webhook callback if configured + if (task.metadata.callbackUrl) { + await this.sendWebhookCallback(task, 'expired'); + } + } + } + + // Clean up old completed tasks (older than 1 hour) + const cleanupThreshold = new Date(now.getTime() - 3600000); + for (const [taskId, task] of this.tasks.entries()) { + if ( + task.completedAt && + task.completedAt < cleanupThreshold && + ['completed', 'failed', 'cancelled', 'expired'].includes(task.status) + ) { + this.tasks.delete(taskId); + this.logger.debug(`Cleaned up old task ${taskId}`); + } + } + } + + /** + * Send webhook callback + */ + private async sendWebhookCallback( + task: BackgroundTask, + event: 'completed' | 'failed' | 'cancelled' | 'expired', + ): Promise { + if (!task.metadata.callbackUrl) { + return; + } + + try { + const payload = { + event: `task.${event}`, + taskId: task.id, + goalRunId: task.goalRunId, + status: task.status, + result: task.result, + timestamp: new Date().toISOString(), + }; + + const headers: Record = { + 'Content-Type': 'application/json', + }; + + // Add HMAC signature if secret configured + if (task.metadata.webhookSecret) { + const crypto = require('crypto'); + const signature = crypto + .createHmac('sha256', task.metadata.webhookSecret) + .update(JSON.stringify(payload)) + .digest('hex'); + headers['X-Webhook-Signature'] = `sha256=${signature}`; + } + + await fetch(task.metadata.callbackUrl, { + method: 'POST', + headers, + body: JSON.stringify(payload), + }); + + this.logger.debug(`Webhook callback sent for task ${task.id}`); + } catch (error) { + this.logger.warn(`Webhook callback failed for task ${task.id}: ${(error as Error).message}`); + } + } + + /** + * Generate unique task ID + */ + private generateTaskId(): string { + return `bg-${Date.now()}-${Math.random().toString(36).substring(2, 10)}`; + } + + /** + * Calculate queue position for a task + */ + private calculateQueuePosition(taskId: string): number { + let position = 0; + + // Count all higher priority tasks ahead + position += this.taskQueue.high.indexOf(taskId) === -1 + ? this.taskQueue.high.length + : this.taskQueue.high.indexOf(taskId); + + const normalIndex = this.taskQueue.normal.indexOf(taskId); + if (normalIndex !== -1) { + position += normalIndex; + } else { + position += this.taskQueue.normal.length; + } + + const lowIndex = this.taskQueue.low.indexOf(taskId); + if (lowIndex !== -1) { + position += lowIndex; + } + + // Add currently active tasks + position += this.activeTaskCount; + + return position; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/batch.service.ts b/packages/bytebot-workflow-orchestrator/src/services/batch.service.ts new file mode 100644 index 000000000..0bc0dcb1e --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/batch.service.ts @@ -0,0 +1,812 @@ +/** + * Batch Service + * Phase 7: Enhanced Features + * + * Responsibilities: + * - Create and manage batch goal runs + * - Execute multiple goals in parallel or sequence + * - Track batch progress and completion + * - Handle batch-level error policies (stop on failure, etc.) + * - Rate limit batch execution to respect system limits + */ + +import { + Injectable, + Logger, + NotFoundException, + BadRequestException, +} from '@nestjs/common'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { GoalRunService, GoalConstraints, GoalRunStatus } from './goal-run.service'; +import { GoalTemplateService } from './goal-template.service'; +import { createId } from '@paralleldrive/cuid2'; +import { Prisma } from '@prisma/client'; + +// Input types +export interface CreateBatchInput { + tenantId: string; + name: string; + description?: string; + executionMode?: 'PARALLEL' | 'SEQUENTIAL'; + maxConcurrency?: number; + stopOnFailure?: boolean; + goals: BatchGoalInput[]; +} + +export interface BatchGoalInput { + goal: string; + constraints?: GoalConstraints; + templateId?: string; + variableValues?: Record; +} + +export interface BatchFilters { + status?: string; + page?: number; + pageSize?: number; +} + +// Status constants +export type BatchStatus = + | 'PENDING' + | 'RUNNING' + | 'COMPLETED' + | 'PARTIALLY_COMPLETED' + | 'FAILED' + | 'CANCELLED'; + +export type BatchItemStatus = + | 'PENDING' + | 'QUEUED' + | 'RUNNING' + | 'COMPLETED' + | 'FAILED' + | 'CANCELLED' + | 'SKIPPED'; + +// Response types +export interface BatchResponse { + id: string; + tenantId: string; + name: string; + description?: string | null; + executionMode: string; + maxConcurrency: number; + stopOnFailure: boolean; + status: BatchStatus; + totalGoals: number; + completedGoals: number; + failedGoals: number; + cancelledGoals: number; + error?: string | null; + createdAt: Date; + updatedAt: Date; + startedAt?: Date | null; + completedAt?: Date | null; + progress: number; // 0-100 +} + +export interface BatchItemResponse { + id: string; + batchId: string; + goal: string; + constraints: GoalConstraints; + templateId?: string | null; + variableValues: Record; + order: number; + status: BatchItemStatus; + goalRunId?: string | null; + error?: string | null; + createdAt: Date; + updatedAt: Date; + startedAt?: Date | null; + completedAt?: Date | null; +} + +export interface BatchWithItemsResponse extends BatchResponse { + items: BatchItemResponse[]; +} + +export interface PaginatedResponse { + data: T[]; + total: number; + page: number; + pageSize: number; + hasMore: boolean; +} + +@Injectable() +export class BatchService { + private readonly logger = new Logger(BatchService.name); + private runningBatches: Map = new Map(); // Track running batch loops + + constructor( + private prisma: PrismaService, + private goalRunService: GoalRunService, + private goalTemplateService: GoalTemplateService, + private eventEmitter: EventEmitter2, + ) {} + + /** + * Create a new batch + */ + async create(input: CreateBatchInput): Promise { + const batchId = `batch-${createId()}`; + + this.logger.log(`Creating batch ${batchId}: "${input.name}" with ${input.goals.length} goals`); + + if (!input.goals || input.goals.length === 0) { + throw new BadRequestException('Batch must contain at least one goal'); + } + + if (input.goals.length > 100) { + throw new BadRequestException('Batch cannot contain more than 100 goals'); + } + + // Create batch with items in a transaction + const batch = await this.prisma.$transaction(async (tx) => { + // Create batch + const newBatch = await tx.goalRunBatch.create({ + data: { + id: batchId, + tenantId: input.tenantId, + name: input.name, + description: input.description, + executionMode: input.executionMode || 'PARALLEL', + maxConcurrency: input.maxConcurrency || 5, + stopOnFailure: input.stopOnFailure || false, + status: 'PENDING', + totalGoals: input.goals.length, + }, + }); + + // Create batch items + const itemsData = input.goals.map((goal, index) => ({ + id: `bi-${createId()}`, + batchId, + goal: goal.goal, + constraints: (goal.constraints || {}) as object, + templateId: goal.templateId, + variableValues: (goal.variableValues || {}) as object, + order: index + 1, + status: 'PENDING', + })); + + await tx.goalRunBatchItem.createMany({ + data: itemsData, + }); + + return newBatch; + }); + + // Fetch items + const items = await this.prisma.goalRunBatchItem.findMany({ + where: { batchId }, + orderBy: { order: 'asc' }, + }); + + this.eventEmitter.emit('batch.created', { + batchId, + tenantId: input.tenantId, + goalCount: input.goals.length, + }); + + return { + ...this.toBatchResponse(batch), + items: items.map(this.toItemResponse), + }; + } + + /** + * Get batch by ID + */ + async findById(batchId: string): Promise { + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + }); + + if (!batch) { + throw new NotFoundException(`Batch ${batchId} not found`); + } + + return this.toBatchResponse(batch); + } + + /** + * Get batch with items + */ + async findByIdWithItems(batchId: string): Promise { + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + include: { + items: { + orderBy: { order: 'asc' }, + }, + }, + }); + + if (!batch) { + throw new NotFoundException(`Batch ${batchId} not found`); + } + + return { + ...this.toBatchResponse(batch), + items: batch.items.map(this.toItemResponse), + }; + } + + /** + * List batches for a tenant + */ + async findByTenant( + tenantId: string, + filters?: BatchFilters, + ): Promise> { + const page = filters?.page || 1; + const pageSize = filters?.pageSize || 20; + const skip = (page - 1) * pageSize; + + const where: Prisma.GoalRunBatchWhereInput = { + tenantId, + ...(filters?.status && { status: filters.status }), + }; + + const [batches, total] = await Promise.all([ + this.prisma.goalRunBatch.findMany({ + where, + orderBy: { createdAt: 'desc' }, + skip, + take: pageSize, + }), + this.prisma.goalRunBatch.count({ where }), + ]); + + return { + data: batches.map(this.toBatchResponse), + total, + page, + pageSize, + hasMore: skip + batches.length < total, + }; + } + + /** + * Start batch execution + */ + async start(batchId: string): Promise { + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + }); + + if (!batch) { + throw new NotFoundException(`Batch ${batchId} not found`); + } + + if (batch.status !== 'PENDING') { + throw new BadRequestException(`Batch is already ${batch.status.toLowerCase()}`); + } + + this.logger.log(`Starting batch ${batchId}`); + + const updatedBatch = await this.prisma.goalRunBatch.update({ + where: { id: batchId }, + data: { + status: 'RUNNING', + startedAt: new Date(), + }, + }); + + // Mark all items as queued + await this.prisma.goalRunBatchItem.updateMany({ + where: { batchId }, + data: { status: 'QUEUED' }, + }); + + // Start the execution loop + this.executeBatch(batchId); + + this.eventEmitter.emit('batch.started', { batchId }); + + return this.toBatchResponse(updatedBatch); + } + + /** + * Cancel batch execution + */ + async cancel(batchId: string, reason?: string): Promise { + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + include: { items: true }, + }); + + if (!batch) { + throw new NotFoundException(`Batch ${batchId} not found`); + } + + if (batch.status === 'COMPLETED' || batch.status === 'CANCELLED') { + throw new BadRequestException(`Batch is already ${batch.status.toLowerCase()}`); + } + + this.logger.log(`Cancelling batch ${batchId}: ${reason}`); + + // Stop the execution loop + this.runningBatches.set(batchId, false); + + // Cancel running goal runs + for (const item of batch.items) { + if (item.goalRunId && item.status === 'RUNNING') { + try { + await this.goalRunService.cancelGoalRun(item.goalRunId, 'Batch cancelled'); + } catch (error: any) { + this.logger.warn(`Failed to cancel goal run ${item.goalRunId}: ${error.message}`); + } + } + } + + // Update pending/queued items to cancelled + await this.prisma.goalRunBatchItem.updateMany({ + where: { + batchId, + status: { in: ['PENDING', 'QUEUED', 'RUNNING'] }, + }, + data: { + status: 'CANCELLED', + completedAt: new Date(), + }, + }); + + // Update batch + const cancelledCount = batch.items.filter( + (i) => i.status === 'PENDING' || i.status === 'QUEUED' || i.status === 'RUNNING', + ).length; + + const updatedBatch = await this.prisma.goalRunBatch.update({ + where: { id: batchId }, + data: { + status: 'CANCELLED', + error: reason || 'Cancelled by user', + cancelledGoals: { increment: cancelledCount }, + completedAt: new Date(), + }, + }); + + this.eventEmitter.emit('batch.cancelled', { batchId, reason }); + + return this.toBatchResponse(updatedBatch); + } + + /** + * Get batch item details + */ + async getItem(batchId: string, itemId: string): Promise { + const item = await this.prisma.goalRunBatchItem.findFirst({ + where: { id: itemId, batchId }, + }); + + if (!item) { + throw new NotFoundException(`Batch item ${itemId} not found`); + } + + return this.toItemResponse(item); + } + + /** + * Retry failed items in a batch + */ + async retryFailed(batchId: string): Promise { + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + }); + + if (!batch) { + throw new NotFoundException(`Batch ${batchId} not found`); + } + + if (batch.status !== 'PARTIALLY_COMPLETED' && batch.status !== 'FAILED') { + throw new BadRequestException('Can only retry failed or partially completed batches'); + } + + this.logger.log(`Retrying failed items in batch ${batchId}`); + + // Reset failed items + const result = await this.prisma.goalRunBatchItem.updateMany({ + where: { + batchId, + status: 'FAILED', + }, + data: { + status: 'QUEUED', + error: null, + goalRunId: null, + startedAt: null, + completedAt: null, + }, + }); + + // Update batch status + const updatedBatch = await this.prisma.goalRunBatch.update({ + where: { id: batchId }, + data: { + status: 'RUNNING', + failedGoals: { decrement: result.count }, + error: null, + completedAt: null, + }, + }); + + // Restart execution + this.executeBatch(batchId); + + this.eventEmitter.emit('batch.retrying', { batchId, itemCount: result.count }); + + return this.toBatchResponse(updatedBatch); + } + + /** + * Execute batch (internal loop) + */ + private async executeBatch(batchId: string): Promise { + this.runningBatches.set(batchId, true); + + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + }); + + if (!batch) { + this.runningBatches.delete(batchId); + return; + } + + const isParallel = batch.executionMode === 'PARALLEL'; + const maxConcurrency = isParallel ? batch.maxConcurrency : 1; + + try { + while (this.runningBatches.get(batchId)) { + // Get queued items + const queuedItems = await this.prisma.goalRunBatchItem.findMany({ + where: { + batchId, + status: 'QUEUED', + }, + orderBy: { order: 'asc' }, + take: maxConcurrency, + }); + + if (queuedItems.length === 0) { + // Check if there are still running items + const runningCount = await this.prisma.goalRunBatchItem.count({ + where: { batchId, status: 'RUNNING' }, + }); + + if (runningCount === 0) { + // Batch is complete + await this.completeBatch(batchId); + break; + } + + // Wait for running items to complete + await this.sleep(1000); + continue; + } + + // Get currently running count + const currentRunning = await this.prisma.goalRunBatchItem.count({ + where: { batchId, status: 'RUNNING' }, + }); + + const canStart = Math.min( + queuedItems.length, + maxConcurrency - currentRunning, + ); + + if (canStart <= 0) { + // At max concurrency, wait + await this.sleep(1000); + continue; + } + + // Start items + const itemsToStart = queuedItems.slice(0, canStart); + + if (isParallel) { + // Start all in parallel + await Promise.all( + itemsToStart.map((item) => this.executeItem(batchId, item)), + ); + } else { + // Start one at a time (sequential) + for (const item of itemsToStart) { + if (!this.runningBatches.get(batchId)) break; + await this.executeItem(batchId, item); + await this.waitForItemCompletion(item.id); + } + } + + // Rate limit: wait between batches + await this.sleep(500); + } + } catch (error: any) { + this.logger.error(`Batch ${batchId} execution error: ${error.message}`); + await this.prisma.goalRunBatch.update({ + where: { id: batchId }, + data: { + status: 'FAILED', + error: error.message, + completedAt: new Date(), + }, + }); + } finally { + this.runningBatches.delete(batchId); + } + } + + /** + * Execute a single batch item + */ + private async executeItem(batchId: string, item: any): Promise { + this.logger.log(`Executing batch item ${item.id}`); + + try { + // Update item status + await this.prisma.goalRunBatchItem.update({ + where: { id: item.id }, + data: { + status: 'RUNNING', + startedAt: new Date(), + }, + }); + + // Get batch for tenant ID + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + }); + + if (!batch) return; + + // Create goal run + let goalRun; + + if (item.templateId) { + // Create from template + goalRun = await this.goalTemplateService.createGoalRunFromTemplate({ + tenantId: batch.tenantId, + templateId: item.templateId, + variableValues: item.variableValues as Record, + constraintOverrides: item.constraints as any, + autoStart: true, + }); + } else { + // Create directly + goalRun = await this.goalRunService.createFromGoal({ + tenantId: batch.tenantId, + goal: item.goal, + constraints: item.constraints as any, + autoStart: true, + }); + } + + // Link goal run to item + await this.prisma.goalRunBatchItem.update({ + where: { id: item.id }, + data: { goalRunId: goalRun.id }, + }); + } catch (error: any) { + this.logger.error(`Batch item ${item.id} failed to start: ${error.message}`); + await this.handleItemFailure(batchId, item.id, error.message); + } + } + + /** + * Wait for item completion + */ + private async waitForItemCompletion(itemId: string): Promise { + const maxWaitMs = 30 * 60 * 1000; // 30 minutes max + const startTime = Date.now(); + + while (Date.now() - startTime < maxWaitMs) { + const item = await this.prisma.goalRunBatchItem.findUnique({ + where: { id: itemId }, + }); + + if (!item || item.status !== 'RUNNING') { + return; + } + + await this.sleep(2000); + } + + // Timeout - mark as failed + await this.prisma.goalRunBatchItem.update({ + where: { id: itemId }, + data: { + status: 'FAILED', + error: 'Execution timeout', + completedAt: new Date(), + }, + }); + } + + /** + * Handle item failure + */ + private async handleItemFailure( + batchId: string, + itemId: string, + error: string, + ): Promise { + await this.prisma.goalRunBatchItem.update({ + where: { id: itemId }, + data: { + status: 'FAILED', + error, + completedAt: new Date(), + }, + }); + + // Update batch counters + await this.prisma.goalRunBatch.update({ + where: { id: batchId }, + data: { failedGoals: { increment: 1 } }, + }); + + // Check if should stop on failure + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + }); + + if (batch?.stopOnFailure) { + this.logger.log(`Batch ${batchId} stopping due to failure`); + await this.cancel(batchId, 'Stopped due to item failure'); + } + + this.eventEmitter.emit('batch.item-failed', { batchId, itemId, error }); + } + + /** + * Complete the batch + */ + private async completeBatch(batchId: string): Promise { + const batch = await this.prisma.goalRunBatch.findUnique({ + where: { id: batchId }, + }); + + if (!batch) return; + + let status: BatchStatus; + if (batch.failedGoals === 0 && batch.cancelledGoals === 0) { + status = 'COMPLETED'; + } else if (batch.completedGoals > 0) { + status = 'PARTIALLY_COMPLETED'; + } else { + status = 'FAILED'; + } + + await this.prisma.goalRunBatch.update({ + where: { id: batchId }, + data: { + status, + completedAt: new Date(), + }, + }); + + this.eventEmitter.emit('batch.completed', { batchId, status }); + } + + /** + * Handle goal run completion events + */ + @OnEvent('goal-run.completed') + async handleGoalRunCompleted(payload: { goalRunId: string }): Promise { + const item = await this.prisma.goalRunBatchItem.findFirst({ + where: { goalRunId: payload.goalRunId }, + }); + + if (!item) return; + + await this.prisma.goalRunBatchItem.update({ + where: { id: item.id }, + data: { + status: 'COMPLETED', + completedAt: new Date(), + }, + }); + + await this.prisma.goalRunBatch.update({ + where: { id: item.batchId }, + data: { completedGoals: { increment: 1 } }, + }); + } + + /** + * Handle goal run failure events + */ + @OnEvent('goal-run.failed') + async handleGoalRunFailed(payload: { goalRunId: string; error: string }): Promise { + const item = await this.prisma.goalRunBatchItem.findFirst({ + where: { goalRunId: payload.goalRunId }, + }); + + if (!item) return; + + await this.handleItemFailure(item.batchId, item.id, payload.error); + } + + /** + * Handle goal run cancellation events + */ + @OnEvent('goal-run.cancelled') + async handleGoalRunCancelled(payload: { goalRunId: string }): Promise { + const item = await this.prisma.goalRunBatchItem.findFirst({ + where: { goalRunId: payload.goalRunId }, + }); + + if (!item) return; + + await this.prisma.goalRunBatchItem.update({ + where: { id: item.id }, + data: { + status: 'CANCELLED', + completedAt: new Date(), + }, + }); + + await this.prisma.goalRunBatch.update({ + where: { id: item.batchId }, + data: { cancelledGoals: { increment: 1 } }, + }); + } + + // Helper methods + + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } + + private toBatchResponse(batch: any): BatchResponse { + const total = batch.totalGoals || 0; + const completed = batch.completedGoals + batch.failedGoals + batch.cancelledGoals; + const progress = total > 0 ? Math.round((completed / total) * 100) : 0; + + return { + id: batch.id, + tenantId: batch.tenantId, + name: batch.name, + description: batch.description, + executionMode: batch.executionMode, + maxConcurrency: batch.maxConcurrency, + stopOnFailure: batch.stopOnFailure, + status: batch.status as BatchStatus, + totalGoals: batch.totalGoals, + completedGoals: batch.completedGoals, + failedGoals: batch.failedGoals, + cancelledGoals: batch.cancelledGoals, + error: batch.error, + createdAt: batch.createdAt, + updatedAt: batch.updatedAt, + startedAt: batch.startedAt, + completedAt: batch.completedAt, + progress, + }; + } + + private toItemResponse(item: any): BatchItemResponse { + return { + id: item.id, + batchId: item.batchId, + goal: item.goal, + constraints: item.constraints as GoalConstraints, + templateId: item.templateId, + variableValues: item.variableValues as Record, + order: item.order, + status: item.status as BatchItemStatus, + goalRunId: item.goalRunId, + error: item.error, + createdAt: item.createdAt, + updatedAt: item.updatedAt, + startedAt: item.startedAt, + completedAt: item.completedAt, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/checkpoint-persistence.service.ts b/packages/bytebot-workflow-orchestrator/src/services/checkpoint-persistence.service.ts new file mode 100644 index 000000000..58378b27b --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/checkpoint-persistence.service.ts @@ -0,0 +1,537 @@ +/** + * Checkpoint Persistence Service + * v1.0.0: Cross-Session Checkpoint Persistence for Agent Recovery + * + * Implements industry-standard patterns for durable checkpoint storage: + * - LangGraph: Thread-level persistence with full state serialization + * - Manus: External file-based checkpoint (todo.md pattern) + * - OpenAI: Thread state persistence across API calls + * + * Key Features: + * 1. Database-backed checkpoint storage (survives server restarts) + * 2. Automatic checkpoint versioning and history + * 3. Point-in-time recovery for failed runs + * 4. Checkpoint compression for long-running goals + * 5. Cross-session context restoration + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS.md + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { GoalCheckpoint } from './goal-checkpoint.service'; +import { KnowledgeGraph } from './knowledge-extraction.service'; + +// Persisted checkpoint structure (extends GoalCheckpoint with persistence metadata) +export interface PersistedCheckpoint { + id: string; + goalRunId: string; + version: number; + checkpoint: GoalCheckpoint; + knowledgeGraph?: KnowledgeGraph; + contextSummary?: string; + createdAt: Date; + expiresAt: Date; + sizeBytes: number; + compressed: boolean; +} + +// Recovery result +export interface CheckpointRecoveryResult { + success: boolean; + checkpoint?: GoalCheckpoint; + knowledgeGraph?: KnowledgeGraph; + contextSummary?: string; + recoveredFromVersion: number; + message: string; +} + +// Checkpoint stats +export interface CheckpointStats { + totalCheckpoints: number; + totalSizeBytes: number; + oldestCheckpoint?: Date; + newestCheckpoint?: Date; + checkpointsByGoal: Record; +} + +@Injectable() +export class CheckpointPersistenceService implements OnModuleInit { + private readonly logger = new Logger(CheckpointPersistenceService.name); + private readonly enabled: boolean; + + // Configuration + private readonly retentionDays: number; + private readonly maxVersionsPerGoal: number; + private readonly compressionThreshold: number; // bytes + private readonly cleanupIntervalMs: number; + + // In-memory cache for fast reads + private checkpointCache: Map = new Map(); + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly prisma: PrismaService, + ) { + this.enabled = this.configService.get('CHECKPOINT_PERSISTENCE_ENABLED', 'true') === 'true'; + this.retentionDays = parseInt(this.configService.get('CHECKPOINT_RETENTION_DAYS', '30'), 10); + this.maxVersionsPerGoal = parseInt(this.configService.get('MAX_CHECKPOINT_VERSIONS', '10'), 10); + this.compressionThreshold = parseInt(this.configService.get('CHECKPOINT_COMPRESSION_THRESHOLD', '10000'), 10); + this.cleanupIntervalMs = parseInt(this.configService.get('CHECKPOINT_CLEANUP_INTERVAL_MS', '3600000'), 10); // 1 hour + + this.logger.log( + `Checkpoint persistence ${this.enabled ? 'enabled' : 'disabled'} ` + + `(retention: ${this.retentionDays} days, max versions: ${this.maxVersionsPerGoal})` + ); + } + + async onModuleInit(): Promise { + if (!this.enabled) return; + + // Start periodic cleanup + setInterval(() => this.cleanupExpiredCheckpoints(), this.cleanupIntervalMs); + + // Load recent checkpoints into cache + await this.warmCache(); + + this.logger.log('Checkpoint persistence service initialized'); + } + + /** + * Persist a checkpoint to database + */ + async persistCheckpoint( + goalRunId: string, + checkpoint: GoalCheckpoint, + knowledgeGraph?: KnowledgeGraph, + contextSummary?: string, + ): Promise { + if (!this.enabled) { + throw new Error('Checkpoint persistence is disabled'); + } + + const checkpointJson = JSON.stringify(checkpoint); + const knowledgeJson = knowledgeGraph ? JSON.stringify(knowledgeGraph) : null; + const totalSize = checkpointJson.length + (knowledgeJson?.length || 0) + (contextSummary?.length || 0); + + // Determine if compression needed + const shouldCompress = totalSize > this.compressionThreshold; + + // Prepare data (in production, would actually compress) + const checkpointData = shouldCompress + ? await this.compressData(checkpointJson) + : checkpointJson; + + const knowledgeData = shouldCompress && knowledgeJson + ? await this.compressData(knowledgeJson) + : knowledgeJson; + + // Get current version number + const currentVersion = await this.getLatestVersion(goalRunId); + const newVersion = currentVersion + 1; + + // Calculate expiration + const expiresAt = new Date(Date.now() + this.retentionDays * 24 * 60 * 60 * 1000); + + // Persist to database (using goal_run constraints JSON field) + try { + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + constraints: { + checkpoint: { + version: newVersion, + data: checkpointData, + knowledge: knowledgeData, + contextSummary, + compressed: shouldCompress, + persistedAt: new Date().toISOString(), + expiresAt: expiresAt.toISOString(), + }, + }, + }, + }); + } catch (error) { + this.logger.warn(`Failed to persist checkpoint to database: ${(error as Error).message}`); + // Continue with in-memory storage + } + + const persisted: PersistedCheckpoint = { + id: `cp-${goalRunId}-v${newVersion}`, + goalRunId, + version: newVersion, + checkpoint, + knowledgeGraph, + contextSummary, + createdAt: new Date(), + expiresAt, + sizeBytes: totalSize, + compressed: shouldCompress, + }; + + // Update cache + this.checkpointCache.set(goalRunId, persisted); + + // Clean up old versions + await this.cleanupOldVersions(goalRunId); + + this.logger.debug( + `Persisted checkpoint v${newVersion} for goal ${goalRunId} ` + + `(${totalSize} bytes, compressed: ${shouldCompress})` + ); + + // Emit event + this.eventEmitter.emit('checkpoint.persisted', { + goalRunId, + version: newVersion, + sizeBytes: totalSize, + }); + + return persisted; + } + + /** + * Recover checkpoint from persistence + */ + async recoverCheckpoint(goalRunId: string, version?: number): Promise { + if (!this.enabled) { + return { + success: false, + recoveredFromVersion: 0, + message: 'Checkpoint persistence is disabled', + }; + } + + // Try cache first + const cached = this.checkpointCache.get(goalRunId); + if (cached && (!version || cached.version === version)) { + this.logger.debug(`Recovered checkpoint v${cached.version} for goal ${goalRunId} from cache`); + return { + success: true, + checkpoint: cached.checkpoint, + knowledgeGraph: cached.knowledgeGraph, + contextSummary: cached.contextSummary, + recoveredFromVersion: cached.version, + message: 'Recovered from cache', + }; + } + + // Load from database + try { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { constraints: true }, + }); + + if (!goalRun?.constraints) { + return { + success: false, + recoveredFromVersion: 0, + message: 'No checkpoint found for goal', + }; + } + + const constraints = goalRun.constraints as any; + const checkpointData = constraints.checkpoint; + + if (!checkpointData) { + return { + success: false, + recoveredFromVersion: 0, + message: 'No checkpoint data in constraints', + }; + } + + // Decompress if needed + let checkpointJson = checkpointData.data; + let knowledgeJson = checkpointData.knowledge; + + if (checkpointData.compressed) { + checkpointJson = await this.decompressData(checkpointJson); + if (knowledgeJson) { + knowledgeJson = await this.decompressData(knowledgeJson); + } + } + + const checkpoint = JSON.parse(checkpointJson) as GoalCheckpoint; + const knowledgeGraph = knowledgeJson ? JSON.parse(knowledgeJson) as KnowledgeGraph : undefined; + + // Update cache + const persisted: PersistedCheckpoint = { + id: `cp-${goalRunId}-v${checkpointData.version}`, + goalRunId, + version: checkpointData.version, + checkpoint, + knowledgeGraph, + contextSummary: checkpointData.contextSummary, + createdAt: new Date(checkpointData.persistedAt), + expiresAt: new Date(checkpointData.expiresAt), + sizeBytes: checkpointJson.length + (knowledgeJson?.length || 0), + compressed: checkpointData.compressed, + }; + + this.checkpointCache.set(goalRunId, persisted); + + this.logger.log(`Recovered checkpoint v${checkpointData.version} for goal ${goalRunId} from database`); + + // Emit event + this.eventEmitter.emit('checkpoint.recovered', { + goalRunId, + version: checkpointData.version, + }); + + return { + success: true, + checkpoint, + knowledgeGraph, + contextSummary: checkpointData.contextSummary, + recoveredFromVersion: checkpointData.version, + message: 'Recovered from database', + }; + } catch (error) { + this.logger.error(`Failed to recover checkpoint: ${(error as Error).message}`); + return { + success: false, + recoveredFromVersion: 0, + message: `Recovery failed: ${(error as Error).message}`, + }; + } + } + + /** + * Check if checkpoint exists for a goal + */ + async hasCheckpoint(goalRunId: string): Promise { + if (this.checkpointCache.has(goalRunId)) { + return true; + } + + try { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { constraints: true }, + }); + + const constraints = goalRun?.constraints as any; + return !!constraints?.checkpoint; + } catch { + return false; + } + } + + /** + * Delete checkpoint for a goal + */ + async deleteCheckpoint(goalRunId: string): Promise { + // Remove from cache + this.checkpointCache.delete(goalRunId); + + // Remove from database + try { + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + constraints: { + checkpoint: null, + }, + }, + }); + + this.logger.debug(`Deleted checkpoint for goal ${goalRunId}`); + return true; + } catch (error) { + this.logger.warn(`Failed to delete checkpoint: ${(error as Error).message}`); + return false; + } + } + + /** + * Get checkpoint statistics + */ + async getStats(): Promise { + const stats: CheckpointStats = { + totalCheckpoints: 0, + totalSizeBytes: 0, + checkpointsByGoal: {}, + }; + + // From cache + for (const [goalRunId, cp] of this.checkpointCache.entries()) { + stats.totalCheckpoints++; + stats.totalSizeBytes += cp.sizeBytes; + stats.checkpointsByGoal[goalRunId] = cp.version; + + if (!stats.oldestCheckpoint || cp.createdAt < stats.oldestCheckpoint) { + stats.oldestCheckpoint = cp.createdAt; + } + if (!stats.newestCheckpoint || cp.createdAt > stats.newestCheckpoint) { + stats.newestCheckpoint = cp.createdAt; + } + } + + return stats; + } + + /** + * Event handler: Persist checkpoint when updated + */ + @OnEvent('checkpoint.updated') + async handleCheckpointUpdated(payload: { + goalRunId: string; + checkpoint: GoalCheckpoint; + knowledgeGraph?: KnowledgeGraph; + contextSummary?: string; + }): Promise { + if (!this.enabled) return; + + try { + await this.persistCheckpoint( + payload.goalRunId, + payload.checkpoint, + payload.knowledgeGraph, + payload.contextSummary, + ); + } catch (error) { + this.logger.error(`Failed to persist checkpoint on update: ${(error as Error).message}`); + } + } + + /** + * Event handler: Clean up checkpoint when goal completes + */ + @OnEvent('goal.completed') + async handleGoalCompleted(payload: { goalRunId: string }): Promise { + // Keep checkpoint for completed goals (for analysis), but mark for cleanup + this.logger.debug(`Goal ${payload.goalRunId} completed, checkpoint retained`); + } + + /** + * Get latest checkpoint version for a goal + */ + private async getLatestVersion(goalRunId: string): Promise { + const cached = this.checkpointCache.get(goalRunId); + if (cached) { + return cached.version; + } + + try { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { constraints: true }, + }); + + const constraints = goalRun?.constraints as any; + return constraints?.checkpoint?.version || 0; + } catch { + return 0; + } + } + + /** + * Clean up old checkpoint versions for a goal + */ + private async cleanupOldVersions(goalRunId: string): Promise { + // For now, we only keep the latest version per goal + // In production, could maintain version history + this.logger.debug(`Checkpoint version cleanup not needed (single version per goal)`); + } + + /** + * Clean up expired checkpoints + */ + private async cleanupExpiredCheckpoints(): Promise { + const now = new Date(); + let cleaned = 0; + + for (const [goalRunId, cp] of this.checkpointCache.entries()) { + if (cp.expiresAt < now) { + this.checkpointCache.delete(goalRunId); + cleaned++; + } + } + + if (cleaned > 0) { + this.logger.log(`Cleaned up ${cleaned} expired checkpoints`); + } + } + + /** + * Warm the cache with recent checkpoints + */ + private async warmCache(): Promise { + try { + // Load recent goal runs with checkpoints + const recentGoals = await this.prisma.goalRun.findMany({ + where: { + status: { in: ['RUNNING', 'PENDING'] }, + NOT: { + constraints: { equals: {} }, + }, + }, + select: { + id: true, + constraints: true, + }, + take: 100, + }); + + let loaded = 0; + for (const goal of recentGoals) { + const constraints = goal.constraints as any; + if (constraints?.checkpoint) { + // Create cache entry without full deserialization + const checkpointData = constraints.checkpoint; + try { + const checkpoint = JSON.parse( + checkpointData.compressed + ? await this.decompressData(checkpointData.data) + : checkpointData.data + ); + + this.checkpointCache.set(goal.id, { + id: `cp-${goal.id}-v${checkpointData.version}`, + goalRunId: goal.id, + version: checkpointData.version, + checkpoint, + contextSummary: checkpointData.contextSummary, + createdAt: new Date(checkpointData.persistedAt), + expiresAt: new Date(checkpointData.expiresAt), + sizeBytes: checkpointData.data.length, + compressed: checkpointData.compressed, + }); + loaded++; + } catch (error) { + this.logger.warn(`Failed to warm cache for goal ${goal.id}: ${(error as Error).message}`); + } + } + } + + this.logger.log(`Warmed checkpoint cache with ${loaded} entries`); + } catch (error) { + this.logger.warn(`Failed to warm checkpoint cache: ${(error as Error).message}`); + } + } + + /** + * Compress data (placeholder - in production use zlib/gzip) + */ + private async compressData(data: string): Promise { + // In production, use actual compression + // For now, just return the data with a marker + return `__compressed__${data}`; + } + + /** + * Decompress data (placeholder - in production use zlib/gzip) + */ + private async decompressData(data: string): Promise { + // In production, use actual decompression + if (data.startsWith('__compressed__')) { + return data.substring(14); + } + return data; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/circuit-breaker.service.ts b/packages/bytebot-workflow-orchestrator/src/services/circuit-breaker.service.ts new file mode 100644 index 000000000..7551b6724 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/circuit-breaker.service.ts @@ -0,0 +1,881 @@ +/** + * Circuit Breaker Service + * v1.0.0: Phase 9 Self-Healing & Auto-Recovery + * v2.0.0: Agent Routing Enhancements (v5.10.0) + * + * Implements resilience patterns using Cockatiel library: + * - Circuit breaker: Prevents cascading failures + * - Retry with exponential backoff: Handles transient failures + * - Timeout: Prevents hanging calls + * - Bulkhead: Isolates failures per service + * - Agent routing: Health-aware agent selection with automatic failover + * + * Each external service gets its own policy chain for isolation. + * v2.0.0 adds agent routing with health scoring and automatic failover. + */ + +import { Injectable, Logger, OnModuleInit, OnModuleDestroy } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { + CircuitBreakerPolicy, + ConsecutiveBreaker, + ExponentialBackoff, + handleAll, + retry, + timeout, + wrap, + circuitBreaker, + bulkhead, + BulkheadPolicy, + TimeoutPolicy, + RetryPolicy, + IPolicy, + CircuitState, + TimeoutStrategy, +} from 'cockatiel'; + +// Circuit breaker states +export enum CircuitBreakerStateEnum { + CLOSED = 'CLOSED', + OPEN = 'OPEN', + HALF_OPEN = 'HALF_OPEN', +} + +// Service configuration +export interface ServiceConfig { + name: string; + // Circuit breaker settings + failureThreshold: number; // Failures before opening + successThreshold: number; // Successes in half-open to close + resetTimeoutMs: number; // Time before trying half-open + // Retry settings + maxRetries: number; + initialDelayMs: number; + maxDelayMs: number; + // Timeout settings + timeoutMs: number; + // Bulkhead settings + maxConcurrent: number; + maxQueue: number; +} + +// Default configurations for known services +const DEFAULT_CONFIGS: Record> = { + 'task-controller': { + failureThreshold: 5, + successThreshold: 3, + resetTimeoutMs: 30000, + maxRetries: 3, + initialDelayMs: 1000, + maxDelayMs: 30000, + timeoutMs: 60000, + maxConcurrent: 10, + maxQueue: 50, + }, + 'agent': { + failureThreshold: 3, + successThreshold: 2, + resetTimeoutMs: 15000, + maxRetries: 2, + initialDelayMs: 500, + maxDelayMs: 10000, + timeoutMs: 30000, + maxConcurrent: 5, + maxQueue: 20, + }, + 'default': { + failureThreshold: 5, + successThreshold: 3, + resetTimeoutMs: 30000, + maxRetries: 3, + initialDelayMs: 1000, + maxDelayMs: 30000, + timeoutMs: 60000, + maxConcurrent: 10, + maxQueue: 50, + }, +}; + +// Combined policy for a service +interface ServicePolicy { + config: ServiceConfig; + circuitBreaker: CircuitBreakerPolicy; + retry: RetryPolicy; + timeout: TimeoutPolicy; + bulkhead: BulkheadPolicy; + combined: IPolicy; + stats: { + totalRequests: number; + totalFailures: number; + totalSuccesses: number; + totalTimeouts: number; + }; +} + +@Injectable() +export class CircuitBreakerService implements OnModuleInit, OnModuleDestroy { + private readonly logger = new Logger(CircuitBreakerService.name); + private readonly policies: Map = new Map(); + private persistInterval: NodeJS.Timeout | null = null; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) {} + + async onModuleInit() { + this.logger.log('Initializing Circuit Breaker Service'); + + // Load persisted circuit breaker states + await this.loadPersistedStates(); + + // Start periodic persistence (every 30 seconds) + this.persistInterval = setInterval( + () => this.persistAllStates(), + 30000, + ); + + this.logger.log('Circuit Breaker Service initialized'); + } + + async onModuleDestroy() { + // Clear persistence interval + if (this.persistInterval) { + clearInterval(this.persistInterval); + } + + // Persist final states + await this.persistAllStates(); + + this.logger.log('Circuit Breaker Service destroyed'); + } + + /** + * Get or create a policy chain for a service + */ + getPolicy(serviceName: string): ServicePolicy { + let policy = this.policies.get(serviceName); + if (!policy) { + policy = this.createPolicy(serviceName); + this.policies.set(serviceName, policy); + } + return policy; + } + + /** + * Execute a function with circuit breaker protection + */ + async execute( + serviceName: string, + fn: () => Promise, + context?: Record, + ): Promise { + const policy = this.getPolicy(serviceName); + policy.stats.totalRequests++; + + try { + const result = await policy.combined.execute(fn); + policy.stats.totalSuccesses++; + return result; + } catch (error) { + policy.stats.totalFailures++; + + // Check if it was a timeout + if (error.name === 'TaskCancelledError') { + policy.stats.totalTimeouts++; + } + + // Emit event for monitoring + this.eventEmitter.emit('circuit-breaker.failure', { + serviceName, + error: error.message, + state: this.getCircuitState(serviceName), + context, + }); + + throw error; + } + } + + /** + * Execute with fallback on failure + */ + async executeWithFallback( + serviceName: string, + fn: () => Promise, + fallback: (error: Error) => T | Promise, + context?: Record, + ): Promise { + try { + return await this.execute(serviceName, fn, context); + } catch (error) { + this.logger.warn( + `Circuit breaker fallback triggered for ${serviceName}: ${error.message}`, + ); + + this.eventEmitter.emit('circuit-breaker.fallback', { + serviceName, + error: error.message, + context, + }); + + return fallback(error); + } + } + + /** + * Get current circuit state for a service + */ + getCircuitState(serviceName: string): CircuitBreakerStateEnum { + const policy = this.policies.get(serviceName); + if (!policy) { + return CircuitBreakerStateEnum.CLOSED; + } + + const state = policy.circuitBreaker.state; + switch (state) { + case CircuitState.Open: + return CircuitBreakerStateEnum.OPEN; + case CircuitState.HalfOpen: + return CircuitBreakerStateEnum.HALF_OPEN; + default: + return CircuitBreakerStateEnum.CLOSED; + } + } + + /** + * Get statistics for a service + */ + getStats(serviceName: string): { + state: CircuitBreakerStateEnum; + stats: ServicePolicy['stats']; + config: ServiceConfig; + } | null { + const policy = this.policies.get(serviceName); + if (!policy) { + return null; + } + + return { + state: this.getCircuitState(serviceName), + stats: { ...policy.stats }, + config: policy.config, + }; + } + + /** + * Get all circuit breaker statuses + */ + getAllStats(): Array<{ + serviceName: string; + state: CircuitBreakerStateEnum; + stats: ServicePolicy['stats']; + }> { + const result: Array<{ + serviceName: string; + state: CircuitBreakerStateEnum; + stats: ServicePolicy['stats']; + }> = []; + + for (const [serviceName, policy] of this.policies) { + result.push({ + serviceName, + state: this.getCircuitState(serviceName), + stats: { ...policy.stats }, + }); + } + + return result; + } + + /** + * Manually reset a circuit breaker + */ + async resetCircuit(serviceName: string): Promise { + const policy = this.policies.get(serviceName); + if (policy) { + // Cockatiel doesn't have a direct reset method, so we recreate the policy + this.policies.delete(serviceName); + this.policies.set(serviceName, this.createPolicy(serviceName)); + + // Log recovery action + await this.logRecoveryAction( + serviceName, + 'CIRCUIT_RESET', + this.getCircuitState(serviceName).toString(), + CircuitBreakerStateEnum.CLOSED, + 'Manual circuit breaker reset', + ); + + this.logger.log(`Circuit breaker reset for ${serviceName}`); + } + } + + /** + * Check if circuit is open (failing) + */ + isCircuitOpen(serviceName: string): boolean { + return this.getCircuitState(serviceName) === CircuitBreakerStateEnum.OPEN; + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + /** + * Create a policy chain for a service + */ + private createPolicy(serviceName: string): ServicePolicy { + const config = this.getServiceConfig(serviceName); + + // Create circuit breaker + const cb = circuitBreaker(handleAll, { + halfOpenAfter: config.resetTimeoutMs, + breaker: new ConsecutiveBreaker(config.failureThreshold), + }); + + // Subscribe to circuit breaker events + cb.onStateChange((state) => { + const stateStr = this.circuitStateToString(state); + this.logger.log(`Circuit breaker state changed for ${serviceName}: ${stateStr}`); + + this.eventEmitter.emit('circuit-breaker.state-change', { + serviceName, + state: stateStr, + }); + + // Persist state change + this.persistState(serviceName, stateStr).catch((err) => { + this.logger.error(`Failed to persist circuit state: ${err.message}`); + }); + }); + + // Create retry policy with exponential backoff + const retryPolicy = retry(handleAll, { + maxAttempts: config.maxRetries, + backoff: new ExponentialBackoff({ + initialDelay: config.initialDelayMs, + maxDelay: config.maxDelayMs, + }), + }); + + // Create timeout policy (Aggressive = cancel immediately on timeout) + const timeoutPolicy = timeout(config.timeoutMs, TimeoutStrategy.Aggressive); + + // Create bulkhead for concurrency limiting + const bulkheadPolicy = bulkhead(config.maxConcurrent, config.maxQueue); + + // Combine policies: timeout -> bulkhead -> circuit breaker -> retry + // Order matters: timeout is outermost, retry is innermost + const combined = wrap(timeoutPolicy, bulkheadPolicy, cb, retryPolicy); + + return { + config, + circuitBreaker: cb, + retry: retryPolicy, + timeout: timeoutPolicy, + bulkhead: bulkheadPolicy, + combined, + stats: { + totalRequests: 0, + totalFailures: 0, + totalSuccesses: 0, + totalTimeouts: 0, + }, + }; + } + + /** + * Get configuration for a service + */ + private getServiceConfig(serviceName: string): ServiceConfig { + // Try to find a matching config prefix + let baseConfig = DEFAULT_CONFIGS['default']; + for (const [prefix, config] of Object.entries(DEFAULT_CONFIGS)) { + if (serviceName.startsWith(prefix)) { + baseConfig = config; + break; + } + } + + // Allow overrides from environment + const envPrefix = `CIRCUIT_BREAKER_${serviceName.toUpperCase().replace(/-/g, '_')}`; + + return { + name: serviceName, + failureThreshold: this.configService.get( + `${envPrefix}_FAILURE_THRESHOLD`, + baseConfig.failureThreshold!, + ), + successThreshold: this.configService.get( + `${envPrefix}_SUCCESS_THRESHOLD`, + baseConfig.successThreshold!, + ), + resetTimeoutMs: this.configService.get( + `${envPrefix}_RESET_TIMEOUT_MS`, + baseConfig.resetTimeoutMs!, + ), + maxRetries: this.configService.get( + `${envPrefix}_MAX_RETRIES`, + baseConfig.maxRetries!, + ), + initialDelayMs: this.configService.get( + `${envPrefix}_INITIAL_DELAY_MS`, + baseConfig.initialDelayMs!, + ), + maxDelayMs: this.configService.get( + `${envPrefix}_MAX_DELAY_MS`, + baseConfig.maxDelayMs!, + ), + timeoutMs: this.configService.get( + `${envPrefix}_TIMEOUT_MS`, + baseConfig.timeoutMs!, + ), + maxConcurrent: this.configService.get( + `${envPrefix}_MAX_CONCURRENT`, + baseConfig.maxConcurrent!, + ), + maxQueue: this.configService.get( + `${envPrefix}_MAX_QUEUE`, + baseConfig.maxQueue!, + ), + }; + } + + /** + * Convert Cockatiel circuit state to string + */ + private circuitStateToString(state: CircuitState): CircuitBreakerStateEnum { + switch (state) { + case CircuitState.Open: + return CircuitBreakerStateEnum.OPEN; + case CircuitState.HalfOpen: + return CircuitBreakerStateEnum.HALF_OPEN; + default: + return CircuitBreakerStateEnum.CLOSED; + } + } + + /** + * Load persisted circuit breaker states from database + */ + private async loadPersistedStates(): Promise { + try { + const states = await this.prisma.circuitBreakerState.findMany(); + + for (const state of states) { + // Initialize policy with persisted stats + const policy = this.getPolicy(state.serviceName); + policy.stats.totalRequests = state.totalRequests; + policy.stats.totalFailures = state.totalFailures; + policy.stats.totalSuccesses = state.totalSuccesses; + policy.stats.totalTimeouts = state.totalTimeouts; + + this.logger.debug( + `Loaded circuit breaker state for ${state.serviceName}: ${state.state}`, + ); + } + } catch (error) { + this.logger.warn(`Failed to load persisted circuit states: ${error.message}`); + } + } + + /** + * Persist circuit breaker state to database + */ + private async persistState( + serviceName: string, + state: CircuitBreakerStateEnum, + ): Promise { + const policy = this.policies.get(serviceName); + if (!policy) return; + + const now = new Date(); + + await this.prisma.circuitBreakerState.upsert({ + where: { serviceName }, + create: { + serviceName, + state, + failureCount: policy.stats.totalFailures, + successCount: policy.stats.totalSuccesses, + failureThreshold: policy.config.failureThreshold, + successThreshold: policy.config.successThreshold, + resetTimeoutMs: policy.config.resetTimeoutMs, + totalRequests: policy.stats.totalRequests, + totalFailures: policy.stats.totalFailures, + totalSuccesses: policy.stats.totalSuccesses, + totalTimeouts: policy.stats.totalTimeouts, + lastFailureAt: state === CircuitBreakerStateEnum.OPEN ? now : null, + openedAt: state === CircuitBreakerStateEnum.OPEN ? now : null, + halfOpenAt: state === CircuitBreakerStateEnum.HALF_OPEN ? now : null, + }, + update: { + state, + failureCount: policy.stats.totalFailures, + successCount: policy.stats.totalSuccesses, + totalRequests: policy.stats.totalRequests, + totalFailures: policy.stats.totalFailures, + totalSuccesses: policy.stats.totalSuccesses, + totalTimeouts: policy.stats.totalTimeouts, + lastFailureAt: + state === CircuitBreakerStateEnum.OPEN ? now : undefined, + lastSuccessAt: + state === CircuitBreakerStateEnum.CLOSED ? now : undefined, + openedAt: + state === CircuitBreakerStateEnum.OPEN ? now : undefined, + halfOpenAt: + state === CircuitBreakerStateEnum.HALF_OPEN ? now : undefined, + }, + }); + } + + /** + * Persist all circuit breaker states + */ + private async persistAllStates(): Promise { + for (const [serviceName] of this.policies) { + await this.persistState(serviceName, this.getCircuitState(serviceName)); + } + } + + /** + * Log a recovery action + */ + private async logRecoveryAction( + targetId: string, + actionType: string, + previousState: string, + newState: string, + reason: string, + ): Promise { + try { + await this.prisma.recoveryLog.create({ + data: { + actionType, + targetType: 'CIRCUIT_BREAKER', + targetId, + previousState, + newState, + reason, + actorType: 'SYSTEM', + success: true, + }, + }); + } catch (error) { + this.logger.error(`Failed to log recovery action: ${error.message}`); + } + } + + // ========================================================================= + // v2.0.0: Agent Routing with Health-Aware Selection + // ========================================================================= + + /** + * Agent health tracking for routing decisions + */ + private agentHealth: Map = new Map(); + + /** + * Select the healthiest available agent from a list + * Uses circuit breaker state + recent success rate for scoring + */ + selectHealthyAgent(agents: string[]): AgentSelectionResult { + if (agents.length === 0) { + return { selected: null, reason: 'No agents available' }; + } + + // Calculate health scores for all agents + const scored = agents.map(agentId => ({ + agentId, + score: this.calculateAgentHealthScore(agentId), + state: this.getCircuitState(agentId), + })); + + // Filter out open circuit breakers (completely unavailable) + const available = scored.filter(a => a.state !== CircuitBreakerStateEnum.OPEN); + + if (available.length === 0) { + // All agents have open circuits - use least recently failed + const leastRecentlyFailed = scored.sort((a, b) => + (this.agentHealth.get(a.agentId)?.lastFailureAt?.getTime() || 0) - + (this.agentHealth.get(b.agentId)?.lastFailureAt?.getTime() || 0) + )[0]; + + return { + selected: leastRecentlyFailed?.agentId || null, + reason: 'All agents have open circuits, selecting least recently failed', + allUnavailable: true, + }; + } + + // Sort by health score (higher is better) + available.sort((a, b) => b.score - a.score); + + const selected = available[0]; + + // Log selection decision for debugging + this.logger.debug( + `Agent selection: ${selected.agentId} (score: ${selected.score.toFixed(2)}, state: ${selected.state})`, + ); + + return { + selected: selected.agentId, + score: selected.score, + reason: `Selected healthiest agent with score ${selected.score.toFixed(2)}`, + alternatives: available.slice(1).map(a => a.agentId), + }; + } + + /** + * Execute with automatic agent failover + * Tries agents in order of health until one succeeds + */ + async executeWithAgentFailover( + agents: string[], + fn: (agentId: string) => Promise, + maxAttempts: number = 3, + ): Promise> { + const attemptedAgents: string[] = []; + let lastError: Error | null = null; + + // Sort agents by health + const sortedAgents = [...agents].sort((a, b) => + this.calculateAgentHealthScore(b) - this.calculateAgentHealthScore(a) + ); + + for (let attempt = 0; attempt < Math.min(maxAttempts, sortedAgents.length); attempt++) { + const agentId = sortedAgents[attempt]; + attemptedAgents.push(agentId); + + try { + // Execute with circuit breaker protection + const result = await this.execute(agentId, () => fn(agentId)); + + // Record success + this.recordAgentSuccess(agentId); + + return { + success: true, + result, + usedAgent: agentId, + attemptedAgents, + failoverCount: attempt, + }; + } catch (error) { + lastError = error as Error; + + // Record failure + this.recordAgentFailure(agentId, lastError.message); + + this.logger.warn( + `Agent ${agentId} failed (attempt ${attempt + 1}/${maxAttempts}): ${lastError.message}`, + ); + + // Emit failover event + this.eventEmitter.emit('agent.failover', { + fromAgent: agentId, + toAgent: sortedAgents[attempt + 1] || null, + error: lastError.message, + attempt: attempt + 1, + }); + } + } + + return { + success: false, + error: lastError?.message || 'All agents failed', + usedAgent: null, + attemptedAgents, + failoverCount: attemptedAgents.length - 1, + }; + } + + /** + * Record a successful agent interaction + */ + recordAgentSuccess(agentId: string): void { + const current = this.agentHealth.get(agentId) || this.createDefaultHealthScore(agentId); + + current.successCount++; + current.consecutiveFailures = 0; + current.lastSuccessAt = new Date(); + current.responseTimeMs.push(Date.now()); // Would track actual response time in production + + // Keep only last 100 response times + if (current.responseTimeMs.length > 100) { + current.responseTimeMs.shift(); + } + + this.agentHealth.set(agentId, current); + } + + /** + * Record a failed agent interaction + */ + recordAgentFailure(agentId: string, error: string): void { + const current = this.agentHealth.get(agentId) || this.createDefaultHealthScore(agentId); + + current.failureCount++; + current.consecutiveFailures++; + current.lastFailureAt = new Date(); + current.lastError = error; + + this.agentHealth.set(agentId, current); + } + + /** + * Get health score for a specific agent + */ + getAgentHealth(agentId: string): AgentHealthScore | null { + return this.agentHealth.get(agentId) || null; + } + + /** + * Get health overview for all tracked agents + */ + getAllAgentHealth(): AgentHealthOverview { + const agents: AgentHealthScore[] = []; + let healthyCount = 0; + let unhealthyCount = 0; + let degradedCount = 0; + + for (const [agentId, health] of this.agentHealth) { + agents.push({ ...health, agentId }); + + const circuitState = this.getCircuitState(agentId); + if (circuitState === CircuitBreakerStateEnum.OPEN) { + unhealthyCount++; + } else if (circuitState === CircuitBreakerStateEnum.HALF_OPEN || health.consecutiveFailures > 0) { + degradedCount++; + } else { + healthyCount++; + } + } + + return { + agents, + summary: { + total: agents.length, + healthy: healthyCount, + degraded: degradedCount, + unhealthy: unhealthyCount, + }, + }; + } + + /** + * Calculate health score for an agent (0-100) + */ + private calculateAgentHealthScore(agentId: string): number { + const health = this.agentHealth.get(agentId); + const circuitState = this.getCircuitState(agentId); + + // Base score + let score = 100; + + // Penalize based on circuit state + if (circuitState === CircuitBreakerStateEnum.OPEN) { + score -= 80; // Heavily penalize open circuit + } else if (circuitState === CircuitBreakerStateEnum.HALF_OPEN) { + score -= 30; // Moderate penalty for half-open + } + + if (!health) { + return score; // No history, use base score + } + + // Penalize based on consecutive failures + score -= Math.min(health.consecutiveFailures * 10, 40); + + // Factor in success rate + const total = health.successCount + health.failureCount; + if (total > 0) { + const successRate = health.successCount / total; + score *= successRate; + } + + // Recency bonus - agents that succeeded recently get a boost + if (health.lastSuccessAt) { + const timeSinceSuccess = Date.now() - health.lastSuccessAt.getTime(); + if (timeSinceSuccess < 60000) { // Success within last minute + score += 10; + } else if (timeSinceSuccess < 300000) { // Success within last 5 minutes + score += 5; + } + } + + // Recency penalty - recent failures reduce score + if (health.lastFailureAt) { + const timeSinceFailure = Date.now() - health.lastFailureAt.getTime(); + if (timeSinceFailure < 30000) { // Failure within last 30 seconds + score -= 20; + } else if (timeSinceFailure < 60000) { // Failure within last minute + score -= 10; + } + } + + return Math.max(0, Math.min(100, score)); + } + + /** + * Create default health score for new agent + */ + private createDefaultHealthScore(agentId: string): AgentHealthScore { + return { + agentId, + successCount: 0, + failureCount: 0, + consecutiveFailures: 0, + lastSuccessAt: null, + lastFailureAt: null, + lastError: null, + responseTimeMs: [], + }; + } +} + +// ========================================================================= +// v2.0.0: Agent Routing Types +// ========================================================================= + +export interface AgentHealthScore { + agentId: string; + successCount: number; + failureCount: number; + consecutiveFailures: number; + lastSuccessAt: Date | null; + lastFailureAt: Date | null; + lastError: string | null; + responseTimeMs: number[]; +} + +export interface AgentSelectionResult { + selected: string | null; + score?: number; + reason: string; + alternatives?: string[]; + allUnavailable?: boolean; +} + +export interface AgentExecutionResult { + success: boolean; + result?: T; + error?: string; + usedAgent: string | null; + attemptedAgents: string[]; + failoverCount: number; +} + +export interface AgentHealthOverview { + agents: AgentHealthScore[]; + summary: { + total: number; + healthy: number; + degraded: number; + unhealthy: number; + }; +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/cleanup.service.ts b/packages/bytebot-workflow-orchestrator/src/services/cleanup.service.ts new file mode 100644 index 000000000..5884e7529 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/cleanup.service.ts @@ -0,0 +1,145 @@ +/** + * Cleanup Service + * v1.0.0 M5: Scheduled cleanup of expired approvals and idempotency records + * Post-M5: Also cleans up expired audit logs and webhook deliveries + * + * Runs periodically to: + * - Expire old pending approvals + * - Clean up expired idempotency records + * - Clean up expired audit logs (based on retention policy) + * - Clean up old webhook delivery records + * - Reset stale processing records + */ + +import { Injectable, Logger, Optional } from '@nestjs/common'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { ApprovalService } from './approval.service'; +import { IdempotencyService } from './idempotency.service'; +import { AuditService } from './audit.service'; +import { PrismaService } from './prisma.service'; + +@Injectable() +export class CleanupService { + private readonly logger = new Logger(CleanupService.name); + private isRunning = false; + + constructor( + private readonly approvalService: ApprovalService, + private readonly idempotencyService: IdempotencyService, + private readonly prisma: PrismaService, + @Optional() private readonly auditService?: AuditService, + ) { + this.logger.log('CleanupService initialized'); + } + + /** + * Run cleanup every 5 minutes + */ + @Cron(CronExpression.EVERY_5_MINUTES) + async runCleanup() { + if (this.isRunning) { + this.logger.debug('Cleanup already running, skipping'); + return; + } + + this.isRunning = true; + + try { + this.logger.debug('Starting cleanup cycle'); + + // Expire old approvals + const expiredApprovals = await this.approvalService.expireOldApprovals(); + + // Clean up expired idempotency records + const expiredIdempotency = await this.idempotencyService.cleanupExpired(); + + // Post-M5: Clean up expired audit logs + let expiredAuditLogs = 0; + if (this.auditService) { + try { + expiredAuditLogs = await this.auditService.cleanupExpired(); + } catch (error: any) { + // Table might not exist yet + if (!error.message.includes('does not exist')) { + this.logger.warn(`Audit cleanup failed: ${error.message}`); + } + } + } + + // Post-M5: Clean up old webhook delivery records (older than 30 days) + let cleanedWebhookDeliveries = 0; + try { + const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000); + const result = await this.prisma.webhookDelivery.deleteMany({ + where: { + createdAt: { lt: thirtyDaysAgo }, + }, + }); + cleanedWebhookDeliveries = result.count; + } catch (error: any) { + // Table might not exist yet + if (!error.message.includes('does not exist')) { + this.logger.warn(`Webhook delivery cleanup failed: ${error.message}`); + } + } + + const totalCleaned = + expiredApprovals + expiredIdempotency + expiredAuditLogs + cleanedWebhookDeliveries; + + if (totalCleaned > 0) { + this.logger.log( + `Cleanup complete: ${expiredApprovals} approvals expired, ` + + `${expiredIdempotency} idempotency records, ` + + `${expiredAuditLogs} audit logs, ` + + `${cleanedWebhookDeliveries} webhook deliveries cleaned`, + ); + } + } catch (error: any) { + this.logger.error(`Cleanup failed: ${error.message}`); + } finally { + this.isRunning = false; + } + } + + /** + * Run full cleanup (can be called manually) + */ + async runFullCleanup(): Promise<{ + expiredApprovals: number; + expiredIdempotency: number; + expiredAuditLogs: number; + cleanedWebhookDeliveries: number; + }> { + const expiredApprovals = await this.approvalService.expireOldApprovals(); + const expiredIdempotency = await this.idempotencyService.cleanupExpired(); + + let expiredAuditLogs = 0; + if (this.auditService) { + try { + expiredAuditLogs = await this.auditService.cleanupExpired(); + } catch { + // Table might not exist + } + } + + let cleanedWebhookDeliveries = 0; + try { + const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000); + const result = await this.prisma.webhookDelivery.deleteMany({ + where: { + createdAt: { lt: thirtyDaysAgo }, + }, + }); + cleanedWebhookDeliveries = result.count; + } catch { + // Table might not exist + } + + return { + expiredApprovals, + expiredIdempotency, + expiredAuditLogs, + cleanedWebhookDeliveries, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/compliance.service.ts b/packages/bytebot-workflow-orchestrator/src/services/compliance.service.ts new file mode 100644 index 000000000..a3e5fee33 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/compliance.service.ts @@ -0,0 +1,739 @@ +/** + * Compliance Reporting Service + * Phase 10 (v5.5.0): Enterprise Features - SOC2/GDPR Compliance + * + * Provides compliance reporting capabilities: + * - SOC2 Trust Service Criteria reports + * - GDPR Article 30 data processing records + * - Data Subject Access Requests (DSAR) + * - Data retention policy enforcement + * - Automated compliance report generation + */ + +import { Injectable, Logger, NotFoundException, BadRequestException } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; + +// ============================================================================ +// Types and Interfaces +// ============================================================================ + +export enum ReportType { + SOC2_SECURITY = 'soc2_security', + SOC2_AVAILABILITY = 'soc2_availability', + SOC2_CONFIDENTIALITY = 'soc2_confidentiality', + SOC2_PROCESSING_INTEGRITY = 'soc2_processing_integrity', + SOC2_PRIVACY = 'soc2_privacy', + GDPR_ARTICLE30 = 'gdpr_article30', + GDPR_DSAR = 'gdpr_dsar', + DATA_RETENTION = 'data_retention', + ACCESS_REVIEW = 'access_review', +} + +export enum LegalBasis { + CONSENT = 'consent', + CONTRACT = 'contract', + LEGAL_OBLIGATION = 'legal_obligation', + VITAL_INTERESTS = 'vital_interests', + PUBLIC_TASK = 'public_task', + LEGITIMATE_INTERESTS = 'legitimate_interests', +} + +export interface GenerateReportInput { + tenantId: string; + reportType: ReportType; + reportName?: string; + startDate: Date; + endDate: Date; + generatedBy?: string; +} + +export interface DataProcessingRecordInput { + activityName: string; + activityDescription?: string; + dataSubjectCategories: string[]; + personalDataCategories: string[]; + legalBasis: LegalBasis; + legalBasisDetails?: string; + processingPurposes: string[]; + recipientCategories?: string[]; + thirdCountryTransfers?: string[]; + transferSafeguards?: string; + retentionPeriod?: string; + retentionCriteria?: string; + technicalMeasures?: string[]; + organizationalMeasures?: string[]; +} + +export interface DSARRequest { + tenantId: string; + subjectEmail: string; + subjectName?: string; + requestType: 'access' | 'rectification' | 'erasure' | 'portability' | 'restriction' | 'objection'; + requestDetails?: string; + verificationMethod?: string; + verifiedAt?: Date; +} + +// ============================================================================ +// SOC2 Trust Service Criteria +// ============================================================================ + +const SOC2_CRITERIA = { + security: [ + { id: 'CC1.1', name: 'Control Environment', description: 'Demonstrates commitment to integrity and ethical values' }, + { id: 'CC1.2', name: 'Board Oversight', description: 'Board exercises oversight responsibility' }, + { id: 'CC2.1', name: 'Information Communication', description: 'Internal communication of objectives' }, + { id: 'CC3.1', name: 'Risk Assessment', description: 'Identifies and analyzes risks' }, + { id: 'CC4.1', name: 'Monitoring', description: 'Evaluates and communicates deficiencies' }, + { id: 'CC5.1', name: 'Control Activities', description: 'Selects and develops control activities' }, + { id: 'CC6.1', name: 'Logical Access', description: 'Controls logical access to systems' }, + { id: 'CC6.6', name: 'System Operations', description: 'Manages changes to system components' }, + { id: 'CC7.1', name: 'Incident Detection', description: 'Detects and monitors security events' }, + { id: 'CC7.2', name: 'Incident Response', description: 'Responds to identified incidents' }, + ], + availability: [ + { id: 'A1.1', name: 'Capacity Management', description: 'Maintains current capacity' }, + { id: 'A1.2', name: 'Recovery Planning', description: 'Plans for system recovery' }, + ], + confidentiality: [ + { id: 'C1.1', name: 'Confidential Information', description: 'Identifies confidential information' }, + { id: 'C1.2', name: 'Disposal', description: 'Disposes of confidential information' }, + ], + processing_integrity: [ + { id: 'PI1.1', name: 'Processing Accuracy', description: 'Ensures processing accuracy and completeness' }, + ], + privacy: [ + { id: 'P1.1', name: 'Privacy Notice', description: 'Provides notice about privacy practices' }, + { id: 'P2.1', name: 'Choice and Consent', description: 'Obtains consent for data collection' }, + { id: 'P3.1', name: 'Collection', description: 'Collects personal information as disclosed' }, + { id: 'P4.1', name: 'Use and Retention', description: 'Uses and retains information appropriately' }, + { id: 'P5.1', name: 'Access', description: 'Provides access to personal information' }, + { id: 'P6.1', name: 'Disclosure', description: 'Discloses information to third parties as consented' }, + { id: 'P7.1', name: 'Quality', description: 'Maintains accurate personal information' }, + { id: 'P8.1', name: 'Monitoring', description: 'Monitors compliance with privacy policies' }, + ], +}; + +// ============================================================================ +// Compliance Service +// ============================================================================ + +@Injectable() +export class ComplianceService { + private readonly logger = new Logger(ComplianceService.name); + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.logger.log('ComplianceService initialized'); + } + + // ========================================================================== + // Report Generation + // ========================================================================== + + /** + * Generate a compliance report + */ + async generateReport(input: GenerateReportInput): Promise { + const reportPeriod = this.formatReportPeriod(input.startDate, input.endDate); + const reportName = input.reportName || `${input.reportType} Report - ${reportPeriod}`; + + // Create report record + const report = await this.prisma.complianceReport.create({ + data: { + tenantId: input.tenantId, + reportType: input.reportType, + reportName, + reportPeriod, + startDate: input.startDate, + endDate: input.endDate, + status: 'generating', + generatedBy: input.generatedBy || 'system', + }, + }); + + try { + // Generate report content based on type + let findings: any[] = []; + let metrics: Record = {}; + let summary = ''; + + switch (input.reportType) { + case ReportType.SOC2_SECURITY: + ({ findings, metrics, summary } = await this.generateSOC2SecurityReport(input)); + break; + case ReportType.SOC2_AVAILABILITY: + ({ findings, metrics, summary } = await this.generateSOC2AvailabilityReport(input)); + break; + case ReportType.GDPR_ARTICLE30: + ({ findings, metrics, summary } = await this.generateGDPRArticle30Report(input)); + break; + case ReportType.DATA_RETENTION: + ({ findings, metrics, summary } = await this.generateDataRetentionReport(input)); + break; + case ReportType.ACCESS_REVIEW: + ({ findings, metrics, summary } = await this.generateAccessReviewReport(input)); + break; + default: + ({ findings, metrics, summary } = await this.generateGenericReport(input)); + } + + // Update report with content + const updatedReport = await this.prisma.complianceReport.update({ + where: { id: report.id }, + data: { + findings, + metrics, + summary, + status: 'completed', + generatedAt: new Date(), + expiresAt: new Date(Date.now() + 90 * 24 * 60 * 60 * 1000), // 90 days + }, + }); + + this.eventEmitter.emit('compliance.report.generated', { + tenantId: input.tenantId, + reportId: report.id, + reportType: input.reportType, + }); + + return updatedReport; + } catch (error: any) { + // Mark report as failed + await this.prisma.complianceReport.update({ + where: { id: report.id }, + data: { + status: 'failed', + summary: `Report generation failed: ${error.message}`, + }, + }); + + throw error; + } + } + + /** + * Get a compliance report by ID + */ + async getReport(tenantId: string, reportId: string): Promise { + const report = await this.prisma.complianceReport.findFirst({ + where: { id: reportId, tenantId }, + }); + + if (!report) { + throw new NotFoundException(`Report ${reportId} not found`); + } + + return report; + } + + /** + * List compliance reports for a tenant + */ + async listReports( + tenantId: string, + options: { + reportType?: ReportType; + status?: string; + limit?: number; + offset?: number; + } = {}, + ): Promise<{ reports: any[]; total: number }> { + const where: any = { tenantId }; + + if (options.reportType) { + where.reportType = options.reportType; + } + + if (options.status) { + where.status = options.status; + } + + const [reports, total] = await Promise.all([ + this.prisma.complianceReport.findMany({ + where, + orderBy: { createdAt: 'desc' }, + take: options.limit || 50, + skip: options.offset || 0, + }), + this.prisma.complianceReport.count({ where }), + ]); + + return { reports, total }; + } + + // ========================================================================== + // GDPR Data Processing Records + // ========================================================================== + + /** + * Create a data processing record (GDPR Article 30) + */ + async createDataProcessingRecord(tenantId: string, input: DataProcessingRecordInput): Promise { + const record = await this.prisma.dataProcessingRecord.create({ + data: { + tenantId, + activityName: input.activityName, + activityDescription: input.activityDescription, + dataSubjectCategories: input.dataSubjectCategories, + personalDataCategories: input.personalDataCategories, + legalBasis: input.legalBasis, + legalBasisDetails: input.legalBasisDetails, + processingPurposes: input.processingPurposes, + recipientCategories: input.recipientCategories || [], + thirdCountryTransfers: input.thirdCountryTransfers || [], + transferSafeguards: input.transferSafeguards, + retentionPeriod: input.retentionPeriod, + retentionCriteria: input.retentionCriteria, + technicalMeasures: input.technicalMeasures || [], + organizationalMeasures: input.organizationalMeasures || [], + status: 'active', + }, + }); + + this.logger.log(`Created data processing record: ${record.id}`); + return record; + } + + /** + * Update a data processing record + */ + async updateDataProcessingRecord( + tenantId: string, + recordId: string, + input: Partial, + ): Promise { + const existing = await this.prisma.dataProcessingRecord.findFirst({ + where: { id: recordId, tenantId }, + }); + + if (!existing) { + throw new NotFoundException(`Data processing record ${recordId} not found`); + } + + return this.prisma.dataProcessingRecord.update({ + where: { id: recordId }, + data: input, + }); + } + + /** + * List data processing records + */ + async listDataProcessingRecords( + tenantId: string, + options: { status?: string; legalBasis?: LegalBasis } = {}, + ): Promise { + const where: any = { tenantId }; + + if (options.status) { + where.status = options.status; + } + + if (options.legalBasis) { + where.legalBasis = options.legalBasis; + } + + return this.prisma.dataProcessingRecord.findMany({ + where, + orderBy: { createdAt: 'desc' }, + }); + } + + // ========================================================================== + // Data Subject Access Requests (DSAR) + // ========================================================================== + + /** + * Process a Data Subject Access Request + */ + async processDSAR(request: DSARRequest): Promise<{ + requestId: string; + dataFound: boolean; + personalData: any; + processingActivities: any[]; + }> { + const requestId = `dsar-${Date.now()}`; + + this.logger.log(`Processing DSAR for ${request.subjectEmail} (type: ${request.requestType})`); + + // Find all data related to this subject + const personalData = await this.findSubjectData(request.tenantId, request.subjectEmail); + + // Find processing activities that apply to this subject + const processingActivities = await this.prisma.dataProcessingRecord.findMany({ + where: { + tenantId: request.tenantId, + status: 'active', + }, + }); + + // Create DSAR report + await this.prisma.complianceReport.create({ + data: { + tenantId: request.tenantId, + reportType: ReportType.GDPR_DSAR, + reportName: `DSAR - ${request.subjectEmail} - ${request.requestType}`, + reportPeriod: new Date().toISOString().split('T')[0], + startDate: new Date(), + endDate: new Date(), + status: 'completed', + generatedAt: new Date(), + summary: `Data Subject Access Request for ${request.subjectEmail}`, + findings: [ + { + requestType: request.requestType, + subjectEmail: request.subjectEmail, + dataCategories: Object.keys(personalData), + processingActivitiesCount: processingActivities.length, + }, + ], + metrics: { + dataPointsFound: this.countDataPoints(personalData), + processingActivities: processingActivities.length, + }, + generatedBy: 'system', + }, + }); + + this.eventEmitter.emit('compliance.dsar.processed', { + tenantId: request.tenantId, + requestId, + requestType: request.requestType, + subjectEmail: request.subjectEmail, + }); + + return { + requestId, + dataFound: Object.keys(personalData).length > 0, + personalData, + processingActivities: processingActivities.map(p => ({ + activity: p.activityName, + purpose: p.processingPurposes, + legalBasis: p.legalBasis, + retention: p.retentionPeriod, + })), + }; + } + + /** + * Execute data erasure (right to be forgotten) + */ + async executeDataErasure( + tenantId: string, + subjectEmail: string, + options: { dryRun?: boolean } = {}, + ): Promise<{ + erasedCategories: string[]; + retainedCategories: string[]; + retentionReasons: Record; + }> { + const erasedCategories: string[] = []; + const retainedCategories: string[] = []; + const retentionReasons: Record = {}; + + // Categories that must be retained for legal compliance + const mandatoryRetention = ['audit_logs', 'billing_records', 'legal_holds']; + + if (!options.dryRun) { + // Anonymize user data in various tables + // This is a simplified example - actual implementation would be more comprehensive + + // Anonymize audit logs related to this user (actor email) + await this.prisma.auditLog.updateMany({ + where: { tenantId, actorEmail: subjectEmail }, + data: { actorEmail: 'anonymized@example.com', actorName: 'Anonymized User' }, + }); + erasedCategories.push('audit_logs_actor_data'); + + // Note: Audit event records are retained for compliance, only PII is anonymized + retainedCategories.push('audit_log_events'); + retentionReasons['audit_log_events'] = 'Legal compliance requirement - events retained, PII anonymized'; + } + + this.logger.log(`Data erasure ${options.dryRun ? '(dry run)' : ''} for ${subjectEmail}: erased ${erasedCategories.length} categories`); + + return { erasedCategories, retainedCategories, retentionReasons }; + } + + // ========================================================================== + // Report Generation Helpers + // ========================================================================== + + private async generateSOC2SecurityReport(input: GenerateReportInput): Promise<{ + findings: any[]; + metrics: Record; + summary: string; + }> { + const criteria = SOC2_CRITERIA.security; + const findings: any[] = []; + + // Gather metrics from audit logs + const auditStats = await this.prisma.auditLog.groupBy({ + by: ['eventType'], + where: { + tenantId: input.tenantId, + timestamp: { gte: input.startDate, lte: input.endDate }, + }, + _count: true, + }); + + // Evaluate each criterion + for (const criterion of criteria) { + const status = await this.evaluateSOC2Criterion(input.tenantId, criterion.id, input.startDate, input.endDate); + findings.push({ + criterionId: criterion.id, + name: criterion.name, + description: criterion.description, + status: status.status, + evidence: status.evidence, + recommendations: status.recommendations, + }); + } + + const passedCount = findings.filter(f => f.status === 'pass').length; + const totalCount = findings.length; + + return { + findings, + metrics: { + criteriaEvaluated: totalCount, + criteriaPassed: passedCount, + criteriaFailed: totalCount - passedCount, + complianceScore: Math.round((passedCount / totalCount) * 100), + auditEvents: auditStats.reduce((acc, s) => ({ ...acc, [s.eventType]: s._count }), {}), + }, + summary: `SOC2 Security evaluation: ${passedCount}/${totalCount} criteria passed (${Math.round((passedCount / totalCount) * 100)}% compliant)`, + }; + } + + private async generateSOC2AvailabilityReport(input: GenerateReportInput): Promise<{ + findings: any[]; + metrics: Record; + summary: string; + }> { + const findings = SOC2_CRITERIA.availability.map(c => ({ + criterionId: c.id, + name: c.name, + description: c.description, + status: 'needs_review', + evidence: [], + recommendations: ['Manual review required'], + })); + + return { + findings, + metrics: { criteriaCount: findings.length }, + summary: `SOC2 Availability report generated with ${findings.length} criteria for review`, + }; + } + + private async generateGDPRArticle30Report(input: GenerateReportInput): Promise<{ + findings: any[]; + metrics: Record; + summary: string; + }> { + const records = await this.prisma.dataProcessingRecord.findMany({ + where: { tenantId: input.tenantId, status: 'active' }, + }); + + const findings = records.map(r => ({ + activityName: r.activityName, + dataCategories: r.personalDataCategories, + legalBasis: r.legalBasis, + purposes: r.processingPurposes, + retention: r.retentionPeriod, + transfers: r.thirdCountryTransfers, + safeguards: r.transferSafeguards, + })); + + const byLegalBasis = records.reduce((acc: Record, r) => { + acc[r.legalBasis] = (acc[r.legalBasis] || 0) + 1; + return acc; + }, {}); + + return { + findings, + metrics: { + totalProcessingActivities: records.length, + byLegalBasis, + hasThirdCountryTransfers: records.some(r => r.thirdCountryTransfers.length > 0), + }, + summary: `GDPR Article 30 report: ${records.length} processing activities documented`, + }; + } + + private async generateDataRetentionReport(input: GenerateReportInput): Promise<{ + findings: any[]; + metrics: Record; + summary: string; + }> { + // Get counts of data by age + const now = new Date(); + const thirtyDaysAgo = new Date(now.getTime() - 30 * 24 * 60 * 60 * 1000); + const ninetyDaysAgo = new Date(now.getTime() - 90 * 24 * 60 * 60 * 1000); + const oneYearAgo = new Date(now.getTime() - 365 * 24 * 60 * 60 * 1000); + + const [goalRunsRecent, goalRunsOld, auditLogsRecent, auditLogsOld] = await Promise.all([ + this.prisma.goalRun.count({ where: { tenantId: input.tenantId, createdAt: { gte: thirtyDaysAgo } } }), + this.prisma.goalRun.count({ where: { tenantId: input.tenantId, createdAt: { lt: ninetyDaysAgo } } }), + this.prisma.auditLog.count({ where: { tenantId: input.tenantId, timestamp: { gte: thirtyDaysAgo } } }), + this.prisma.auditLog.count({ where: { tenantId: input.tenantId, timestamp: { lt: oneYearAgo } } }), + ]); + + return { + findings: [ + { category: 'goal_runs', recent: goalRunsRecent, old: goalRunsOld, retentionPolicy: '90 days' }, + { category: 'audit_logs', recent: auditLogsRecent, old: auditLogsOld, retentionPolicy: '365 days' }, + ], + metrics: { + totalGoalRuns: goalRunsRecent + goalRunsOld, + totalAuditLogs: auditLogsRecent + auditLogsOld, + oldDataPercentage: { + goalRuns: goalRunsOld > 0 ? Math.round((goalRunsOld / (goalRunsRecent + goalRunsOld)) * 100) : 0, + auditLogs: auditLogsOld > 0 ? Math.round((auditLogsOld / (auditLogsRecent + auditLogsOld)) * 100) : 0, + }, + }, + summary: `Data retention report: ${goalRunsOld} goal runs and ${auditLogsOld} audit logs exceed retention policy`, + }; + } + + private async generateAccessReviewReport(input: GenerateReportInput): Promise<{ + findings: any[]; + metrics: Record; + summary: string; + }> { + // Get unique actors from audit logs + const actors = await this.prisma.auditLog.groupBy({ + by: ['actorId', 'actorEmail', 'actorType'], + where: { + tenantId: input.tenantId, + timestamp: { gte: input.startDate, lte: input.endDate }, + }, + _count: true, + }); + + const findings = actors + .filter(a => a.actorId) + .map(a => ({ + actorId: a.actorId, + actorEmail: a.actorEmail, + actorType: a.actorType, + actionCount: a._count, + status: 'needs_review', + })); + + return { + findings, + metrics: { + uniqueActors: findings.length, + totalActions: actors.reduce((sum, a) => sum + a._count, 0), + byActorType: actors.reduce((acc: Record, a) => { + acc[a.actorType] = (acc[a.actorType] || 0) + a._count; + return acc; + }, {}), + }, + summary: `Access review: ${findings.length} unique actors performed actions during the period`, + }; + } + + private async generateGenericReport(input: GenerateReportInput): Promise<{ + findings: any[]; + metrics: Record; + summary: string; + }> { + return { + findings: [], + metrics: {}, + summary: `Generic compliance report for ${input.reportType}`, + }; + } + + private async evaluateSOC2Criterion( + tenantId: string, + criterionId: string, + startDate: Date, + endDate: Date, + ): Promise<{ status: string; evidence: string[]; recommendations: string[] }> { + // This is a simplified evaluation - real implementation would be more comprehensive + const evidence: string[] = []; + const recommendations: string[] = []; + + switch (criterionId) { + case 'CC6.1': // Logical Access + const accessEvents = await this.prisma.auditLog.count({ + where: { + tenantId, + timestamp: { gte: startDate, lte: endDate }, + eventType: { in: ['APPROVAL_APPROVED', 'APPROVAL_REJECTED'] }, + }, + }); + evidence.push(`${accessEvents} access control events logged`); + return { status: accessEvents > 0 ? 'pass' : 'needs_evidence', evidence, recommendations }; + + case 'CC7.1': // Incident Detection + const auditCount = await this.prisma.auditLog.count({ + where: { tenantId, timestamp: { gte: startDate, lte: endDate } }, + }); + evidence.push(`${auditCount} audit events captured`); + return { status: auditCount > 0 ? 'pass' : 'needs_evidence', evidence, recommendations }; + + default: + recommendations.push('Manual review required for this criterion'); + return { status: 'needs_review', evidence, recommendations }; + } + } + + private async findSubjectData(tenantId: string, email: string): Promise> { + const data: Record = {}; + + // Find audit logs + const auditLogs = await this.prisma.auditLog.findMany({ + where: { tenantId, actorEmail: email }, + take: 100, + }); + if (auditLogs.length > 0) { + data.auditLogs = auditLogs.map(l => ({ + timestamp: l.timestamp, + eventType: l.eventType, + resourceType: l.resourceType, + })); + } + + // Note: GoalRun model doesn't have user attribution + // Goal runs are tenant-scoped, not user-scoped in the current schema + // If user attribution is needed, extend the GoalRun model with a userId field + + return data; + } + + private countDataPoints(data: Record): number { + return Object.values(data).reduce((sum, arr) => sum + (Array.isArray(arr) ? arr.length : 1), 0); + } + + private formatReportPeriod(startDate: Date, endDate: Date): string { + const start = startDate.toISOString().split('T')[0]; + const end = endDate.toISOString().split('T')[0]; + return `${start}_to_${end}`; + } + + /** + * Get available report types + */ + getReportTypes(): Array<{ type: ReportType; name: string; description: string }> { + return [ + { type: ReportType.SOC2_SECURITY, name: 'SOC2 Security', description: 'SOC2 Trust Service Criteria - Security' }, + { type: ReportType.SOC2_AVAILABILITY, name: 'SOC2 Availability', description: 'SOC2 Trust Service Criteria - Availability' }, + { type: ReportType.SOC2_CONFIDENTIALITY, name: 'SOC2 Confidentiality', description: 'SOC2 Trust Service Criteria - Confidentiality' }, + { type: ReportType.GDPR_ARTICLE30, name: 'GDPR Article 30', description: 'Records of processing activities' }, + { type: ReportType.GDPR_DSAR, name: 'GDPR DSAR', description: 'Data Subject Access Request report' }, + { type: ReportType.DATA_RETENTION, name: 'Data Retention', description: 'Data retention policy compliance' }, + { type: ReportType.ACCESS_REVIEW, name: 'Access Review', description: 'User access review report' }, + ]; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/confidence-assessment.service.ts b/packages/bytebot-workflow-orchestrator/src/services/confidence-assessment.service.ts new file mode 100644 index 000000000..d426cc670 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/confidence-assessment.service.ts @@ -0,0 +1,540 @@ +/** + * Confidence Assessment Service + * v1.0.0: Nice-to-Have Enhancement for Autonomous Decision Making + * + * Implements multi-dimensional confidence scoring for AI agent decisions: + * - Self-reported confidence from AI response analysis + * - Task clarity assessment + * - Context sufficiency evaluation + * - Risk-based threshold adjustment + * + * Used by the orchestrator to determine when tasks can proceed autonomously + * vs when human review is needed. + * + * @see /docs/CONTEXT_PROPAGATION_FIX_JAN_2026.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; + +// Confidence dimensions for multi-factor assessment +export interface ConfidenceDimensions { + taskClarity: number; // How well-defined is the task? (0-1) + contextSufficiency: number; // Is there enough context? (0-1) + domainFamiliarity: number; // Is this a well-understood domain? (0-1) + riskLevel: number; // What's the potential impact of errors? (0-1) + reversibility: number; // Can the action be undone? (0-1) +} + +// Composite confidence assessment result +export interface ConfidenceAssessment { + overall: number; // Combined score (0-1) + dimensions: ConfidenceDimensions; // Individual dimension scores + recommendation: 'proceed' | 'review' | 'escalate'; // Action recommendation + reasoning: string; // Explanation for the assessment + thresholds: { + autoApprove: number; + review: number; + escalate: number; + }; +} + +// Task context for assessment +export interface TaskAssessmentContext { + taskId?: string; + goalRunId?: string; + stepDescription: string; + goalContext?: string; + previousStepsCount: number; + previousStepsSuccessRate: number; + hasDesktopRequirement: boolean; + estimatedImpact: 'low' | 'medium' | 'high' | 'critical'; + isReversible: boolean; + retryCount: number; +} + +// Confidence metrics for monitoring +export interface ConfidenceMetrics { + totalAssessments: number; + autoApproveRate: number; + reviewRate: number; + escalateRate: number; + averageConfidence: number; + calibrationError: number; // How well confidence predicts success +} + +// Threshold configuration +interface ThresholdConfig { + autoApproveThreshold: number; + reviewThreshold: number; + escalateThreshold: number; + impactAdjustments: Record; + categoryThresholds: Record; +} + +@Injectable() +export class ConfidenceAssessmentService { + private readonly logger = new Logger(ConfidenceAssessmentService.name); + + // Default dimension weights for composite scoring + private readonly dimensionWeights: ConfidenceDimensions = { + taskClarity: 0.25, + contextSufficiency: 0.25, + domainFamiliarity: 0.20, + riskLevel: 0.15, + reversibility: 0.15, + }; + + // Threshold configuration (can be overridden via environment) + private readonly thresholdConfig: ThresholdConfig; + + // Metrics tracking + private metrics: { + assessments: Array<{ + confidence: number; + recommendation: string; + timestamp: Date; + outcome?: 'success' | 'failure'; + }>; + } = { assessments: [] }; + + constructor( + private readonly configService: ConfigService, + private readonly prisma: PrismaService, + private readonly eventEmitter: EventEmitter2, + ) { + // Load threshold configuration from environment + this.thresholdConfig = { + autoApproveThreshold: parseFloat( + this.configService.get('CONFIDENCE_AUTO_APPROVE_THRESHOLD', '0.85'), + ), + reviewThreshold: parseFloat( + this.configService.get('CONFIDENCE_REVIEW_THRESHOLD', '0.60'), + ), + escalateThreshold: parseFloat( + this.configService.get('CONFIDENCE_ESCALATE_THRESHOLD', '0.40'), + ), + impactAdjustments: { + low: -0.05, + medium: 0, + high: 0.05, + critical: 0.10, + }, + categoryThresholds: { + browser_automation: { autoApprove: 0.80, review: 0.55 }, + data_entry: { autoApprove: 0.85, review: 0.60 }, + api_interaction: { autoApprove: 0.90, review: 0.70 }, + file_manipulation: { autoApprove: 0.88, review: 0.65 }, + }, + }; + + this.logger.log( + `Confidence thresholds: auto=${this.thresholdConfig.autoApproveThreshold}, ` + + `review=${this.thresholdConfig.reviewThreshold}, ` + + `escalate=${this.thresholdConfig.escalateThreshold}`, + ); + } + + /** + * Assess confidence for a task execution decision + * + * This is the main entry point for confidence scoring. It: + * 1. Evaluates multiple confidence dimensions + * 2. Calculates a composite score + * 3. Applies risk-based threshold adjustments + * 4. Returns a recommendation (proceed/review/escalate) + */ + assessConfidence(context: TaskAssessmentContext): ConfidenceAssessment { + // Calculate individual dimensions + const dimensions = this.calculateDimensions(context); + + // Calculate composite confidence score + const dimensionScore = this.calculateCompositeScore(dimensions); + + // Get effective thresholds (adjusted for risk) + const thresholds = this.getEffectiveThresholds(context); + + // Make recommendation based on score and thresholds + const recommendation = this.makeRecommendation( + dimensionScore, + thresholds, + context, + ); + + // Generate reasoning + const reasoning = this.generateReasoning( + dimensionScore, + dimensions, + recommendation, + context, + ); + + const assessment: ConfidenceAssessment = { + overall: dimensionScore, + dimensions, + recommendation, + reasoning, + thresholds, + }; + + // Track for metrics + this.recordAssessment(assessment); + + // Emit event for monitoring + this.eventEmitter.emit('confidence.assessed', { + taskId: context.taskId, + goalRunId: context.goalRunId, + confidence: dimensionScore, + recommendation, + }); + + this.logger.debug( + `Confidence assessment for step "${context.stepDescription.slice(0, 50)}...": ` + + `${(dimensionScore * 100).toFixed(1)}% → ${recommendation}`, + ); + + return assessment; + } + + /** + * Record outcome for calibration (called when task completes) + */ + recordOutcome( + taskId: string, + outcome: 'success' | 'failure', + ): void { + // Find recent assessment for this task and update outcome + const recentAssessment = this.metrics.assessments.find( + (a) => !a.outcome && Date.now() - a.timestamp.getTime() < 3600000, // 1 hour + ); + + if (recentAssessment) { + recentAssessment.outcome = outcome; + } + + this.logger.debug(`Recorded outcome for task ${taskId}: ${outcome}`); + } + + /** + * Get current confidence metrics for monitoring + */ + getMetrics(): ConfidenceMetrics { + const assessments = this.metrics.assessments; + const total = assessments.length; + + if (total === 0) { + return { + totalAssessments: 0, + autoApproveRate: 0, + reviewRate: 0, + escalateRate: 0, + averageConfidence: 0, + calibrationError: 0, + }; + } + + const autoApproveCount = assessments.filter( + (a) => a.recommendation === 'proceed', + ).length; + const reviewCount = assessments.filter( + (a) => a.recommendation === 'review', + ).length; + const escalateCount = assessments.filter( + (a) => a.recommendation === 'escalate', + ).length; + + const avgConfidence = assessments.reduce( + (sum, a) => sum + a.confidence, 0, + ) / total; + + // Calculate calibration error (ECE) for assessments with outcomes + const calibrationError = this.calculateCalibrationError(); + + return { + totalAssessments: total, + autoApproveRate: autoApproveCount / total, + reviewRate: reviewCount / total, + escalateRate: escalateCount / total, + averageConfidence: avgConfidence, + calibrationError, + }; + } + + /** + * Update thresholds dynamically (for A/B testing or tuning) + */ + updateThresholds(updates: Partial): void { + Object.assign(this.thresholdConfig, updates); + + this.logger.log( + `Updated confidence thresholds: auto=${this.thresholdConfig.autoApproveThreshold}, ` + + `review=${this.thresholdConfig.reviewThreshold}`, + ); + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + /** + * Calculate individual confidence dimensions + */ + private calculateDimensions(context: TaskAssessmentContext): ConfidenceDimensions { + return { + taskClarity: this.assessTaskClarity(context), + contextSufficiency: this.assessContextSufficiency(context), + domainFamiliarity: this.assessDomainFamiliarity(context), + riskLevel: this.assessRiskLevel(context), + reversibility: context.isReversible ? 0.9 : 0.3, + }; + } + + /** + * Assess how clearly defined the task is + */ + private assessTaskClarity(context: TaskAssessmentContext): number { + let clarity = 0.5; // Base + + // Longer descriptions tend to be clearer + const descLength = context.stepDescription.length; + if (descLength > 100) clarity += 0.15; + else if (descLength > 50) clarity += 0.10; + + // Having goal context improves clarity + if (context.goalContext) clarity += 0.20; + + // Action verbs in description indicate clearer intent + const actionVerbs = [ + 'click', 'type', 'navigate', 'search', 'open', 'close', + 'submit', 'select', 'enter', 'fill', 'download', 'upload', + ]; + const hasActionVerb = actionVerbs.some((verb) => + context.stepDescription.toLowerCase().includes(verb), + ); + if (hasActionVerb) clarity += 0.10; + + return Math.min(clarity, 1); + } + + /** + * Assess if there's sufficient context + */ + private assessContextSufficiency(context: TaskAssessmentContext): number { + let sufficiency = 0.3; // Base + + // Goal context provides crucial information + if (context.goalContext) { + const contextLength = context.goalContext.length; + if (contextLength > 200) sufficiency += 0.30; + else if (contextLength > 100) sufficiency += 0.20; + else sufficiency += 0.10; + } + + // Previous steps provide continuity context + if (context.previousStepsCount > 0) { + const stepsBonus = Math.min(context.previousStepsCount * 0.05, 0.20); + sufficiency += stepsBonus; + + // High success rate in previous steps indicates good context + if (context.previousStepsSuccessRate > 0.8) { + sufficiency += 0.15; + } + } + + return Math.min(sufficiency, 1); + } + + /** + * Assess domain familiarity (browser automation, APIs, etc.) + */ + private assessDomainFamiliarity(context: TaskAssessmentContext): number { + // Desktop/browser automation is well-understood + if (context.hasDesktopRequirement) { + return 0.75; + } + + // Default for general tasks + return 0.60; + } + + /** + * Assess risk level based on impact + */ + private assessRiskLevel(context: TaskAssessmentContext): number { + const impactRisk: Record = { + low: 0.20, + medium: 0.50, + high: 0.75, + critical: 0.95, + }; + + let risk = impactRisk[context.estimatedImpact] || 0.50; + + // Retry attempts indicate potential issues + if (context.retryCount > 0) { + risk += Math.min(context.retryCount * 0.10, 0.20); + } + + return Math.min(risk, 1); + } + + /** + * Calculate composite score from dimensions + */ + private calculateCompositeScore(dimensions: ConfidenceDimensions): number { + let score = 0; + + for (const [key, weight] of Object.entries(this.dimensionWeights)) { + const dimensionValue = dimensions[key as keyof ConfidenceDimensions]; + score += dimensionValue * weight; + } + + // Invert risk (high risk = lower confidence) + // The riskLevel dimension is already weighted, but we apply an additional penalty + const riskPenalty = dimensions.riskLevel * 0.15; + score = Math.max(0, score - riskPenalty); + + return Math.min(Math.max(score, 0), 1); + } + + /** + * Get effective thresholds with risk adjustments + */ + private getEffectiveThresholds(context: TaskAssessmentContext): { + autoApprove: number; + review: number; + escalate: number; + } { + const { autoApproveThreshold, reviewThreshold, escalateThreshold, impactAdjustments } = + this.thresholdConfig; + + // Apply impact-based adjustment + const adjustment = impactAdjustments[context.estimatedImpact] || 0; + + // Apply retry penalty (stricter thresholds after failures) + const retryPenalty = Math.min(context.retryCount * 0.05, 0.15); + + return { + autoApprove: Math.min(autoApproveThreshold + adjustment + retryPenalty, 0.99), + review: Math.min(reviewThreshold + adjustment + retryPenalty, autoApproveThreshold), + escalate: escalateThreshold, + }; + } + + /** + * Make recommendation based on score and thresholds + */ + private makeRecommendation( + score: number, + thresholds: { autoApprove: number; review: number; escalate: number }, + context: TaskAssessmentContext, + ): 'proceed' | 'review' | 'escalate' { + // Auto-approve: high confidence + (reversible OR very high confidence) + if (score >= thresholds.autoApprove) { + if (context.isReversible || score >= 0.95) { + return 'proceed'; + } + // High confidence but irreversible - still require review + return 'review'; + } + + // Review: medium confidence + if (score >= thresholds.review) { + return 'review'; + } + + // Escalate: low confidence + return 'escalate'; + } + + /** + * Generate human-readable reasoning + */ + private generateReasoning( + score: number, + dimensions: ConfidenceDimensions, + recommendation: string, + context: TaskAssessmentContext, + ): string { + const percentage = (score * 100).toFixed(1); + + // Identify low-scoring dimensions + const lowDimensions = Object.entries(dimensions) + .filter(([key, value]) => { + // Risk is inverted (high = bad) + if (key === 'riskLevel') return value > 0.6; + return value < 0.5; + }) + .map(([key]) => key.replace(/([A-Z])/g, ' $1').toLowerCase().trim()); + + const parts: string[] = [`Confidence: ${percentage}%.`]; + + if (lowDimensions.length > 0) { + parts.push(`Concerns: ${lowDimensions.join(', ')}.`); + } + + if (context.retryCount > 0) { + parts.push(`Previous attempts: ${context.retryCount}.`); + } + + parts.push(`Recommendation: ${recommendation}.`); + + return parts.join(' '); + } + + /** + * Record assessment for metrics tracking + */ + private recordAssessment(assessment: ConfidenceAssessment): void { + this.metrics.assessments.push({ + confidence: assessment.overall, + recommendation: assessment.recommendation, + timestamp: new Date(), + }); + + // Keep only last 1000 assessments + if (this.metrics.assessments.length > 1000) { + this.metrics.assessments = this.metrics.assessments.slice(-1000); + } + } + + /** + * Calculate Expected Calibration Error (ECE) + * Measures how well confidence predictions match actual outcomes + */ + private calculateCalibrationError(): number { + const withOutcomes = this.metrics.assessments.filter((a) => a.outcome); + if (withOutcomes.length < 10) return 0; // Not enough data + + // Group into bins + const bins: Array<{ confidence: number; success: boolean }[]> = [ + [], [], [], [], [], [], [], [], [], [], + ]; + + for (const assessment of withOutcomes) { + const binIndex = Math.min(Math.floor(assessment.confidence * 10), 9); + bins[binIndex].push({ + confidence: assessment.confidence, + success: assessment.outcome === 'success', + }); + } + + // Calculate ECE + let totalError = 0; + let totalCount = 0; + + for (const bin of bins) { + if (bin.length === 0) continue; + + const avgConfidence = bin.reduce((s, a) => s + a.confidence, 0) / bin.length; + const actualSuccessRate = bin.filter((a) => a.success).length / bin.length; + + totalError += bin.length * Math.abs(avgConfidence - actualSuccessRate); + totalCount += bin.length; + } + + return totalCount > 0 ? totalError / totalCount : 0; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/context-summarization.service.ts b/packages/bytebot-workflow-orchestrator/src/services/context-summarization.service.ts new file mode 100644 index 000000000..1b63f958e --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/context-summarization.service.ts @@ -0,0 +1,412 @@ +/** + * Context Summarization Service + * v1.0.0: Adaptive Context Management for Long-Running Agents + * + * Implements industry-standard context window management patterns: + * - LangChain: ConversationSummaryBufferMemory pattern + * - Anthropic: Hierarchical context with recent detailed, older summarized + * - OpenAI: Token counting and smart truncation + * + * Key Features: + * 1. Token-aware context management (stays within model limits) + * 2. Hierarchical summarization (recent=detailed, older=summarized) + * 3. Key fact preservation (never loses critical information) + * 4. LLM-driven summarization for coherent compression + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; + +// Context window configuration by model +const MODEL_CONTEXT_LIMITS: Record = { + 'claude-3-5-sonnet-20241022': 200000, + 'claude-3-opus-20240229': 200000, + 'claude-3-haiku-20240307': 200000, + 'gpt-4-turbo': 128000, + 'gpt-4o': 128000, + 'gpt-4': 8192, + 'default': 100000, +}; + +// Rough token estimation (4 chars per token average) +const CHARS_PER_TOKEN = 4; + +// Context item types +export interface ContextItem { + id: string; + type: 'step_result' | 'observation' | 'action' | 'thought' | 'summary'; + timestamp: Date; + content: string; + metadata?: { + stepNumber?: number; + importance?: 'critical' | 'high' | 'medium' | 'low'; + keyFacts?: string[]; + }; +} + +// Summarization result +export interface SummarizationResult { + originalTokens: number; + compressedTokens: number; + compressionRatio: number; + summaryItems: ContextItem[]; + preservedKeyFacts: string[]; +} + +// Context window status +export interface ContextWindowStatus { + currentTokens: number; + maxTokens: number; + utilizationPercent: number; + needsSummarization: boolean; + itemCount: number; + oldestItemAge: number; // in minutes +} + +@Injectable() +export class ContextSummarizationService { + private readonly logger = new Logger(ContextSummarizationService.name); + private readonly enabled: boolean; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + private readonly llmModel: string; + + // Configuration thresholds + private readonly summarizationThreshold: number; // % of context before summarizing + private readonly recentItemsToPreserve: number; // Items to keep detailed + private readonly maxSummaryLength: number; // Max tokens for summary + private readonly keyFactsLimit: number; // Max key facts to preserve + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.enabled = this.configService.get('CONTEXT_SUMMARIZATION_ENABLED', 'true') === 'true'; + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + this.llmModel = this.configService.get('LLM_MODEL', 'claude-3-5-sonnet-20241022'); + + // Thresholds + this.summarizationThreshold = parseInt( + this.configService.get('SUMMARIZATION_THRESHOLD_PERCENT', '70'), + 10, + ); + this.recentItemsToPreserve = parseInt( + this.configService.get('RECENT_ITEMS_TO_PRESERVE', '5'), + 10, + ); + this.maxSummaryLength = parseInt( + this.configService.get('MAX_SUMMARY_TOKENS', '2000'), + 10, + ); + this.keyFactsLimit = parseInt( + this.configService.get('KEY_FACTS_LIMIT', '20'), + 10, + ); + + this.logger.log( + `Context summarization ${this.enabled ? 'enabled' : 'disabled'} ` + + `(threshold: ${this.summarizationThreshold}%, preserve: ${this.recentItemsToPreserve} items)`, + ); + } + + /** + * Check if context needs summarization + */ + needsSummarization(items: ContextItem[], modelId?: string): boolean { + if (!this.enabled) return false; + + const status = this.getContextStatus(items, modelId); + return status.needsSummarization; + } + + /** + * Get current context window status + */ + getContextStatus(items: ContextItem[], modelId?: string): ContextWindowStatus { + const model = modelId || this.llmModel; + const maxTokens = MODEL_CONTEXT_LIMITS[model] || MODEL_CONTEXT_LIMITS['default']; + const currentTokens = this.estimateTokens(items); + const utilizationPercent = (currentTokens / maxTokens) * 100; + + const oldestItem = items.length > 0 + ? items.reduce((oldest, item) => + item.timestamp < oldest.timestamp ? item : oldest + ) + : null; + + const oldestItemAge = oldestItem + ? (Date.now() - oldestItem.timestamp.getTime()) / 60000 + : 0; + + return { + currentTokens, + maxTokens, + utilizationPercent, + needsSummarization: utilizationPercent >= this.summarizationThreshold, + itemCount: items.length, + oldestItemAge, + }; + } + + /** + * Summarize context items using hierarchical approach + * + * Strategy: + * 1. Preserve N most recent items in full detail + * 2. Extract key facts from older items + * 3. Create LLM-generated summary of older items + * 4. Return combined context that fits within limits + */ + async summarizeContext( + items: ContextItem[], + goalDescription: string, + modelId?: string, + ): Promise { + const originalTokens = this.estimateTokens(items); + + if (items.length <= this.recentItemsToPreserve) { + // Nothing to summarize + return { + originalTokens, + compressedTokens: originalTokens, + compressionRatio: 1, + summaryItems: items, + preservedKeyFacts: [], + }; + } + + this.logger.log( + `Summarizing context: ${items.length} items, ${originalTokens} tokens`, + ); + + // Split items: recent (preserve) vs older (summarize) + const sortedItems = [...items].sort( + (a, b) => b.timestamp.getTime() - a.timestamp.getTime(), + ); + const recentItems = sortedItems.slice(0, this.recentItemsToPreserve); + const olderItems = sortedItems.slice(this.recentItemsToPreserve); + + // Extract key facts from all items (especially older ones) + const allKeyFacts = this.extractKeyFacts(olderItems); + + // Generate LLM summary of older items + const summary = await this.generateSummary(olderItems, goalDescription); + + // Create summary item + const summaryItem: ContextItem = { + id: `summary-${Date.now()}`, + type: 'summary', + timestamp: new Date(), + content: summary, + metadata: { + importance: 'high', + keyFacts: allKeyFacts.slice(0, this.keyFactsLimit), + }, + }; + + // Combine: summary first, then recent items + const summaryItems = [summaryItem, ...recentItems.reverse()]; + const compressedTokens = this.estimateTokens(summaryItems); + + const result: SummarizationResult = { + originalTokens, + compressedTokens, + compressionRatio: compressedTokens / originalTokens, + summaryItems, + preservedKeyFacts: allKeyFacts.slice(0, this.keyFactsLimit), + }; + + // Emit event for monitoring + this.eventEmitter.emit('context.summarized', { + originalTokens, + compressedTokens, + compressionRatio: result.compressionRatio, + itemsSummarized: olderItems.length, + itemsPreserved: recentItems.length, + }); + + this.logger.log( + `Context summarized: ${originalTokens} -> ${compressedTokens} tokens ` + + `(${Math.round(result.compressionRatio * 100)}% of original)`, + ); + + return result; + } + + /** + * Estimate token count for items + */ + estimateTokens(items: ContextItem[]): number { + let totalChars = 0; + for (const item of items) { + totalChars += item.content.length; + if (item.metadata?.keyFacts) { + totalChars += item.metadata.keyFacts.join(' ').length; + } + } + return Math.ceil(totalChars / CHARS_PER_TOKEN); + } + + /** + * Estimate tokens for a string + */ + estimateStringTokens(text: string): number { + return Math.ceil(text.length / CHARS_PER_TOKEN); + } + + /** + * Extract key facts from context items + */ + private extractKeyFacts(items: ContextItem[]): string[] { + const facts: string[] = []; + + for (const item of items) { + // Get explicit key facts from metadata + if (item.metadata?.keyFacts) { + facts.push(...item.metadata.keyFacts); + } + + // Extract facts from step results + if (item.type === 'step_result' && item.content) { + // Look for common patterns that indicate key information + const patterns = [ + /found:?\s*(.+?)(?:\.|$)/gi, + /result:?\s*(.+?)(?:\.|$)/gi, + /confirmed:?\s*(.+?)(?:\.|$)/gi, + /selected:?\s*(.+?)(?:\.|$)/gi, + /price:?\s*\$?[\d,]+/gi, + /\$[\d,]+(?:\.\d{2})?/g, + ]; + + for (const pattern of patterns) { + const matches = item.content.match(pattern); + if (matches) { + facts.push(...matches.slice(0, 3)); // Limit per pattern + } + } + } + } + + // Deduplicate and limit + const uniqueFacts = [...new Set(facts)]; + return uniqueFacts.slice(0, this.keyFactsLimit); + } + + /** + * Generate LLM summary of older items + */ + private async generateSummary( + items: ContextItem[], + goalDescription: string, + ): Promise { + if (!this.llmApiKey || items.length === 0) { + // Fallback to simple concatenation + return this.generateFallbackSummary(items); + } + + const itemsText = items.map(item => + `[${item.type.toUpperCase()}] ${item.content}` + ).join('\n\n'); + + const prompt = `You are summarizing the history of an AI agent working on a goal. + +GOAL: ${goalDescription} + +HISTORY TO SUMMARIZE: +${itemsText} + +Create a concise summary that: +1. Captures the key actions taken +2. Preserves important facts, numbers, and decisions +3. Notes any errors or issues encountered +4. Maintains context needed for continuing the task + +Keep the summary under ${this.maxSummaryLength * CHARS_PER_TOKEN} characters. +Focus on WHAT was accomplished and WHAT was learned, not HOW. + +Summary:`; + + try { + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: 'claude-3-haiku-20240307', // Use fast model for summarization + max_tokens: this.maxSummaryLength, + messages: [{ role: 'user', content: prompt }], + }), + }); + + if (!response.ok) { + throw new Error(`LLM API error: ${response.status}`); + } + + const data = await response.json(); + return data.content?.[0]?.text || this.generateFallbackSummary(items); + } catch (error) { + this.logger.warn(`LLM summarization failed: ${(error as Error).message}`); + return this.generateFallbackSummary(items); + } + } + + /** + * Generate fallback summary without LLM + */ + private generateFallbackSummary(items: ContextItem[]): string { + const stepResults = items.filter(i => i.type === 'step_result'); + const actions = items.filter(i => i.type === 'action'); + + const lines: string[] = [ + `[SUMMARY] ${items.length} earlier actions were summarized:`, + ]; + + // Add step results briefly + if (stepResults.length > 0) { + lines.push(`- Completed ${stepResults.length} steps`); + for (const result of stepResults.slice(0, 3)) { + const brief = result.content.substring(0, 100); + lines.push(` * ${brief}${result.content.length > 100 ? '...' : ''}`); + } + if (stepResults.length > 3) { + lines.push(` * ... and ${stepResults.length - 3} more steps`); + } + } + + // Add actions briefly + if (actions.length > 0) { + lines.push(`- Performed ${actions.length} actions`); + } + + return lines.join('\n'); + } + + /** + * Convert step results to context items + */ + convertStepsToContextItems(steps: Array<{ + order: number; + description: string; + status: string; + actualOutcome?: string | null; + completedAt?: Date | null; + }>): ContextItem[] { + return steps.map(step => ({ + id: `step-${step.order}`, + type: 'step_result' as const, + timestamp: step.completedAt || new Date(), + content: step.actualOutcome || `Step ${step.order}: ${step.description} [${step.status}]`, + metadata: { + stepNumber: step.order, + importance: step.status === 'COMPLETED' ? 'high' : 'medium', + }, + })); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/cross-goal-learning.service.ts b/packages/bytebot-workflow-orchestrator/src/services/cross-goal-learning.service.ts new file mode 100644 index 000000000..c0fd6888c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/cross-goal-learning.service.ts @@ -0,0 +1,630 @@ +/** + * Cross-Goal Learning Service + * v1.0.0: Knowledge Transfer Between Related Goals + * + * Implements industry-standard patterns for learning across tasks: + * - AutoGPT: Memory persistence and retrieval across sessions + * - MemGPT: Hierarchical memory with LLM-managed recall + * - LangChain: Experience replay and chain-of-thought caching + * + * Key Features: + * 1. Goal similarity detection (semantic and structural) + * 2. Success pattern transfer from past goals + * 3. Failure avoidance from learned mistakes + * 4. Reusable step/plan suggestions + * 5. Context injection from related past executions + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS_V2.md + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { KnowledgeExtractionService, ExtractedFact } from './knowledge-extraction.service'; +import { EntityResolutionService, ResolvedEntity } from './entity-resolution.service'; + +// Goal Similarity Result +export interface GoalSimilarity { + goalRunId: string; + goalDescription: string; + similarityScore: number; // 0-1 + matchType: 'semantic' | 'structural' | 'entity' | 'outcome'; + relevantFacts: ExtractedFact[]; + relevantEntities: ResolvedEntity[]; + status: string; + outcome?: string; +} + +// Learned Experience +export interface LearnedExperience { + id: string; + goalRunId: string; + goalDescription: string; + experienceType: 'success_pattern' | 'failure_lesson' | 'optimization' | 'shortcut'; + description: string; + applicableWhen: string[]; // Conditions when this applies + steps?: Array<{ + order: number; + description: string; + outcome: string; + }>; + impact: 'high' | 'medium' | 'low'; + usageCount: number; + lastUsedAt?: Date; + createdAt: Date; +} + +// Context Suggestion for new goals +export interface ContextSuggestion { + source: 'past_goal' | 'learned_experience' | 'entity_knowledge'; + content: string; + relevance: number; // 0-1 + sourceGoalId?: string; + experienceId?: string; +} + +// Learning Summary +export interface LearningSummary { + totalExperiences: number; + successPatterns: number; + failureLessons: number; + optimizations: number; + averageReuse: number; + topExperiences: LearnedExperience[]; +} + +@Injectable() +export class CrossGoalLearningService implements OnModuleInit { + private readonly logger = new Logger(CrossGoalLearningService.name); + private readonly enabled: boolean; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + + // Learned experiences storage + private experiences: Map = new Map(); + + // Goal embeddings cache (for similarity) + private goalEmbeddings: Map = new Map(); + + // Configuration + private readonly minSimilarityThreshold: number; + private readonly maxSimilarGoals: number; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly prisma: PrismaService, + private readonly knowledgeService: KnowledgeExtractionService, + private readonly entityService: EntityResolutionService, + ) { + this.enabled = this.configService.get('CROSS_GOAL_LEARNING_ENABLED', 'true') === 'true'; + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + this.minSimilarityThreshold = parseFloat(this.configService.get('GOAL_SIMILARITY_THRESHOLD', '0.6')); + this.maxSimilarGoals = parseInt(this.configService.get('MAX_SIMILAR_GOALS', '5'), 10); + + this.logger.log(`Cross-goal learning ${this.enabled ? 'enabled' : 'disabled'}`); + } + + async onModuleInit(): Promise { + if (!this.enabled) return; + + // Load experiences from database + await this.loadExperiences(); + + // Build goal embeddings index + await this.buildGoalIndex(); + + this.logger.log(`Cross-goal learning initialized with ${this.experiences.size} experiences`); + } + + /** + * Find similar past goals for a new goal + */ + async findSimilarGoals( + goalDescription: string, + tenantId?: string, + ): Promise { + if (!this.enabled) return []; + + const queryTokens = this.tokenize(goalDescription); + const similarities: GoalSimilarity[] = []; + + // Search through indexed goals + for (const [goalRunId, embedding] of this.goalEmbeddings) { + const similarity = this.calculateTextSimilarity(queryTokens, embedding.tokens); + + if (similarity >= this.minSimilarityThreshold) { + // Get goal details + try { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { + id: true, + goal: true, + status: true, + error: true, + tenantId: true, + }, + }); + + if (!goalRun) continue; + + // Filter by tenant if specified + if (tenantId && goalRun.tenantId !== tenantId) continue; + + // Get relevant knowledge + const knowledge = this.knowledgeService.getKnowledge(goalRunId); + const entities = this.entityService.getResolvedEntities(goalRunId); + + similarities.push({ + goalRunId: goalRun.id, + goalDescription: goalRun.goal, + similarityScore: similarity, + matchType: 'semantic', + relevantFacts: knowledge?.facts.slice(0, 5) || [], + relevantEntities: entities.slice(0, 5), + status: goalRun.status, + outcome: goalRun.error || undefined, + }); + } catch (error) { + // Skip goals that can't be loaded + } + } + } + + // Sort by similarity and limit + return similarities + .sort((a, b) => b.similarityScore - a.similarityScore) + .slice(0, this.maxSimilarGoals); + } + + /** + * Get context suggestions for a new goal based on past executions + */ + async getSuggestions( + goalDescription: string, + tenantId?: string, + ): Promise { + const suggestions: ContextSuggestion[] = []; + + // Find similar goals + const similarGoals = await this.findSimilarGoals(goalDescription, tenantId); + + // Add suggestions from successful similar goals + for (const similar of similarGoals) { + if (similar.status === 'COMPLETED') { + suggestions.push({ + source: 'past_goal', + content: `Similar goal "${similar.goalDescription.substring(0, 50)}..." completed successfully`, + relevance: similar.similarityScore, + sourceGoalId: similar.goalRunId, + }); + + // Add key facts from similar goal + for (const fact of similar.relevantFacts.slice(0, 2)) { + suggestions.push({ + source: 'entity_knowledge', + content: fact.content, + relevance: similar.similarityScore * fact.confidence, + sourceGoalId: similar.goalRunId, + }); + } + } + } + + // Add suggestions from learned experiences + const relevantExperiences = this.findRelevantExperiences(goalDescription); + for (const exp of relevantExperiences) { + suggestions.push({ + source: 'learned_experience', + content: exp.description, + relevance: 0.8, + experienceId: exp.id, + }); + } + + // Sort by relevance and deduplicate + return suggestions + .sort((a, b) => b.relevance - a.relevance) + .slice(0, 10); + } + + /** + * Format suggestions as context for LLM + */ + async formatSuggestionsForLLM( + goalDescription: string, + tenantId?: string, + maxTokens: number = 1000, + ): Promise { + const suggestions = await this.getSuggestions(goalDescription, tenantId); + + if (suggestions.length === 0) { + return ''; + } + + const lines: string[] = ['=== LEARNED FROM PAST GOALS ===']; + + // Group by source + const pastGoals = suggestions.filter(s => s.source === 'past_goal'); + const experiences = suggestions.filter(s => s.source === 'learned_experience'); + const knowledge = suggestions.filter(s => s.source === 'entity_knowledge'); + + if (pastGoals.length > 0) { + lines.push('\nSimilar Past Goals:'); + for (const pg of pastGoals.slice(0, 3)) { + lines.push(`- ${pg.content} (${Math.round(pg.relevance * 100)}% similar)`); + } + } + + if (experiences.length > 0) { + lines.push('\nLearned Experiences:'); + for (const exp of experiences.slice(0, 3)) { + lines.push(`- ${exp.content}`); + } + } + + if (knowledge.length > 0) { + lines.push('\nRelevant Knowledge:'); + for (const k of knowledge.slice(0, 3)) { + lines.push(`- ${k.content}`); + } + } + + lines.push('\n=== END LEARNED ==='); + + // Truncate if too long + let result = lines.join('\n'); + const maxChars = maxTokens * 4; + if (result.length > maxChars) { + result = result.substring(0, maxChars - 50) + '\n... (truncated)'; + } + + return result; + } + + /** + * Learn from a completed goal + */ + @OnEvent('goal.completed') + async handleGoalCompleted(payload: { goalRunId: string }): Promise { + if (!this.enabled) return; + + try { + await this.learnFromGoal(payload.goalRunId, 'success'); + } catch (error) { + this.logger.warn(`Failed to learn from completed goal: ${(error as Error).message}`); + } + } + + /** + * Learn from a failed goal + */ + @OnEvent('goal.failed') + async handleGoalFailed(payload: { goalRunId: string; reason?: string }): Promise { + if (!this.enabled) return; + + try { + await this.learnFromGoal(payload.goalRunId, 'failure'); + } catch (error) { + this.logger.warn(`Failed to learn from failed goal: ${(error as Error).message}`); + } + } + + /** + * Record usage of an experience + */ + recordExperienceUsage(experienceId: string): void { + const exp = this.experiences.get(experienceId); + if (exp) { + exp.usageCount++; + exp.lastUsedAt = new Date(); + } + } + + /** + * Get learning summary + */ + getLearningSummary(): LearningSummary { + const experiences = Array.from(this.experiences.values()); + + const successPatterns = experiences.filter(e => e.experienceType === 'success_pattern').length; + const failureLessons = experiences.filter(e => e.experienceType === 'failure_lesson').length; + const optimizations = experiences.filter(e => e.experienceType === 'optimization').length; + + const totalUsage = experiences.reduce((sum, e) => sum + e.usageCount, 0); + const averageReuse = experiences.length > 0 ? totalUsage / experiences.length : 0; + + const topExperiences = experiences + .sort((a, b) => b.usageCount - a.usageCount) + .slice(0, 5); + + return { + totalExperiences: experiences.length, + successPatterns, + failureLessons, + optimizations, + averageReuse, + topExperiences, + }; + } + + /** + * Get all experiences + */ + getAllExperiences(): LearnedExperience[] { + return Array.from(this.experiences.values()); + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + private async loadExperiences(): Promise { + try { + // Load from completed goals with good outcomes + const recentSuccesses = await this.prisma.goalRun.findMany({ + where: { + status: 'COMPLETED', + completedAt: { gte: new Date(Date.now() - 30 * 24 * 60 * 60 * 1000) }, // Last 30 days + }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + where: { status: 'COMPLETED' }, + orderBy: { order: 'asc' }, + }, + }, + }, + }, + take: 100, + }); + + for (const goal of recentSuccesses) { + const plan = goal.planVersions[0]; + if (!plan || plan.checklistItems.length < 2) continue; + + const experience: LearnedExperience = { + id: `exp-${goal.id}`, + goalRunId: goal.id, + goalDescription: goal.goal, + experienceType: 'success_pattern', + description: `Successfully completed: ${goal.goal.substring(0, 100)}`, + applicableWhen: this.extractApplicableConditions(goal.goal), + steps: plan.checklistItems.map(item => ({ + order: item.order, + description: item.description, + outcome: item.actualOutcome || 'Completed', + })), + impact: 'medium', + usageCount: 0, + createdAt: goal.completedAt || goal.createdAt, + }; + + this.experiences.set(experience.id, experience); + } + + // Also load failure lessons + const recentFailures = await this.prisma.goalRun.findMany({ + where: { + status: 'FAILED', + completedAt: { gte: new Date(Date.now() - 30 * 24 * 60 * 60 * 1000) }, + }, + select: { + id: true, + goal: true, + error: true, + completedAt: true, + createdAt: true, + }, + take: 50, + }); + + for (const goal of recentFailures) { + if (!goal.error) continue; + + const experience: LearnedExperience = { + id: `exp-fail-${goal.id}`, + goalRunId: goal.id, + goalDescription: goal.goal, + experienceType: 'failure_lesson', + description: `Avoid: ${goal.error.substring(0, 100)}`, + applicableWhen: this.extractApplicableConditions(goal.goal), + impact: 'high', + usageCount: 0, + createdAt: goal.completedAt || goal.createdAt, + }; + + this.experiences.set(experience.id, experience); + } + } catch (error) { + this.logger.warn(`Failed to load experiences: ${(error as Error).message}`); + } + } + + private async buildGoalIndex(): Promise { + try { + const recentGoals = await this.prisma.goalRun.findMany({ + where: { + status: { in: ['COMPLETED', 'FAILED'] }, + createdAt: { gte: new Date(Date.now() - 60 * 24 * 60 * 60 * 1000) }, // Last 60 days + }, + select: { + id: true, + goal: true, + }, + take: 500, + }); + + for (const goal of recentGoals) { + this.goalEmbeddings.set(goal.id, { + text: goal.goal, + tokens: this.tokenize(goal.goal), + }); + } + + this.logger.debug(`Built goal index with ${this.goalEmbeddings.size} entries`); + } catch (error) { + this.logger.warn(`Failed to build goal index: ${(error as Error).message}`); + } + } + + private async learnFromGoal(goalRunId: string, outcome: 'success' | 'failure'): Promise { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + }); + + if (!goalRun) return; + + // Add to embeddings index + this.goalEmbeddings.set(goalRunId, { + text: goalRun.goal, + tokens: this.tokenize(goalRun.goal), + }); + + // Create experience + const plan = goalRun.planVersions[0]; + const experienceId = `exp-${outcome === 'success' ? '' : 'fail-'}${goalRunId}`; + + const experience: LearnedExperience = { + id: experienceId, + goalRunId, + goalDescription: goalRun.goal, + experienceType: outcome === 'success' ? 'success_pattern' : 'failure_lesson', + description: outcome === 'success' + ? `Completed: ${goalRun.goal.substring(0, 100)}` + : `Failed: ${goalRun.error?.substring(0, 100) || 'Unknown reason'}`, + applicableWhen: this.extractApplicableConditions(goalRun.goal), + steps: outcome === 'success' && plan + ? plan.checklistItems.map(item => ({ + order: item.order, + description: item.description, + outcome: item.actualOutcome || 'Completed', + })) + : undefined, + impact: outcome === 'failure' ? 'high' : 'medium', + usageCount: 0, + createdAt: new Date(), + }; + + this.experiences.set(experienceId, experience); + + this.logger.debug(`Learned ${outcome} experience from goal ${goalRunId}`); + + // Emit event + this.eventEmitter.emit('learning.experience.created', { + experienceId, + goalRunId, + type: experience.experienceType, + }); + } + + private findRelevantExperiences(goalDescription: string): LearnedExperience[] { + const queryTokens = this.tokenize(goalDescription); + const relevant: Array<{ exp: LearnedExperience; score: number }> = []; + + for (const exp of this.experiences.values()) { + // Check if any applicable conditions match + const conditionScore = exp.applicableWhen.reduce((score, condition) => { + const conditionTokens = this.tokenize(condition); + const overlap = this.calculateTextSimilarity(queryTokens, conditionTokens); + return Math.max(score, overlap); + }, 0); + + // Also check goal description similarity + const descTokens = this.tokenize(exp.goalDescription); + const descScore = this.calculateTextSimilarity(queryTokens, descTokens); + + const finalScore = Math.max(conditionScore, descScore); + + if (finalScore > 0.5) { + relevant.push({ exp, score: finalScore }); + } + } + + return relevant + .sort((a, b) => b.score - a.score) + .slice(0, 5) + .map(r => r.exp); + } + + private extractApplicableConditions(goalDescription: string): string[] { + const conditions: string[] = []; + + // Extract key phrases + const words = goalDescription.toLowerCase().split(/\s+/); + + // Look for action verbs + const actionVerbs = ['book', 'find', 'search', 'create', 'update', 'delete', 'send', 'schedule', 'order']; + for (const verb of actionVerbs) { + if (words.includes(verb)) { + conditions.push(`action:${verb}`); + } + } + + // Look for domain keywords + const domains = ['flight', 'hotel', 'car', 'email', 'meeting', 'report', 'data', 'file']; + for (const domain of domains) { + if (goalDescription.toLowerCase().includes(domain)) { + conditions.push(`domain:${domain}`); + } + } + + // Add the full goal as a condition + conditions.push(goalDescription.substring(0, 100)); + + return conditions; + } + + private tokenize(text: string): string[] { + // Simple tokenization - in production, use a proper NLP tokenizer + return text + .toLowerCase() + .replace(/[^\w\s]/g, ' ') + .split(/\s+/) + .filter(t => t.length > 2) + .filter(t => !this.isStopWord(t)); + } + + private isStopWord(word: string): boolean { + const stopWords = new Set([ + 'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', + 'of', 'with', 'by', 'from', 'as', 'is', 'was', 'are', 'were', 'been', + 'be', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', + 'should', 'may', 'might', 'must', 'shall', 'can', 'need', 'this', 'that', + 'these', 'those', 'i', 'you', 'he', 'she', 'it', 'we', 'they', 'what', + 'which', 'who', 'whom', 'whose', 'where', 'when', 'why', 'how', 'all', + 'each', 'every', 'both', 'few', 'more', 'most', 'other', 'some', 'such', + ]); + return stopWords.has(word); + } + + private calculateTextSimilarity(tokens1: string[], tokens2: string[]): number { + if (tokens1.length === 0 || tokens2.length === 0) return 0; + + const set1 = new Set(tokens1); + const set2 = new Set(tokens2); + + const intersection = new Set([...set1].filter(x => set2.has(x))); + const union = new Set([...set1, ...set2]); + + // Jaccard similarity + return intersection.size / union.size; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/dashboard.service.ts b/packages/bytebot-workflow-orchestrator/src/services/dashboard.service.ts new file mode 100644 index 000000000..a1fc557fb --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/dashboard.service.ts @@ -0,0 +1,779 @@ +/** + * Dashboard Service + * v1.0.0: Real-Time Dashboard Visualization for Agent Monitoring + * + * Implements industry-standard patterns for agent observability: + * - OpenAI: Run status and streaming progress + * - Grafana: Time-series metrics with aggregation + * - Datadog: Real-time event streams with filtering + * + * Key Features: + * 1. Aggregated metrics for all active goal runs + * 2. Timeline visualization data for multi-step workflows + * 3. Health status across all agents + * 4. Real-time event streaming via WebSocket/SSE + * 5. Historical analytics with time-range queries + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS_V2.md + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { GoalCheckpointService } from './goal-checkpoint.service'; +import { CircuitBreakerService } from './circuit-breaker.service'; +import { KnowledgeExtractionService } from './knowledge-extraction.service'; +import { BackgroundModeService } from './background-mode.service'; + +// Dashboard Overview Response +export interface DashboardOverview { + timestamp: Date; + summary: { + activeGoals: number; + completedToday: number; + failedToday: number; + averageCompletionTime: number; // ms + successRate: number; // percentage + }; + recentActivity: ActivityItem[]; + agentHealth: AgentHealthSummary; + resourceUtilization: ResourceMetrics; +} + +// Activity Stream Item +export interface ActivityItem { + id: string; + timestamp: Date; + type: 'goal_started' | 'step_completed' | 'step_failed' | 'goal_completed' | 'goal_failed' | 'replan' | 'alert'; + goalRunId: string; + message: string; + severity: 'info' | 'warning' | 'error' | 'success'; + metadata?: Record; +} + +// Agent Health Summary +export interface AgentHealthSummary { + totalAgents: number; + healthy: number; + degraded: number; + unhealthy: number; + agents: Array<{ + id: string; + name: string; + status: 'healthy' | 'degraded' | 'unhealthy'; + successRate: number; + avgResponseTime: number; + activeGoals: number; + }>; +} + +// Resource Metrics +export interface ResourceMetrics { + backgroundTasks: { + queued: number; + running: number; + completed: number; + }; + checkpoints: { + active: number; + totalSize: number; + }; + knowledgeGraphs: { + totalFacts: number; + totalEntities: number; + }; +} + +// Goal Timeline for Visualization +export interface GoalTimeline { + goalRunId: string; + goalDescription: string; + status: string; + startedAt: Date; + completedAt?: Date; + duration?: number; + steps: Array<{ + order: number; + description: string; + status: 'pending' | 'in_progress' | 'completed' | 'failed' | 'skipped'; + startedAt?: Date; + completedAt?: Date; + duration?: number; + outcome?: string; + }>; + metrics: { + stepsCompleted: number; + stepsFailed: number; + stepsRemaining: number; + progressPercent: number; + estimatedTimeRemaining?: number; + }; +} + +// Historical Analytics +export interface HistoricalAnalytics { + timeRange: { + start: Date; + end: Date; + }; + totals: { + goalsStarted: number; + goalsCompleted: number; + goalsFailed: number; + stepsExecuted: number; + }; + timeSeries: Array<{ + timestamp: Date; + goalsStarted: number; + goalsCompleted: number; + goalsFailed: number; + avgCompletionTime: number; + }>; + topFailureReasons: Array<{ + reason: string; + count: number; + percentage: number; + }>; + performanceByHour: Array<{ + hour: number; + successRate: number; + avgDuration: number; + }>; +} + +@Injectable() +export class DashboardService implements OnModuleInit { + private readonly logger = new Logger(DashboardService.name); + private readonly enabled: boolean; + + // Activity stream buffer (in-memory, latest 1000 items) + private activityStream: ActivityItem[] = []; + private readonly maxActivityItems = 1000; + + // Cached metrics (updated periodically) + private cachedOverview: DashboardOverview | null = null; + private cacheExpiry: Date = new Date(0); + private readonly cacheTtlMs = 10000; // 10 seconds + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly prisma: PrismaService, + private readonly checkpointService: GoalCheckpointService, + private readonly circuitBreakerService: CircuitBreakerService, + private readonly knowledgeService: KnowledgeExtractionService, + private readonly backgroundService: BackgroundModeService, + ) { + this.enabled = this.configService.get('DASHBOARD_ENABLED', 'true') === 'true'; + this.logger.log(`Dashboard service ${this.enabled ? 'enabled' : 'disabled'}`); + } + + async onModuleInit(): Promise { + if (!this.enabled) return; + + // Pre-populate activity stream with recent events + await this.loadRecentActivity(); + this.logger.log('Dashboard service initialized'); + } + + /** + * Get dashboard overview with aggregated metrics + */ + async getOverview(): Promise { + // Return cached if valid + if (this.cachedOverview && this.cacheExpiry > new Date()) { + return this.cachedOverview; + } + + const now = new Date(); + const todayStart = new Date(now.getFullYear(), now.getMonth(), now.getDate()); + + // Get goal run statistics + const [activeGoals, completedToday, failedToday, allRecentGoals] = await Promise.all([ + this.prisma.goalRun.count({ + where: { status: 'RUNNING' }, + }), + this.prisma.goalRun.count({ + where: { + status: 'COMPLETED', + completedAt: { gte: todayStart }, + }, + }), + this.prisma.goalRun.count({ + where: { + status: 'FAILED', + completedAt: { gte: todayStart }, + }, + }), + this.prisma.goalRun.findMany({ + where: { + completedAt: { gte: todayStart }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + select: { + status: true, + startedAt: true, + completedAt: true, + }, + }), + ]); + + // Calculate average completion time + const completionTimes = allRecentGoals + .filter(g => g.startedAt && g.completedAt) + .map(g => new Date(g.completedAt!).getTime() - new Date(g.startedAt!).getTime()); + + const avgCompletionTime = completionTimes.length > 0 + ? completionTimes.reduce((a, b) => a + b, 0) / completionTimes.length + : 0; + + // Calculate success rate + const totalToday = completedToday + failedToday; + const successRate = totalToday > 0 ? (completedToday / totalToday) * 100 : 100; + + // Get agent health + const agentHealth = await this.getAgentHealthSummary(); + + // Get resource metrics + const resourceUtilization = await this.getResourceMetrics(); + + const overview: DashboardOverview = { + timestamp: now, + summary: { + activeGoals, + completedToday, + failedToday, + averageCompletionTime: Math.round(avgCompletionTime), + successRate: Math.round(successRate * 10) / 10, + }, + recentActivity: this.activityStream.slice(0, 20), + agentHealth, + resourceUtilization, + }; + + // Cache the result + this.cachedOverview = overview; + this.cacheExpiry = new Date(Date.now() + this.cacheTtlMs); + + return overview; + } + + /** + * Get detailed timeline for a specific goal + */ + async getGoalTimeline(goalRunId: string): Promise { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + }); + + if (!goalRun) { + return null; + } + + const plan = goalRun.planVersions[0]; + const items = plan?.checklistItems || []; + + const steps = items.map(item => ({ + order: item.order, + description: item.description, + status: item.status.toLowerCase() as any, + startedAt: item.startedAt || undefined, + completedAt: item.completedAt || undefined, + duration: item.startedAt && item.completedAt + ? new Date(item.completedAt).getTime() - new Date(item.startedAt).getTime() + : undefined, + outcome: item.actualOutcome || undefined, + })); + + const completed = items.filter(i => i.status === 'COMPLETED').length; + const failed = items.filter(i => i.status === 'FAILED').length; + const remaining = items.filter(i => ['PENDING', 'IN_PROGRESS'].includes(i.status)).length; + + // Estimate remaining time based on average step duration + const completedDurations = steps + .filter(s => s.duration) + .map(s => s.duration!); + const avgStepDuration = completedDurations.length > 0 + ? completedDurations.reduce((a, b) => a + b, 0) / completedDurations.length + : 0; + + return { + goalRunId: goalRun.id, + goalDescription: goalRun.goal, + status: goalRun.status, + startedAt: goalRun.startedAt || goalRun.createdAt, + completedAt: goalRun.completedAt || undefined, + duration: goalRun.startedAt && goalRun.completedAt + ? new Date(goalRun.completedAt).getTime() - new Date(goalRun.startedAt).getTime() + : undefined, + steps, + metrics: { + stepsCompleted: completed, + stepsFailed: failed, + stepsRemaining: remaining, + progressPercent: items.length > 0 + ? Math.round((completed / items.length) * 100) + : 0, + estimatedTimeRemaining: remaining > 0 && avgStepDuration > 0 + ? Math.round(remaining * avgStepDuration) + : undefined, + }, + }; + } + + /** + * Get all active goal timelines + */ + async getActiveGoalTimelines(): Promise { + const activeGoals = await this.prisma.goalRun.findMany({ + where: { status: 'RUNNING' }, + select: { id: true }, + take: 50, + }); + + const timelines = await Promise.all( + activeGoals.map(g => this.getGoalTimeline(g.id)) + ); + + return timelines.filter((t): t is GoalTimeline => t !== null); + } + + /** + * Get historical analytics for a time range + */ + async getHistoricalAnalytics( + startDate: Date, + endDate: Date, + interval: 'hour' | 'day' = 'hour', + ): Promise { + // Get all goal runs in range + const goalRuns = await this.prisma.goalRun.findMany({ + where: { + createdAt: { gte: startDate, lte: endDate }, + }, + select: { + id: true, + status: true, + createdAt: true, + startedAt: true, + completedAt: true, + error: true, + }, + orderBy: { createdAt: 'asc' }, + }); + + // Calculate totals + const goalsStarted = goalRuns.length; + const goalsCompleted = goalRuns.filter(g => g.status === 'COMPLETED').length; + const goalsFailed = goalRuns.filter(g => g.status === 'FAILED').length; + + // Get step count + const stepsExecuted = await this.prisma.checklistItem.count({ + where: { + planVersion: { + goalRun: { + createdAt: { gte: startDate, lte: endDate }, + }, + }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + }); + + // Build time series + const timeSeries = this.buildTimeSeries(goalRuns, startDate, endDate, interval); + + // Analyze failure reasons + const failedGoals = goalRuns.filter(g => g.status === 'FAILED'); + const failureReasons = this.analyzeFailureReasons(failedGoals); + + // Performance by hour + const performanceByHour = this.calculatePerformanceByHour(goalRuns); + + return { + timeRange: { start: startDate, end: endDate }, + totals: { + goalsStarted, + goalsCompleted, + goalsFailed, + stepsExecuted, + }, + timeSeries, + topFailureReasons: failureReasons, + performanceByHour, + }; + } + + /** + * Get activity stream with optional filtering + */ + getActivityStream(options: { + limit?: number; + types?: ActivityItem['type'][]; + severity?: ActivityItem['severity'][]; + goalRunId?: string; + } = {}): ActivityItem[] { + let filtered = [...this.activityStream]; + + if (options.types?.length) { + filtered = filtered.filter(a => options.types!.includes(a.type)); + } + + if (options.severity?.length) { + filtered = filtered.filter(a => options.severity!.includes(a.severity)); + } + + if (options.goalRunId) { + filtered = filtered.filter(a => a.goalRunId === options.goalRunId); + } + + return filtered.slice(0, options.limit || 100); + } + + /** + * Event handlers to populate activity stream + */ + @OnEvent('goal.started') + handleGoalStarted(payload: { goalRunId: string; goal: string }) { + this.addActivity({ + type: 'goal_started', + goalRunId: payload.goalRunId, + message: `Goal started: ${payload.goal.substring(0, 100)}`, + severity: 'info', + }); + } + + @OnEvent('activity.STEP_COMPLETED') + handleStepCompleted(payload: { goalRunId: string; checklistItemId: string; outcome?: string }) { + this.addActivity({ + type: 'step_completed', + goalRunId: payload.goalRunId, + message: `Step completed${payload.outcome ? `: ${payload.outcome.substring(0, 50)}` : ''}`, + severity: 'success', + }); + } + + @OnEvent('activity.STEP_FAILED') + handleStepFailed(payload: { goalRunId: string; checklistItemId: string; error?: string }) { + this.addActivity({ + type: 'step_failed', + goalRunId: payload.goalRunId, + message: `Step failed${payload.error ? `: ${payload.error.substring(0, 50)}` : ''}`, + severity: 'error', + }); + } + + @OnEvent('goal.completed') + handleGoalCompleted(payload: { goalRunId: string }) { + this.addActivity({ + type: 'goal_completed', + goalRunId: payload.goalRunId, + message: 'Goal completed successfully', + severity: 'success', + }); + } + + @OnEvent('goal.failed') + handleGoalFailed(payload: { goalRunId: string; reason?: string }) { + this.addActivity({ + type: 'goal_failed', + goalRunId: payload.goalRunId, + message: `Goal failed${payload.reason ? `: ${payload.reason.substring(0, 50)}` : ''}`, + severity: 'error', + }); + } + + @OnEvent('plan.replanned') + handleReplan(payload: { goalRunId: string; reason?: string }) { + this.addActivity({ + type: 'replan', + goalRunId: payload.goalRunId, + message: `Plan updated${payload.reason ? `: ${payload.reason.substring(0, 50)}` : ''}`, + severity: 'warning', + }); + } + + @OnEvent('circuit-breaker.state-change') + handleCircuitBreakerChange(payload: { serviceName: string; state: string }) { + if (payload.state === 'OPEN') { + this.addActivity({ + type: 'alert', + goalRunId: '', + message: `Circuit breaker OPEN for ${payload.serviceName}`, + severity: 'error', + metadata: { serviceName: payload.serviceName }, + }); + } + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + private addActivity(activity: Omit): void { + const item: ActivityItem = { + id: `act-${Date.now()}-${Math.random().toString(36).substring(2, 8)}`, + timestamp: new Date(), + ...activity, + }; + + this.activityStream.unshift(item); + + // Trim to max size + if (this.activityStream.length > this.maxActivityItems) { + this.activityStream = this.activityStream.slice(0, this.maxActivityItems); + } + + // Emit for real-time streaming + this.eventEmitter.emit('dashboard.activity', item); + } + + private async loadRecentActivity(): Promise { + try { + const recentGoals = await this.prisma.goalRun.findMany({ + where: { + createdAt: { gte: new Date(Date.now() - 24 * 60 * 60 * 1000) }, + }, + orderBy: { createdAt: 'desc' }, + take: 50, + select: { + id: true, + goal: true, + status: true, + createdAt: true, + completedAt: true, + }, + }); + + for (const goal of recentGoals.reverse()) { + this.addActivity({ + type: 'goal_started', + goalRunId: goal.id, + message: `Goal: ${goal.goal.substring(0, 100)}`, + severity: 'info', + }); + + if (goal.status === 'COMPLETED') { + this.addActivity({ + type: 'goal_completed', + goalRunId: goal.id, + message: 'Goal completed', + severity: 'success', + }); + } else if (goal.status === 'FAILED') { + this.addActivity({ + type: 'goal_failed', + goalRunId: goal.id, + message: 'Goal failed', + severity: 'error', + }); + } + } + } catch (error) { + this.logger.warn(`Failed to load recent activity: ${(error as Error).message}`); + } + } + + private async getAgentHealthSummary(): Promise { + const healthOverview = this.circuitBreakerService.getAllAgentHealth(); + + const agents = healthOverview.agents.map(a => { + const total = a.successCount + a.failureCount; + const successRate = total > 0 ? (a.successCount / total) * 100 : 100; + + let status: 'healthy' | 'degraded' | 'unhealthy' = 'healthy'; + if (a.consecutiveFailures >= 5) { + status = 'unhealthy'; + } else if (a.consecutiveFailures > 0 || successRate < 90) { + status = 'degraded'; + } + + return { + id: a.agentId, + name: a.agentId, + status, + successRate: Math.round(successRate), + avgResponseTime: a.responseTimeMs.length > 0 + ? Math.round(a.responseTimeMs.reduce((a, b) => a + b, 0) / a.responseTimeMs.length) + : 0, + activeGoals: 0, // Would need additional tracking + }; + }); + + return { + totalAgents: agents.length || 1, // At least 1 (default agent) + healthy: healthOverview.summary.healthy || 1, + degraded: healthOverview.summary.degraded, + unhealthy: healthOverview.summary.unhealthy, + agents, + }; + } + + private async getResourceMetrics(): Promise { + const backgroundStats = this.backgroundService.getQueueStats(); + + // Get checkpoint count (approximate from cache) + const activeCheckpoints = await this.prisma.goalRun.count({ + where: { + status: 'RUNNING', + NOT: { constraints: { equals: {} } }, + }, + }); + + return { + backgroundTasks: { + queued: backgroundStats.queuedTasks, + running: backgroundStats.activeTasks, + completed: backgroundStats.completedTasks, + }, + checkpoints: { + active: activeCheckpoints, + totalSize: 0, // Would need actual calculation + }, + knowledgeGraphs: { + totalFacts: 0, // Would aggregate from knowledge service + totalEntities: 0, + }, + }; + } + + private buildTimeSeries( + goalRuns: any[], + startDate: Date, + endDate: Date, + interval: 'hour' | 'day', + ): HistoricalAnalytics['timeSeries'] { + const buckets = new Map(); + + // Initialize buckets + const current = new Date(startDate); + while (current <= endDate) { + const key = interval === 'hour' + ? current.toISOString().substring(0, 13) + : current.toISOString().substring(0, 10); + buckets.set(key, { started: 0, completed: 0, failed: 0, completionTimes: [] }); + + if (interval === 'hour') { + current.setHours(current.getHours() + 1); + } else { + current.setDate(current.getDate() + 1); + } + } + + // Populate buckets + for (const goal of goalRuns) { + const createdKey = interval === 'hour' + ? goal.createdAt.toISOString().substring(0, 13) + : goal.createdAt.toISOString().substring(0, 10); + + if (buckets.has(createdKey)) { + buckets.get(createdKey)!.started++; + } + + if (goal.completedAt) { + const completedKey = interval === 'hour' + ? goal.completedAt.toISOString().substring(0, 13) + : goal.completedAt.toISOString().substring(0, 10); + + if (buckets.has(completedKey)) { + if (goal.status === 'COMPLETED') { + buckets.get(completedKey)!.completed++; + } else if (goal.status === 'FAILED') { + buckets.get(completedKey)!.failed++; + } + + if (goal.startedAt) { + const duration = goal.completedAt.getTime() - goal.startedAt.getTime(); + buckets.get(completedKey)!.completionTimes.push(duration); + } + } + } + } + + // Convert to array + return Array.from(buckets.entries()).map(([key, data]) => ({ + timestamp: new Date(key), + goalsStarted: data.started, + goalsCompleted: data.completed, + goalsFailed: data.failed, + avgCompletionTime: data.completionTimes.length > 0 + ? Math.round(data.completionTimes.reduce((a, b) => a + b, 0) / data.completionTimes.length) + : 0, + })); + } + + private analyzeFailureReasons(failedGoals: any[]): HistoricalAnalytics['topFailureReasons'] { + const reasons = new Map(); + + for (const goal of failedGoals) { + const reason = goal.outcome || 'Unknown error'; + // Normalize reason (take first 50 chars) + const normalizedReason = reason.substring(0, 50); + reasons.set(normalizedReason, (reasons.get(normalizedReason) || 0) + 1); + } + + const total = failedGoals.length || 1; + + return Array.from(reasons.entries()) + .map(([reason, count]) => ({ + reason, + count, + percentage: Math.round((count / total) * 100), + })) + .sort((a, b) => b.count - a.count) + .slice(0, 10); + } + + private calculatePerformanceByHour(goalRuns: any[]): HistoricalAnalytics['performanceByHour'] { + const hourlyData = new Map(); + + // Initialize all hours + for (let h = 0; h < 24; h++) { + hourlyData.set(h, { completed: 0, failed: 0, durations: [] }); + } + + for (const goal of goalRuns) { + if (!goal.completedAt) continue; + + const hour = goal.completedAt.getHours(); + const data = hourlyData.get(hour)!; + + if (goal.status === 'COMPLETED') { + data.completed++; + } else if (goal.status === 'FAILED') { + data.failed++; + } + + if (goal.startedAt) { + data.durations.push(goal.completedAt.getTime() - goal.startedAt.getTime()); + } + } + + return Array.from(hourlyData.entries()).map(([hour, data]) => { + const total = data.completed + data.failed; + return { + hour, + successRate: total > 0 ? Math.round((data.completed / total) * 100) : 100, + avgDuration: data.durations.length > 0 + ? Math.round(data.durations.reduce((a, b) => a + b, 0) / data.durations.length) + : 0, + }; + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/db-transient.service.ts b/packages/bytebot-workflow-orchestrator/src/services/db-transient.service.ts new file mode 100644 index 000000000..309925e37 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/db-transient.service.ts @@ -0,0 +1,360 @@ +/** + * Database Transient Error Handling Service + * v1.0.0: Resilience layer for transient database errors + * + * Purpose: Make planned or transient DB restarts boring and non-fatal. + * + * Key Features: + * - Classify transient vs non-transient database errors + * - Exponential backoff gate (5s → 60s max) + * - Throttled logging (once per backoff window) + * - Throttled activity events (once per minute max) + * - Wrapper for loop ticks and pollers + * + * Error Patterns Detected: + * - FATAL: the database system is shutting down + * - FATAL: terminating connection due to administrator command + * - ECONNRESET, ETIMEDOUT, EPIPE, ECONNREFUSED, ENOTFOUND + * - Connection pool timeout (P2024) + * - PrismaClientInitializationError + * - Connection-related P1xxx errors + * + * @see https://www.prisma.io/docs/orm/reference/error-reference + * @see https://advancedweb.hu/how-to-implement-an-exponential-backoff-retry-strategy-in-javascript/ + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; + +// Backoff configuration +const DEFAULT_INITIAL_BACKOFF_MS = 5000; // 5 seconds +const DEFAULT_MAX_BACKOFF_MS = 60000; // 60 seconds +const ACTIVITY_EVENT_THROTTLE_MS = 60000; // 1 minute between activity events + +// Known transient error patterns +const TRANSIENT_ERROR_PATTERNS = [ + // PostgreSQL shutdown/admin commands + 'database system is shutting down', + 'terminating connection due to administrator command', + 'server closed the connection unexpectedly', + 'connection terminated', + 'the database system is starting up', + 'the database system is in recovery mode', + + // Network errors + 'ECONNRESET', + 'ETIMEDOUT', + 'EPIPE', + 'ECONNREFUSED', + 'ENOTFOUND', + 'ENETUNREACH', + 'EHOSTUNREACH', + 'socket hang up', + 'read ECONNRESET', + 'write EPIPE', + + // Connection pool issues + 'Timed out fetching a new connection from the pool', + 'Connection pool timeout', + 'pool_timeout', + 'Can\'t reach database server', + 'Unable to connect to database', + 'Connection refused', + + // Prisma-specific + 'Error querying the database', + 'Error in connector', + 'Query engine exited', + 'Prisma engine crashed', +]; + +// Prisma error codes that are transient (P1xxx = connection issues) +const TRANSIENT_PRISMA_CODES = [ + 'P1000', // Authentication failed + 'P1001', // Can't reach database server + 'P1002', // Database server timed out + 'P1003', // Database does not exist (during migration/restart) + 'P1008', // Operations timed out + 'P1009', // Database already exists (concurrent startup) + 'P1010', // User denied access + 'P1017', // Server closed connection + 'P2024', // Connection pool timeout +]; + +// Error types that indicate Prisma connection issues +const TRANSIENT_ERROR_CLASSES = [ + 'PrismaClientInitializationError', + 'PrismaClientRustPanicError', +]; + +export interface DbBackoffState { + isInBackoff: boolean; + backoffUntil: number; + currentBackoffMs: number; + consecutiveTransientErrors: number; + lastLoggedAt: number; + lastActivityEventAt: number; +} + +@Injectable() +export class DbTransientService { + private readonly logger = new Logger(DbTransientService.name); + private readonly initialBackoffMs: number; + private readonly maxBackoffMs: number; + + // Global backoff state + private backoffState: DbBackoffState = { + isInBackoff: false, + backoffUntil: 0, + currentBackoffMs: 0, + consecutiveTransientErrors: 0, + lastLoggedAt: 0, + lastActivityEventAt: 0, + }; + + constructor(private readonly configService: ConfigService) { + this.initialBackoffMs = parseInt( + this.configService.get('DB_TRANSIENT_INITIAL_BACKOFF_MS', String(DEFAULT_INITIAL_BACKOFF_MS)), + 10, + ); + this.maxBackoffMs = parseInt( + this.configService.get('DB_TRANSIENT_MAX_BACKOFF_MS', String(DEFAULT_MAX_BACKOFF_MS)), + 10, + ); + + this.logger.log( + `DbTransientService initialized (initialBackoff=${this.initialBackoffMs}ms, maxBackoff=${this.maxBackoffMs}ms)`, + ); + } + + /** + * Check if an error is a transient database error + * Transient errors should trigger backoff, not crash + */ + isDbTransient(error: any): boolean { + if (!error) return false; + + // Check error class/constructor name + const errorClassName = error?.constructor?.name || ''; + if (TRANSIENT_ERROR_CLASSES.some(cls => errorClassName.includes(cls))) { + return true; + } + + // Check Prisma error codes + const errorCode = error?.code || ''; + if (TRANSIENT_PRISMA_CODES.includes(errorCode)) { + return true; + } + + // Check error message patterns + const errorMessage = this.extractErrorMessage(error); + const lowerMessage = errorMessage.toLowerCase(); + + if (TRANSIENT_ERROR_PATTERNS.some(pattern => + lowerMessage.includes(pattern.toLowerCase()) + )) { + return true; + } + + // Check nested cause/original error + if (error.cause && this.isDbTransient(error.cause)) { + return true; + } + if (error.originalError && this.isDbTransient(error.originalError)) { + return true; + } + + return false; + } + + /** + * Extract error message from various error formats + */ + private extractErrorMessage(error: any): string { + if (typeof error === 'string') return error; + if (error?.message) return String(error.message); + if (error?.meta?.message) return String(error.meta.message); + try { + return JSON.stringify(error); + } catch { + return String(error); + } + } + + /** + * Check if we're currently in a backoff period + */ + isInBackoff(): boolean { + if (!this.backoffState.isInBackoff) return false; + + const now = Date.now(); + if (now >= this.backoffState.backoffUntil) { + // Backoff period expired - ready to try again + return false; + } + + return true; + } + + /** + * Get time remaining in backoff (ms) + */ + getBackoffRemainingMs(): number { + if (!this.isInBackoff()) return 0; + return Math.max(0, this.backoffState.backoffUntil - Date.now()); + } + + /** + * Get current backoff state for monitoring + */ + getState(): DbBackoffState { + return { ...this.backoffState }; + } + + /** + * Record a transient database error and enter backoff + * Returns true if we should log this occurrence (throttled) + */ + recordTransientError(error: any): { shouldLog: boolean; shouldEmitActivity: boolean; backoffMs: number } { + const now = Date.now(); + + // Calculate new backoff duration (exponential with cap) + if (this.backoffState.consecutiveTransientErrors === 0) { + this.backoffState.currentBackoffMs = this.initialBackoffMs; + } else { + this.backoffState.currentBackoffMs = Math.min( + this.backoffState.currentBackoffMs * 2, + this.maxBackoffMs, + ); + } + + this.backoffState.consecutiveTransientErrors++; + this.backoffState.isInBackoff = true; + this.backoffState.backoffUntil = now + this.backoffState.currentBackoffMs; + + // Determine if we should log (throttled to once per backoff window) + const shouldLog = now - this.backoffState.lastLoggedAt >= this.backoffState.currentBackoffMs; + if (shouldLog) { + this.backoffState.lastLoggedAt = now; + } + + // Determine if we should emit activity event (throttled to once per minute) + const shouldEmitActivity = now - this.backoffState.lastActivityEventAt >= ACTIVITY_EVENT_THROTTLE_MS; + if (shouldEmitActivity) { + this.backoffState.lastActivityEventAt = now; + } + + return { + shouldLog, + shouldEmitActivity, + backoffMs: this.backoffState.currentBackoffMs, + }; + } + + /** + * Record a successful database operation - reset backoff + */ + recordSuccess(): void { + if (this.backoffState.consecutiveTransientErrors > 0) { + this.logger.log( + `Database connection recovered after ${this.backoffState.consecutiveTransientErrors} transient errors`, + ); + } + + this.backoffState = { + isInBackoff: false, + backoffUntil: 0, + currentBackoffMs: 0, + consecutiveTransientErrors: 0, + lastLoggedAt: this.backoffState.lastLoggedAt, + lastActivityEventAt: this.backoffState.lastActivityEventAt, + }; + } + + /** + * Wrapper for async functions that should handle transient DB errors gracefully + * + * Usage: + * ```typescript + * await this.dbTransient.withTransientGuard( + * async () => { await this.prisma.goalRun.findMany(...) }, + * 'OrchestratorLoop.runIteration' + * ); + * ``` + * + * @param fn - Async function to execute + * @param context - Context string for logging + * @param options - Configuration options + * @returns Result of fn, or undefined if in backoff or transient error occurred + */ + async withTransientGuard( + fn: () => Promise, + context: string, + options: { + onTransientError?: (error: any, backoffMs: number) => void | Promise; + onNonTransientError?: (error: any) => void; + skipIfInBackoff?: boolean; + } = {}, + ): Promise { + const { onTransientError, onNonTransientError, skipIfInBackoff = true } = options; + + // Check if we're in backoff period + if (skipIfInBackoff && this.isInBackoff()) { + const remainingMs = this.getBackoffRemainingMs(); + this.logger.debug( + `[${context}] Skipping - in DB backoff for ${Math.round(remainingMs / 1000)}s more`, + ); + return undefined; + } + + try { + const result = await fn(); + this.recordSuccess(); + return result; + } catch (error: any) { + if (this.isDbTransient(error)) { + const { shouldLog, shouldEmitActivity, backoffMs } = this.recordTransientError(error); + + if (shouldLog) { + this.logger.warn( + `[${context}] Transient DB error (backoff ${Math.round(backoffMs / 1000)}s, ` + + `consecutive: ${this.backoffState.consecutiveTransientErrors}): ${this.extractErrorMessage(error)}`, + ); + } + + if (onTransientError) { + try { + await onTransientError(error, backoffMs); + } catch (callbackError) { + // Don't let callback errors propagate + this.logger.debug(`[${context}] onTransientError callback failed: ${callbackError}`); + } + } + + return undefined; + } + + // Non-transient error - propagate it + if (onNonTransientError) { + onNonTransientError(error); + } + throw error; + } + } + + /** + * Check if we should emit a DB unavailable activity event + * Throttled to once per minute + */ + shouldEmitDbUnavailableActivity(): boolean { + const now = Date.now(); + return now - this.backoffState.lastActivityEventAt >= ACTIVITY_EVENT_THROTTLE_MS; + } + + /** + * Mark that we emitted a DB unavailable activity event + */ + markActivityEmitted(): void { + this.backoffState.lastActivityEventAt = Date.now(); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/dead-letter-queue.service.ts b/packages/bytebot-workflow-orchestrator/src/services/dead-letter-queue.service.ts new file mode 100644 index 000000000..89988edae --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/dead-letter-queue.service.ts @@ -0,0 +1,786 @@ +/** + * Dead Letter Queue Service + * v1.0.1: Phase 9 Self-Healing & Auto-Recovery + * + * Manages permanently failed tasks that need manual intervention: + * - Captures failed tasks after retry exhaustion + * - Provides manual retry/skip/discard operations + * - Tracks failure patterns for alerting + * - Supports bulk operations + * + * DLQ entries should be regularly monitored and processed + * to prevent workflow bottlenecks. + * + * v1.0.1 Fix: Use parseInt for ConfigService number values + * (ConfigService.get only provides TypeScript type hints, + * not actual runtime type conversion) + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { LeaderElectionService } from './leader-election.service'; + +// DLQ entry status +export enum DLQStatus { + PENDING = 'PENDING', + RETRYING = 'RETRYING', + RESOLVED = 'RESOLVED', + DISCARDED = 'DISCARDED', +} + +// Failure categories +export enum FailureCategory { + RETRYABLE = 'RETRYABLE', // Transient failures, can retry + PERMANENT = 'PERMANENT', // Permanent failures, needs manual fix + UNKNOWN = 'UNKNOWN', // Unclassified failures +} + +// Severity levels +export enum DLQSeverity { + LOW = 'low', + MEDIUM = 'medium', + HIGH = 'high', + CRITICAL = 'critical', +} + +// DLQ entry summary +export interface DLQEntry { + id: string; + tenantId: string; + workflowRunId: string; + nodeRunId: string; + taskType: string; + failureReason: string; + failureCount: number; + failureCategory: FailureCategory; + severity: DLQSeverity; + status: DLQStatus; + retryCount: number; + maxRetries: number; + createdAt: Date; + lastFailedAt: Date; +} + +// Action result +export interface DLQActionResult { + id: string; + success: boolean; + action: 'RETRY' | 'SKIP' | 'DISCARD'; + error?: string; +} + +// DLQ statistics +export interface DLQStats { + total: number; + pending: number; + retrying: number; + resolved: number; + discarded: number; + bySeverity: Record; + byCategory: Record; + avgResolutionTimeMs: number; +} + +@Injectable() +export class DeadLetterQueueService implements OnModuleInit { + private readonly logger = new Logger(DeadLetterQueueService.name); + + // Configuration + private readonly autoRetryEnabled: boolean; + private readonly autoRetryMaxAttempts: number; + private readonly autoRetryDelayMs: number; + private readonly alertThreshold: number; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly leaderElection: LeaderElectionService, + ) { + // v1.0.1: Use string comparison for boolean env vars (env vars are always strings) + this.autoRetryEnabled = + this.configService.get('DLQ_AUTO_RETRY_ENABLED', 'false') === 'true'; + + // v1.0.1: Use parseInt for numeric env vars to ensure actual number type + this.autoRetryMaxAttempts = parseInt( + this.configService.get('DLQ_AUTO_RETRY_MAX_ATTEMPTS', '3'), + 10, + ); + this.autoRetryDelayMs = parseInt( + this.configService.get('DLQ_AUTO_RETRY_DELAY_MS', '300000'), // 5 minutes + 10, + ); + this.alertThreshold = parseInt( + this.configService.get('DLQ_ALERT_THRESHOLD', '10'), + 10, + ); + } + + onModuleInit() { + this.logger.log( + `Dead Letter Queue Service initialized (autoRetry: ${this.autoRetryEnabled}, ` + + `alertThreshold: ${this.alertThreshold})`, + ); + } + + /** + * Add an entry to the DLQ + */ + async addEntry(params: { + tenantId: string; + workflowRunId: string; + nodeRunId: string; + taskType: string; + originalPayload: any; + failureReason: string; + errorDetails?: any; + failureCategory?: FailureCategory; + severity?: DLQSeverity; + }): Promise { + const entry = await this.prisma.deadLetterEntry.create({ + data: { + tenantId: params.tenantId, + workflowRunId: params.workflowRunId, + nodeRunId: params.nodeRunId, + taskType: params.taskType, + originalPayload: params.originalPayload, + failureReason: params.failureReason, + lastFailedAt: new Date(), + errorDetails: params.errorDetails ?? {}, + failureCategory: params.failureCategory ?? FailureCategory.UNKNOWN, + severity: params.severity ?? DLQSeverity.HIGH, + status: DLQStatus.PENDING, + maxRetries: this.autoRetryMaxAttempts, + nextRetryAt: this.autoRetryEnabled + ? new Date(Date.now() + this.autoRetryDelayMs) + : null, + }, + }); + + this.logger.log( + `Added DLQ entry ${entry.id} for node run ${params.nodeRunId}: ${params.failureReason}`, + ); + + this.eventEmitter.emit('dlq.entry-added', { + id: entry.id, + tenantId: params.tenantId, + workflowRunId: params.workflowRunId, + nodeRunId: params.nodeRunId, + severity: params.severity ?? DLQSeverity.HIGH, + }); + + // Check if alert threshold exceeded + await this.checkAlertThreshold(params.tenantId); + + return entry.id; + } + + /** + * Get DLQ entries with filtering + */ + async getEntries(params: { + tenantId?: string; + status?: DLQStatus; + severity?: DLQSeverity; + category?: FailureCategory; + limit?: number; + offset?: number; + }): Promise<{ entries: DLQEntry[]; total: number }> { + const where: any = {}; + + if (params.tenantId) { + where.tenantId = params.tenantId; + } + if (params.status) { + where.status = params.status; + } + if (params.severity) { + where.severity = params.severity; + } + if (params.category) { + where.failureCategory = params.category; + } + + const [entries, total] = await Promise.all([ + this.prisma.deadLetterEntry.findMany({ + where, + orderBy: [ + { severity: 'desc' }, + { createdAt: 'desc' }, + ], + take: params.limit ?? 50, + skip: params.offset ?? 0, + }), + this.prisma.deadLetterEntry.count({ where }), + ]); + + return { + entries: entries.map((e) => ({ + id: e.id, + tenantId: e.tenantId, + workflowRunId: e.workflowRunId, + nodeRunId: e.nodeRunId, + taskType: e.taskType, + failureReason: e.failureReason, + failureCount: e.failureCount, + failureCategory: e.failureCategory as FailureCategory, + severity: e.severity as DLQSeverity, + status: e.status as DLQStatus, + retryCount: e.retryCount, + maxRetries: e.maxRetries, + createdAt: e.createdAt, + lastFailedAt: e.lastFailedAt, + })), + total, + }; + } + + /** + * Get a single DLQ entry by ID + */ + async getEntry(id: string): Promise { + const entry = await this.prisma.deadLetterEntry.findUnique({ + where: { id }, + }); + + if (!entry) { + return null; + } + + return { + id: entry.id, + tenantId: entry.tenantId, + workflowRunId: entry.workflowRunId, + nodeRunId: entry.nodeRunId, + taskType: entry.taskType, + failureReason: entry.failureReason, + failureCount: entry.failureCount, + failureCategory: entry.failureCategory as FailureCategory, + severity: entry.severity as DLQSeverity, + status: entry.status as DLQStatus, + retryCount: entry.retryCount, + maxRetries: entry.maxRetries, + createdAt: entry.createdAt, + lastFailedAt: entry.lastFailedAt, + }; + } + + /** + * Retry a DLQ entry + */ + async retryEntry( + id: string, + userId?: string, + ): Promise { + try { + const entry = await this.prisma.deadLetterEntry.findUnique({ + where: { id }, + }); + + if (!entry) { + return { id, success: false, action: 'RETRY', error: 'Entry not found' }; + } + + if (entry.status !== DLQStatus.PENDING) { + return { + id, + success: false, + action: 'RETRY', + error: `Cannot retry entry in ${entry.status} status`, + }; + } + + // Update entry to retrying + await this.prisma.deadLetterEntry.update({ + where: { id }, + data: { + status: DLQStatus.RETRYING, + retryCount: entry.retryCount + 1, + }, + }); + + // Reset the node run to allow re-execution + await this.prisma.workflowNodeRun.update({ + where: { id: entry.nodeRunId }, + data: { + status: 'PENDING', + error: null, + }, + }); + + // Log recovery action + await this.logRecoveryAction( + entry.tenantId, + entry.nodeRunId, + 'DLQ_RETRY', + 'FAILED', + 'PENDING', + `Manual retry from DLQ (attempt ${entry.retryCount + 1})`, + true, + userId, + ); + + this.logger.log( + `DLQ entry ${id} marked for retry (attempt ${entry.retryCount + 1})`, + ); + + this.eventEmitter.emit('dlq.entry-retried', { + id, + nodeRunId: entry.nodeRunId, + attempt: entry.retryCount + 1, + }); + + return { id, success: true, action: 'RETRY' }; + } catch (error) { + return { id, success: false, action: 'RETRY', error: error.message }; + } + } + + /** + * Skip a DLQ entry (mark node as skipped, continue workflow) + */ + async skipEntry( + id: string, + reason: string, + userId?: string, + ): Promise { + try { + const entry = await this.prisma.deadLetterEntry.findUnique({ + where: { id }, + }); + + if (!entry) { + return { id, success: false, action: 'SKIP', error: 'Entry not found' }; + } + + if (entry.status !== DLQStatus.PENDING) { + return { + id, + success: false, + action: 'SKIP', + error: `Cannot skip entry in ${entry.status} status`, + }; + } + + // Update entry as resolved + await this.prisma.deadLetterEntry.update({ + where: { id }, + data: { + status: DLQStatus.RESOLVED, + resolvedAt: new Date(), + resolvedBy: userId, + resolutionNote: `Skipped: ${reason}`, + }, + }); + + // Mark node run as skipped + await this.prisma.workflowNodeRun.update({ + where: { id: entry.nodeRunId }, + data: { + status: 'SKIPPED', + error: `Skipped via DLQ: ${reason}`, + completedAt: new Date(), + }, + }); + + // Log recovery action + await this.logRecoveryAction( + entry.tenantId, + entry.nodeRunId, + 'DLQ_SKIP', + 'FAILED', + 'SKIPPED', + reason, + true, + userId, + ); + + this.logger.log(`DLQ entry ${id} skipped: ${reason}`); + + this.eventEmitter.emit('dlq.entry-skipped', { + id, + nodeRunId: entry.nodeRunId, + reason, + }); + + return { id, success: true, action: 'SKIP' }; + } catch (error) { + return { id, success: false, action: 'SKIP', error: error.message }; + } + } + + /** + * Discard a DLQ entry (abandon permanently) + */ + async discardEntry( + id: string, + reason: string, + userId?: string, + ): Promise { + try { + const entry = await this.prisma.deadLetterEntry.findUnique({ + where: { id }, + }); + + if (!entry) { + return { id, success: false, action: 'DISCARD', error: 'Entry not found' }; + } + + if (entry.status !== DLQStatus.PENDING) { + return { + id, + success: false, + action: 'DISCARD', + error: `Cannot discard entry in ${entry.status} status`, + }; + } + + // Update entry as discarded + await this.prisma.deadLetterEntry.update({ + where: { id }, + data: { + status: DLQStatus.DISCARDED, + resolvedAt: new Date(), + resolvedBy: userId, + resolutionNote: `Discarded: ${reason}`, + }, + }); + + // Mark node run as permanently failed + await this.prisma.workflowNodeRun.update({ + where: { id: entry.nodeRunId }, + data: { + status: 'FAILED', + error: `Discarded via DLQ: ${reason}`, + completedAt: new Date(), + }, + }); + + // Log recovery action + await this.logRecoveryAction( + entry.tenantId, + entry.nodeRunId, + 'DLQ_DISCARD', + 'PENDING', + 'DISCARDED', + reason, + true, + userId, + ); + + this.logger.log(`DLQ entry ${id} discarded: ${reason}`); + + this.eventEmitter.emit('dlq.entry-discarded', { + id, + nodeRunId: entry.nodeRunId, + reason, + }); + + return { id, success: true, action: 'DISCARD' }; + } catch (error) { + return { id, success: false, action: 'DISCARD', error: error.message }; + } + } + + /** + * Bulk retry entries + */ + async bulkRetry( + ids: string[], + userId?: string, + ): Promise { + const results: DLQActionResult[] = []; + + for (const id of ids) { + const result = await this.retryEntry(id, userId); + results.push(result); + } + + return results; + } + + /** + * Bulk skip entries + */ + async bulkSkip( + ids: string[], + reason: string, + userId?: string, + ): Promise { + const results: DLQActionResult[] = []; + + for (const id of ids) { + const result = await this.skipEntry(id, reason, userId); + results.push(result); + } + + return results; + } + + /** + * Get DLQ statistics + */ + async getStats(tenantId?: string): Promise { + const where = tenantId ? { tenantId } : {}; + + const [total, pending, retrying, resolved, discarded] = await Promise.all([ + this.prisma.deadLetterEntry.count({ where }), + this.prisma.deadLetterEntry.count({ + where: { ...where, status: DLQStatus.PENDING }, + }), + this.prisma.deadLetterEntry.count({ + where: { ...where, status: DLQStatus.RETRYING }, + }), + this.prisma.deadLetterEntry.count({ + where: { ...where, status: DLQStatus.RESOLVED }, + }), + this.prisma.deadLetterEntry.count({ + where: { ...where, status: DLQStatus.DISCARDED }, + }), + ]); + + // Get counts by severity + const bySeverity: Record = { + [DLQSeverity.LOW]: 0, + [DLQSeverity.MEDIUM]: 0, + [DLQSeverity.HIGH]: 0, + [DLQSeverity.CRITICAL]: 0, + }; + + const severityCounts = await this.prisma.deadLetterEntry.groupBy({ + by: ['severity'], + where: { ...where, status: DLQStatus.PENDING }, + _count: true, + }); + + for (const sc of severityCounts) { + bySeverity[sc.severity as DLQSeverity] = sc._count; + } + + // Get counts by category + const byCategory: Record = { + [FailureCategory.RETRYABLE]: 0, + [FailureCategory.PERMANENT]: 0, + [FailureCategory.UNKNOWN]: 0, + }; + + const categoryCounts = await this.prisma.deadLetterEntry.groupBy({ + by: ['failureCategory'], + where: { ...where, status: DLQStatus.PENDING }, + _count: true, + }); + + for (const cc of categoryCounts) { + byCategory[cc.failureCategory as FailureCategory] = cc._count; + } + + // Calculate average resolution time + const resolvedEntries = await this.prisma.deadLetterEntry.findMany({ + where: { + ...where, + status: { in: [DLQStatus.RESOLVED, DLQStatus.DISCARDED] }, + resolvedAt: { not: null }, + }, + select: { + createdAt: true, + resolvedAt: true, + }, + take: 100, + orderBy: { resolvedAt: 'desc' }, + }); + + let avgResolutionTimeMs = 0; + if (resolvedEntries.length > 0) { + const totalMs = resolvedEntries.reduce((sum, e) => { + return sum + (e.resolvedAt!.getTime() - e.createdAt.getTime()); + }, 0); + avgResolutionTimeMs = Math.round(totalMs / resolvedEntries.length); + } + + return { + total, + pending, + retrying, + resolved, + discarded, + bySeverity, + byCategory, + avgResolutionTimeMs, + }; + } + + /** + * Process auto-retry entries (runs every 5 minutes) + */ + @Cron(CronExpression.EVERY_5_MINUTES) + async processAutoRetries(): Promise { + if (!this.leaderElection.isLeader) { + return; + } + + if (!this.autoRetryEnabled) { + return; + } + + try { + const entriesToRetry = await this.prisma.deadLetterEntry.findMany({ + where: { + status: DLQStatus.PENDING, + failureCategory: FailureCategory.RETRYABLE, + retryCount: { lt: this.autoRetryMaxAttempts }, + nextRetryAt: { lte: new Date() }, + }, + take: 10, + }); + + for (const entry of entriesToRetry) { + await this.retryEntry(entry.id, 'SYSTEM_AUTO_RETRY'); + } + + if (entriesToRetry.length > 0) { + this.logger.log( + `Auto-retried ${entriesToRetry.length} DLQ entries`, + ); + } + } catch (error) { + this.logger.error(`Auto-retry processing failed: ${error.message}`); + } + } + + /** + * Handle retry completion callback + */ + async handleRetryComplete( + nodeRunId: string, + success: boolean, + error?: string, + ): Promise { + const entry = await this.prisma.deadLetterEntry.findFirst({ + where: { + nodeRunId, + status: DLQStatus.RETRYING, + }, + }); + + if (!entry) { + return; + } + + if (success) { + // Mark as resolved + await this.prisma.deadLetterEntry.update({ + where: { id: entry.id }, + data: { + status: DLQStatus.RESOLVED, + resolvedAt: new Date(), + resolutionNote: 'Retry successful', + }, + }); + + this.logger.log(`DLQ entry ${entry.id} resolved after retry`); + + this.eventEmitter.emit('dlq.entry-resolved', { + id: entry.id, + nodeRunId, + }); + } else { + // Handle retry failure + if (entry.retryCount >= entry.maxRetries) { + // Max retries exceeded, mark as permanent + await this.prisma.deadLetterEntry.update({ + where: { id: entry.id }, + data: { + status: DLQStatus.PENDING, + failureCategory: FailureCategory.PERMANENT, + failureReason: `${entry.failureReason} (retry failed: ${error})`, + failureCount: entry.failureCount + 1, + lastFailedAt: new Date(), + }, + }); + + this.logger.warn( + `DLQ entry ${entry.id} marked as permanent after ${entry.retryCount} retries`, + ); + } else { + // Schedule next retry + await this.prisma.deadLetterEntry.update({ + where: { id: entry.id }, + data: { + status: DLQStatus.PENDING, + failureCount: entry.failureCount + 1, + lastFailedAt: new Date(), + nextRetryAt: new Date(Date.now() + this.autoRetryDelayMs), + errorDetails: { + ...(entry.errorDetails as any), + [`retry_${entry.retryCount}`]: error, + }, + }, + }); + } + } + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + /** + * Check if alert threshold is exceeded + */ + private async checkAlertThreshold(tenantId: string): Promise { + const pendingCount = await this.prisma.deadLetterEntry.count({ + where: { + tenantId, + status: DLQStatus.PENDING, + }, + }); + + if (pendingCount >= this.alertThreshold) { + this.logger.warn( + `DLQ alert threshold exceeded for tenant ${tenantId}: ${pendingCount} pending entries`, + ); + + this.eventEmitter.emit('dlq.threshold-exceeded', { + tenantId, + count: pendingCount, + threshold: this.alertThreshold, + }); + } + } + + /** + * Log a recovery action + */ + private async logRecoveryAction( + tenantId: string, + targetId: string, + actionType: string, + previousState: string, + newState: string, + reason: string, + success: boolean, + actorId?: string, + ): Promise { + try { + await this.prisma.recoveryLog.create({ + data: { + tenantId, + actionType, + targetType: 'NODE', + targetId, + previousState, + newState, + reason, + actorType: actorId?.startsWith('SYSTEM') ? 'SYSTEM' : 'USER', + actorId, + success, + }, + }); + } catch (error) { + this.logger.error(`Failed to log recovery action: ${error.message}`); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/dependency-inference.service.ts b/packages/bytebot-workflow-orchestrator/src/services/dependency-inference.service.ts new file mode 100644 index 000000000..a566eb567 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/dependency-inference.service.ts @@ -0,0 +1,560 @@ +/** + * Dependency Inference Service + * v1.0.0: Nice-to-Have Enhancement for Step Ordering + * + * Implements LLM-based dependency inference for workflow steps: + * - Analyzes step descriptions to identify dependencies + * - Builds DAG (Directed Acyclic Graph) for execution order + * - Detects circular dependencies + * - Calculates execution levels for parallel execution + * + * Enhances the planner by automatically ordering steps based on + * their logical dependencies rather than just sequential order. + * + * @see /docs/CONTEXT_PROPAGATION_FIX_JAN_2026.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; + +// Step input for dependency analysis +export interface StepInput { + id: string; + description: string; + order: number; +} + +// Step with inferred dependencies +export interface StepWithDependencies { + id: string; + description: string; + order: number; + dependsOn: string[]; // IDs of prerequisite steps + reasoning: string; // Why these dependencies exist + confidence: number; // Confidence in the inference (0-1) +} + +// Dependency analysis result +export interface DependencyAnalysisResult { + steps: StepWithDependencies[]; + hasCircularDependency: boolean; + executionLevels: string[][]; // Steps grouped by execution level + criticalPath: string[]; // Longest path through the DAG + parallelizationPotential: number; // 0-1, how much can be parallelized +} + +// DAG node for execution ordering +interface DAGNode { + id: string; + dependencies: string[]; + dependents: string[]; + inDegree: number; + level: number; +} + +@Injectable() +export class DependencyInferenceService { + private readonly logger = new Logger(DependencyInferenceService.name); + private readonly enabled: boolean; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + private readonly llmModel: string; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.enabled = this.configService.get('DEPENDENCY_INFERENCE_ENABLED', 'true') === 'true'; + this.llmApiKey = this.configService.get('LLM_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + this.llmModel = this.configService.get('LLM_MODEL', 'claude-3-5-sonnet-20241022'); + this.logger.log(`Dependency inference ${this.enabled ? 'enabled' : 'disabled'}`); + } + + /** + * Infer dependencies between workflow steps + * + * Uses the LLM to analyze step descriptions and identify + * which steps depend on others. Returns a DAG structure + * that can be used for parallel execution planning. + */ + async inferDependencies( + goalDescription: string, + steps: StepInput[], + ): Promise { + if (!this.enabled || steps.length < 2) { + // Return simple sequential order if disabled or too few steps + return this.createSequentialResult(steps); + } + + this.logger.log(`Inferring dependencies for ${steps.length} steps`); + + try { + // Use LLM to analyze dependencies + const stepsWithDeps = await this.analyzeWithLLM(goalDescription, steps); + + // Detect and handle circular dependencies + const hasCircular = this.detectCircularDependencies(stepsWithDeps); + if (hasCircular) { + this.logger.warn('Circular dependency detected, falling back to sequential order'); + return this.createSequentialResult(steps); + } + + // Build DAG and calculate execution levels + const dag = this.buildDAG(stepsWithDeps); + const executionLevels = this.calculateExecutionLevels(dag); + const criticalPath = this.calculateCriticalPath(dag); + const parallelizationPotential = this.calculateParallelizationPotential( + steps.length, + executionLevels.length, + ); + + const result: DependencyAnalysisResult = { + steps: stepsWithDeps, + hasCircularDependency: false, + executionLevels, + criticalPath, + parallelizationPotential, + }; + + // Emit event for monitoring + this.eventEmitter.emit('dependency.inferred', { + stepCount: steps.length, + levelCount: executionLevels.length, + parallelizationPotential, + }); + + this.logger.log( + `Inferred dependencies: ${executionLevels.length} levels, ` + + `${(parallelizationPotential * 100).toFixed(1)}% parallelizable`, + ); + + return result; + } catch (error) { + this.logger.error(`Dependency inference failed: ${(error as Error).message}`); + // Fall back to sequential order on error + return this.createSequentialResult(steps); + } + } + + /** + * Check if step A depends on step B + */ + hasDependency( + analysisResult: DependencyAnalysisResult, + stepId: string, + dependsOnId: string, + ): boolean { + const step = analysisResult.steps.find((s) => s.id === stepId); + return step?.dependsOn.includes(dependsOnId) || false; + } + + /** + * Get steps that can run in parallel at a given level + */ + getParallelSteps( + analysisResult: DependencyAnalysisResult, + level: number, + ): string[] { + return analysisResult.executionLevels[level] || []; + } + + /** + * Get the execution level for a step + */ + getStepLevel( + analysisResult: DependencyAnalysisResult, + stepId: string, + ): number { + for (let level = 0; level < analysisResult.executionLevels.length; level++) { + if (analysisResult.executionLevels[level].includes(stepId)) { + return level; + } + } + return -1; // Not found + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + /** + * Use LLM to analyze step dependencies + */ + private async analyzeWithLLM( + goalDescription: string, + steps: StepInput[], + ): Promise { + const prompt = this.buildAnalysisPrompt(goalDescription, steps); + + try { + const response = await this.callLLM(prompt); + + // Parse LLM response + const parsed = this.parseLLMResponse(response, steps); + return parsed; + } catch (error) { + this.logger.error(`LLM dependency analysis failed: ${(error as Error).message}`); + // Return steps with no dependencies (sequential fallback) + return steps.map((step, index) => ({ + ...step, + dependsOn: index > 0 ? [steps[index - 1].id] : [], + reasoning: 'Fallback to sequential order', + confidence: 0.5, + })); + } + } + + /** + * Call LLM API directly (similar to PlannerService pattern) + */ + private async callLLM(prompt: string): Promise { + // If no API key, return mock response for development + if (!this.llmApiKey) { + this.logger.warn('No LLM API key configured, using fallback'); + return '{"steps": []}'; + } + + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: this.llmModel, + max_tokens: 2000, + temperature: 0, // Deterministic for consistency + messages: [ + { + role: 'user', + content: prompt, + }, + ], + }), + }); + + if (!response.ok) { + throw new Error(`LLM API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json(); + return data.content?.[0]?.text || ''; + } + + /** + * Build the prompt for dependency analysis + */ + private buildAnalysisPrompt(goalDescription: string, steps: StepInput[]): string { + const stepsText = steps + .map((s) => `- ID: "${s.id}", Step ${s.order + 1}: "${s.description}"`) + .join('\n'); + + return `You are analyzing workflow step dependencies. Given a goal and list of steps, +identify which steps depend on other steps. + +Rules for dependency detection: +1. A step depends on another if it requires the OUTPUT or COMPLETION of that step +2. Look for temporal indicators: "after", "once", "when", "then", "using the" +3. Look for data dependencies: step B uses data that step A creates +4. Look for resource dependencies: step B needs a resource that step A sets up +5. Steps with NO dependencies can run in parallel +6. Do NOT create circular dependencies + +Goal: ${goalDescription} + +Steps to analyze: +${stepsText} + +Respond with a JSON object in this exact format: +{ + "steps": [ + { + "id": "", + "dependsOn": ["", ...], + "reasoning": "", + "confidence": + } + ] +} + +For steps with no dependencies, use an empty array: "dependsOn": [] +Be conservative - only add a dependency if there's a clear logical requirement.`; + } + + /** + * Parse LLM response into structured format + */ + private parseLLMResponse( + response: string, + originalSteps: StepInput[], + ): StepWithDependencies[] { + try { + // Extract JSON from response (handle markdown code blocks) + let jsonStr = response; + const jsonMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/); + if (jsonMatch) { + jsonStr = jsonMatch[1]; + } + + const parsed = JSON.parse(jsonStr.trim()); + + if (!parsed.steps || !Array.isArray(parsed.steps)) { + throw new Error('Invalid response format: missing steps array'); + } + + // Map parsed response back to original steps + return originalSteps.map((step) => { + const analyzed = parsed.steps.find((s: any) => s.id === step.id); + return { + ...step, + dependsOn: analyzed?.dependsOn || [], + reasoning: analyzed?.reasoning || 'No dependencies identified', + confidence: analyzed?.confidence || 0.8, + }; + }); + } catch (error) { + this.logger.error(`Failed to parse LLM response: ${(error as Error).message}`); + throw error; + } + } + + /** + * Detect circular dependencies using DFS + */ + private detectCircularDependencies(steps: StepWithDependencies[]): boolean { + const visited = new Set(); + const recursionStack = new Set(); + const stepMap = new Map(steps.map((s) => [s.id, s])); + + const hasCycle = (stepId: string): boolean => { + if (recursionStack.has(stepId)) return true; + if (visited.has(stepId)) return false; + + visited.add(stepId); + recursionStack.add(stepId); + + const step = stepMap.get(stepId); + if (step) { + for (const depId of step.dependsOn) { + if (hasCycle(depId)) return true; + } + } + + recursionStack.delete(stepId); + return false; + }; + + for (const step of steps) { + if (hasCycle(step.id)) return true; + } + return false; + } + + /** + * Build DAG from steps with dependencies + */ + private buildDAG(steps: StepWithDependencies[]): Map { + const nodes = new Map(); + + // Initialize nodes + for (const step of steps) { + nodes.set(step.id, { + id: step.id, + dependencies: [...step.dependsOn], + dependents: [], + inDegree: step.dependsOn.length, + level: 0, + }); + } + + // Build reverse edges (dependents) + for (const step of steps) { + for (const depId of step.dependsOn) { + const depNode = nodes.get(depId); + if (depNode) { + depNode.dependents.push(step.id); + } + } + } + + return nodes; + } + + /** + * Calculate execution levels using Kahn's algorithm (topological sort) + * Level 0: No dependencies (can start immediately) + * Level N: All dependencies from levels 0 to N-1 + */ + private calculateExecutionLevels(dag: Map): string[][] { + const levels: string[][] = []; + const inDegree = new Map(); + const remaining = new Set(); + + // Initialize in-degrees + for (const [id, node] of dag) { + inDegree.set(id, node.inDegree); + remaining.add(id); + } + + while (remaining.size > 0) { + const currentLevel: string[] = []; + + // Find all nodes with in-degree 0 + for (const nodeId of remaining) { + if (inDegree.get(nodeId) === 0) { + currentLevel.push(nodeId); + } + } + + if (currentLevel.length === 0 && remaining.size > 0) { + // Should not happen if no cycles, but safety check + this.logger.error('Cycle detected during level calculation'); + break; + } + + // Remove processed nodes and update in-degrees + for (const nodeId of currentLevel) { + remaining.delete(nodeId); + const node = dag.get(nodeId)!; + + for (const dependentId of node.dependents) { + const degree = inDegree.get(dependentId) || 0; + inDegree.set(dependentId, degree - 1); + } + } + + if (currentLevel.length > 0) { + levels.push(currentLevel); + } + } + + return levels; + } + + /** + * Calculate the critical path (longest path through DAG) + */ + private calculateCriticalPath(dag: Map): string[] { + const distances = new Map(); + const predecessors = new Map(); + + // Initialize + for (const nodeId of dag.keys()) { + distances.set(nodeId, 0); + predecessors.set(nodeId, null); + } + + // Topological order processing + const sorted = this.topologicalSort(dag); + + for (const nodeId of sorted) { + const node = dag.get(nodeId)!; + const currentDist = distances.get(nodeId)!; + + for (const dependentId of node.dependents) { + const newDist = currentDist + 1; + if (newDist > distances.get(dependentId)!) { + distances.set(dependentId, newDist); + predecessors.set(dependentId, nodeId); + } + } + } + + // Find the node with maximum distance + let maxNode = sorted[0]; + let maxDist = 0; + for (const [nodeId, dist] of distances) { + if (dist > maxDist) { + maxDist = dist; + maxNode = nodeId; + } + } + + // Reconstruct path + const path: string[] = []; + let current: string | null = maxNode; + while (current) { + path.unshift(current); + current = predecessors.get(current) || null; + } + + return path; + } + + /** + * Topological sort using Kahn's algorithm + */ + private topologicalSort(dag: Map): string[] { + const result: string[] = []; + const inDegree = new Map(); + const queue: string[] = []; + + // Calculate in-degrees + for (const [id, node] of dag) { + inDegree.set(id, node.inDegree); + if (node.inDegree === 0) { + queue.push(id); + } + } + + while (queue.length > 0) { + const nodeId = queue.shift()!; + result.push(nodeId); + + const node = dag.get(nodeId)!; + for (const dependentId of node.dependents) { + const newDegree = inDegree.get(dependentId)! - 1; + inDegree.set(dependentId, newDegree); + if (newDegree === 0) { + queue.push(dependentId); + } + } + } + + return result; + } + + /** + * Calculate parallelization potential + * 1.0 = all steps can run in parallel (unlikely) + * 0.0 = all steps must run sequentially + */ + private calculateParallelizationPotential( + totalSteps: number, + levelCount: number, + ): number { + if (totalSteps <= 1) return 0; + if (levelCount <= 1) return 1; + + // Ideal parallelization: all steps in 1 level + // Worst case: each step in its own level (sequential) + // Score = 1 - (levels - 1) / (steps - 1) + return 1 - (levelCount - 1) / (totalSteps - 1); + } + + /** + * Create a simple sequential result (fallback) + */ + private createSequentialResult(steps: StepInput[]): DependencyAnalysisResult { + const stepsWithDeps: StepWithDependencies[] = steps.map((step, index) => ({ + ...step, + dependsOn: index > 0 ? [steps[index - 1].id] : [], + reasoning: 'Sequential order (default)', + confidence: 1.0, + })); + + const executionLevels = steps.map((s) => [s.id]); + const criticalPath = steps.map((s) => s.id); + + return { + steps: stepsWithDeps, + hasCircularDependency: false, + executionLevels, + criticalPath, + parallelizationPotential: 0, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/desktop-control.service.ts b/packages/bytebot-workflow-orchestrator/src/services/desktop-control.service.ts new file mode 100644 index 000000000..833d4a500 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/desktop-control.service.ts @@ -0,0 +1,337 @@ +/** + * Desktop Control Service + * Phase 4: Live Desktop Control APIs + * + * Responsibilities: + * - Get desktop status for goal runs + * - Wake hibernated desktops + * - Capture screenshots + * - Coordinate with desktop-router service + */ + +import { Injectable, Logger, NotFoundException, HttpException, HttpStatus } from '@nestjs/common'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { createId } from '@paralleldrive/cuid2'; + +export interface DesktopStatus { + runId: string; + workspaceId?: string; + status: 'ready' | 'starting' | 'hibernated' | 'unavailable' | 'error'; + vncReady: boolean; + podIP?: string; + hibernated: boolean; + lastActiveAt?: Date; + error?: string; +} + +export interface DesktopUrls { + direct?: string; + websockify?: string; + workspaceVnc?: string; + expiresAt: Date; +} + +export interface Screenshot { + id: string; + runId: string; + url: string; + thumbnailUrl?: string; + timestamp: Date; + stepId?: string; + stepDescription?: string; +} + +@Injectable() +export class DesktopControlService { + private readonly logger = new Logger(DesktopControlService.name); + + // Desktop router service URL (from env) + private readonly desktopRouterUrl = process.env.DESKTOP_ROUTER_URL || 'http://desktop-router:3000'; + + // Screenshot storage URL (from env) + private readonly screenshotStorageUrl = process.env.SCREENSHOT_STORAGE_URL || '/api/screenshots'; + + constructor( + private prisma: PrismaService, + private eventEmitter: EventEmitter2, + ) {} + + /** + * Get desktop status for a goal run + */ + async getDesktopStatus(runId: string): Promise { + this.logger.log(`Getting desktop status for run ${runId}`); + + // Get goal run to find workspace + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: runId }, + }); + + if (!goalRun) { + throw new NotFoundException(`Goal run ${runId} not found`); + } + + try { + // Query desktop-router for status + const response = await fetch(`${this.desktopRouterUrl}/desktop/runs/${runId}/status`, { + headers: { + 'Content-Type': 'application/json', + }, + }); + + if (!response.ok) { + if (response.status === 404) { + return { + runId, + status: 'unavailable', + vncReady: false, + hibernated: false, + }; + } + throw new Error(`Desktop router returned ${response.status}`); + } + + const data = await response.json(); + + return { + runId, + workspaceId: data.workspaceId, + status: data.status || 'ready', + vncReady: data.vncReady ?? true, + podIP: data.podIP, + hibernated: data.hibernated ?? false, + lastActiveAt: data.lastActiveAt ? new Date(data.lastActiveAt) : undefined, + }; + } catch (error: any) { + this.logger.warn(`Failed to get desktop status: ${error.message}`); + + // Return degraded status + return { + runId, + status: 'error', + vncReady: false, + hibernated: false, + error: error.message, + }; + } + } + + /** + * Get VNC connection URLs for a goal run + */ + async getDesktopUrls(runId: string): Promise { + this.logger.log(`Getting desktop URLs for run ${runId}`); + + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: runId }, + }); + + if (!goalRun) { + throw new NotFoundException(`Goal run ${runId} not found`); + } + + try { + const response = await fetch(`${this.desktopRouterUrl}/desktop/runs/${runId}/url`, { + headers: { + 'Content-Type': 'application/json', + }, + }); + + if (!response.ok) { + throw new HttpException( + `Failed to get desktop URLs: ${response.statusText}`, + response.status, + ); + } + + const data = await response.json(); + + return { + direct: data.urls?.direct, + websockify: data.urls?.websockify, + workspaceVnc: data.urls?.workspaceVnc, + expiresAt: new Date(data.expiresAt || Date.now() + 3600000), // 1 hour default + }; + } catch (error: any) { + this.logger.error(`Failed to get desktop URLs: ${error.message}`); + throw new HttpException( + `Failed to get desktop URLs: ${error.message}`, + HttpStatus.SERVICE_UNAVAILABLE, + ); + } + } + + /** + * Wake a hibernated desktop + */ + async wakeDesktop(runId: string): Promise<{ status: string; estimatedReady: string }> { + this.logger.log(`Waking desktop for run ${runId}`); + + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: runId }, + }); + + if (!goalRun) { + throw new NotFoundException(`Goal run ${runId} not found`); + } + + try { + const response = await fetch(`${this.desktopRouterUrl}/desktop/runs/${runId}/wake`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + }); + + if (!response.ok) { + throw new HttpException( + `Failed to wake desktop: ${response.statusText}`, + response.status, + ); + } + + const data = await response.json(); + + // Emit event for tracking + this.eventEmitter.emit('desktop.waking', { runId }); + + return { + status: data.status || 'waking', + estimatedReady: data.estimatedReady || '30s', + }; + } catch (error: any) { + this.logger.error(`Failed to wake desktop: ${error.message}`); + throw new HttpException( + `Failed to wake desktop: ${error.message}`, + HttpStatus.SERVICE_UNAVAILABLE, + ); + } + } + + /** + * Capture a screenshot from the desktop + */ + async captureScreenshot( + runId: string, + stepId?: string, + ): Promise { + this.logger.log(`Capturing screenshot for run ${runId}`); + + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: runId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: true, + }, + }, + }, + }); + + if (!goalRun) { + throw new NotFoundException(`Goal run ${runId} not found`); + } + + // Get step description if stepId provided + let stepDescription: string | undefined; + if (stepId) { + const step = goalRun.planVersions[0]?.checklistItems.find( + (item) => item.id === stepId, + ); + stepDescription = step?.description; + } + + const screenshotId = `ss-${createId()}`; + const timestamp = new Date(); + + try { + // Request screenshot from desktop-router + const response = await fetch(`${this.desktopRouterUrl}/desktop/runs/${runId}/screenshot`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ screenshotId }), + }); + + if (!response.ok) { + throw new HttpException( + `Failed to capture screenshot: ${response.statusText}`, + response.status, + ); + } + + const data = await response.json(); + + const screenshot: Screenshot = { + id: screenshotId, + runId, + url: data.url || `${this.screenshotStorageUrl}/${screenshotId}`, + thumbnailUrl: data.thumbnailUrl, + timestamp, + stepId, + stepDescription, + }; + + // Store screenshot reference in activity event + await this.prisma.activityEvent.create({ + data: { + goalRunId: runId, + eventType: 'SCREENSHOT_CAPTURED', + title: 'Screenshot captured', + description: stepDescription ? `During: ${stepDescription}` : undefined, + details: { + screenshotId, + url: screenshot.url, + stepId, + } as object, + checklistItemId: stepId, + }, + }); + + // Emit event for real-time delivery + this.eventEmitter.emit('screenshot.captured', { + runId, + screenshot, + }); + + return screenshot; + } catch (error: any) { + this.logger.error(`Failed to capture screenshot: ${error.message}`); + throw new HttpException( + `Failed to capture screenshot: ${error.message}`, + HttpStatus.SERVICE_UNAVAILABLE, + ); + } + } + + /** + * Get screenshots for a goal run + */ + async getScreenshots(runId: string): Promise { + this.logger.log(`Getting screenshots for run ${runId}`); + + const events = await this.prisma.activityEvent.findMany({ + where: { + goalRunId: runId, + eventType: 'SCREENSHOT_CAPTURED', + }, + orderBy: { createdAt: 'asc' }, + }); + + return events.map((event) => { + const details = event.details as { screenshotId?: string; url?: string; stepId?: string } | null; + return { + id: details?.screenshotId || event.id, + runId, + url: details?.url || '', + timestamp: event.createdAt, + stepId: details?.stepId, + stepDescription: event.description?.replace('During: ', ''), + }; + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/entity-resolution.service.ts b/packages/bytebot-workflow-orchestrator/src/services/entity-resolution.service.ts new file mode 100644 index 000000000..25eb47d5f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/entity-resolution.service.ts @@ -0,0 +1,630 @@ +/** + * Entity Resolution Service + * v1.0.0: Advanced Entity Linking and Disambiguation + * + * Implements industry-standard patterns for entity resolution: + * - Google Knowledge Graph: Entity disambiguation with confidence + * - Microsoft Entity Linking: Coreference resolution + * - Amazon Product Graph: Fuzzy matching with canonical forms + * + * Key Features: + * 1. Cross-step entity linking (same entity mentioned differently) + * 2. Fuzzy string matching (Levenshtein, Jaro-Winkler) + * 3. Canonical entity normalization + * 4. Entity relationship mapping + * 5. LLM-assisted disambiguation + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS_V2.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { ExtractedEntity } from './knowledge-extraction.service'; + +// Resolved Entity (canonical form) +export interface ResolvedEntity { + id: string; + canonicalName: string; + type: ExtractedEntity['type']; + aliases: string[]; // All variations found + mentions: number; + confidence: number; + firstSeen: Date; + lastSeen: Date; + sources: Array<{ + goalRunId: string; + stepNumber: number; + originalText: string; + }>; + relationships: EntityRelationship[]; + metadata: Record; +} + +// Entity Relationship +export interface EntityRelationship { + type: 'related_to' | 'part_of' | 'same_as' | 'derived_from' | 'refers_to'; + targetEntityId: string; + confidence: number; + evidence?: string; +} + +// Resolution Result +export interface ResolutionResult { + resolved: boolean; + entity: ResolvedEntity; + matchedExisting: boolean; + matchConfidence: number; + alternativeMatches?: Array<{ + entityId: string; + confidence: number; + }>; +} + +// Entity Cluster (group of related entities) +export interface EntityCluster { + id: string; + primaryEntity: ResolvedEntity; + relatedEntities: ResolvedEntity[]; + clusterType: 'identity' | 'hierarchy' | 'association'; + totalMentions: number; +} + +@Injectable() +export class EntityResolutionService { + private readonly logger = new Logger(EntityResolutionService.name); + private readonly enabled: boolean; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + + // Canonical entity store (goalRunId -> entityId -> ResolvedEntity) + private entityStore: Map> = new Map(); + + // Global entity index for cross-goal resolution + private globalEntityIndex: Map = new Map(); + + // Configuration + private readonly matchThreshold: number; + private readonly useLlmDisambiguation: boolean; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.enabled = this.configService.get('ENTITY_RESOLUTION_ENABLED', 'true') === 'true'; + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + this.matchThreshold = parseFloat(this.configService.get('ENTITY_MATCH_THRESHOLD', '0.85')); + this.useLlmDisambiguation = this.configService.get('USE_LLM_DISAMBIGUATION', 'true') === 'true'; + + this.logger.log( + `Entity resolution ${this.enabled ? 'enabled' : 'disabled'} ` + + `(threshold: ${this.matchThreshold})` + ); + } + + /** + * Resolve an entity mention to a canonical form + */ + async resolveEntity( + goalRunId: string, + entity: ExtractedEntity, + stepNumber: number, + context?: string, + ): Promise { + if (!this.enabled) { + return this.createNewEntity(goalRunId, entity, stepNumber); + } + + // Get or create goal entity store + if (!this.entityStore.has(goalRunId)) { + this.entityStore.set(goalRunId, new Map()); + } + const goalEntities = this.entityStore.get(goalRunId)!; + + // Try to find matching entity + const matches = this.findMatches(entity.name, entity.type, goalEntities); + + if (matches.length > 0 && matches[0].confidence >= this.matchThreshold) { + // Found a match - merge with existing + const bestMatch = matches[0]; + const resolved = this.mergeEntity(bestMatch.entity, entity, stepNumber, goalRunId); + + return { + resolved: true, + entity: resolved, + matchedExisting: true, + matchConfidence: bestMatch.confidence, + alternativeMatches: matches.slice(1).map(m => ({ + entityId: m.entity.id, + confidence: m.confidence, + })), + }; + } + + // Check if LLM disambiguation can help + if (this.useLlmDisambiguation && matches.length > 0 && context) { + const disambiguated = await this.llmDisambiguate(entity, matches, context); + if (disambiguated) { + const resolved = this.mergeEntity(disambiguated.entity, entity, stepNumber, goalRunId); + return { + resolved: true, + entity: resolved, + matchedExisting: true, + matchConfidence: disambiguated.confidence, + }; + } + } + + // No match found - create new entity + return this.createNewEntity(goalRunId, entity, stepNumber); + } + + /** + * Resolve multiple entities in batch + */ + async resolveEntities( + goalRunId: string, + entities: ExtractedEntity[], + stepNumber: number, + context?: string, + ): Promise { + const results: ResolutionResult[] = []; + + for (const entity of entities) { + const result = await this.resolveEntity(goalRunId, entity, stepNumber, context); + results.push(result); + } + + // Find relationships between resolved entities + this.detectRelationships(goalRunId, results.map(r => r.entity)); + + return results; + } + + /** + * Get all resolved entities for a goal + */ + getResolvedEntities(goalRunId: string): ResolvedEntity[] { + const goalEntities = this.entityStore.get(goalRunId); + if (!goalEntities) { + return []; + } + return Array.from(goalEntities.values()); + } + + /** + * Get entity by ID + */ + getEntity(goalRunId: string, entityId: string): ResolvedEntity | null { + return this.entityStore.get(goalRunId)?.get(entityId) || null; + } + + /** + * Find entity clusters (groups of related entities) + */ + findEntityClusters(goalRunId: string): EntityCluster[] { + const entities = this.getResolvedEntities(goalRunId); + const visited = new Set(); + const clusters: EntityCluster[] = []; + + for (const entity of entities) { + if (visited.has(entity.id)) continue; + + const cluster = this.buildCluster(entity, entities, visited); + if (cluster.relatedEntities.length > 0 || cluster.primaryEntity.mentions > 2) { + clusters.push(cluster); + } + } + + return clusters.sort((a, b) => b.totalMentions - a.totalMentions); + } + + /** + * Link entities across goals (for cross-goal learning) + */ + async linkCrossGoalEntities( + goalRunIds: string[], + ): Promise> { + const linkedGroups = new Map(); + const allEntities: Array<{ goalRunId: string; entity: ResolvedEntity }> = []; + + // Collect all entities + for (const goalRunId of goalRunIds) { + const entities = this.getResolvedEntities(goalRunId); + for (const entity of entities) { + allEntities.push({ goalRunId, entity }); + } + } + + // Find matches across goals + for (let i = 0; i < allEntities.length; i++) { + for (let j = i + 1; j < allEntities.length; j++) { + if (allEntities[i].goalRunId === allEntities[j].goalRunId) continue; + + const similarity = this.calculateSimilarity( + allEntities[i].entity.canonicalName, + allEntities[j].entity.canonicalName, + ); + + if (similarity >= this.matchThreshold && + allEntities[i].entity.type === allEntities[j].entity.type) { + const key1 = `${allEntities[i].goalRunId}:${allEntities[i].entity.id}`; + const key2 = `${allEntities[j].goalRunId}:${allEntities[j].entity.id}`; + + if (!linkedGroups.has(key1)) { + linkedGroups.set(key1, [key1]); + } + linkedGroups.get(key1)!.push(key2); + } + } + } + + return linkedGroups; + } + + /** + * Get entity statistics + */ + getStatistics(goalRunId: string): { + totalEntities: number; + byType: Record; + averageAliases: number; + totalRelationships: number; + } { + const entities = this.getResolvedEntities(goalRunId); + + const byType: Record = {}; + let totalAliases = 0; + let totalRelationships = 0; + + for (const entity of entities) { + byType[entity.type] = (byType[entity.type] || 0) + 1; + totalAliases += entity.aliases.length; + totalRelationships += entity.relationships.length; + } + + return { + totalEntities: entities.length, + byType, + averageAliases: entities.length > 0 ? totalAliases / entities.length : 0, + totalRelationships, + }; + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + private findMatches( + name: string, + type: ExtractedEntity['type'], + entities: Map, + ): Array<{ entity: ResolvedEntity; confidence: number }> { + const matches: Array<{ entity: ResolvedEntity; confidence: number }> = []; + const nameLower = name.toLowerCase(); + + for (const entity of entities.values()) { + // Type must match + if (entity.type !== type) continue; + + // Check canonical name + let maxConfidence = this.calculateSimilarity(nameLower, entity.canonicalName.toLowerCase()); + + // Check aliases + for (const alias of entity.aliases) { + const aliasConfidence = this.calculateSimilarity(nameLower, alias.toLowerCase()); + maxConfidence = Math.max(maxConfidence, aliasConfidence); + } + + if (maxConfidence > 0.5) { + matches.push({ entity, confidence: maxConfidence }); + } + } + + return matches.sort((a, b) => b.confidence - a.confidence); + } + + private calculateSimilarity(str1: string, str2: string): number { + // Exact match + if (str1 === str2) return 1.0; + + // Normalize + const s1 = str1.toLowerCase().trim(); + const s2 = str2.toLowerCase().trim(); + + if (s1 === s2) return 0.99; + + // Contains check + if (s1.includes(s2) || s2.includes(s1)) { + return 0.9; + } + + // Jaro-Winkler similarity + return this.jaroWinkler(s1, s2); + } + + private jaroWinkler(s1: string, s2: string): number { + const jaro = this.jaroSimilarity(s1, s2); + + // Common prefix (up to 4 chars) + let prefix = 0; + for (let i = 0; i < Math.min(4, Math.min(s1.length, s2.length)); i++) { + if (s1[i] === s2[i]) { + prefix++; + } else { + break; + } + } + + return jaro + (prefix * 0.1 * (1 - jaro)); + } + + private jaroSimilarity(s1: string, s2: string): number { + if (s1.length === 0 && s2.length === 0) return 1.0; + if (s1.length === 0 || s2.length === 0) return 0.0; + + const matchWindow = Math.max(Math.floor(Math.max(s1.length, s2.length) / 2) - 1, 0); + const s1Matches = new Array(s1.length).fill(false); + const s2Matches = new Array(s2.length).fill(false); + + let matches = 0; + let transpositions = 0; + + // Find matches + for (let i = 0; i < s1.length; i++) { + const start = Math.max(0, i - matchWindow); + const end = Math.min(i + matchWindow + 1, s2.length); + + for (let j = start; j < end; j++) { + if (s2Matches[j] || s1[i] !== s2[j]) continue; + s1Matches[i] = true; + s2Matches[j] = true; + matches++; + break; + } + } + + if (matches === 0) return 0.0; + + // Count transpositions + let k = 0; + for (let i = 0; i < s1.length; i++) { + if (!s1Matches[i]) continue; + while (!s2Matches[k]) k++; + if (s1[i] !== s2[k]) transpositions++; + k++; + } + + return ( + (matches / s1.length + + matches / s2.length + + (matches - transpositions / 2) / matches) / 3 + ); + } + + private createNewEntity( + goalRunId: string, + entity: ExtractedEntity, + stepNumber: number, + ): ResolutionResult { + const canonicalName = this.normalizeEntityName(entity.name); + const entityId = `ent-${Date.now()}-${Math.random().toString(36).substring(2, 8)}`; + + const resolved: ResolvedEntity = { + id: entityId, + canonicalName, + type: entity.type, + aliases: entity.name !== canonicalName ? [entity.name] : [], + mentions: entity.mentions, + confidence: 1.0, + firstSeen: entity.firstSeen, + lastSeen: entity.lastSeen, + sources: [{ + goalRunId, + stepNumber, + originalText: entity.name, + }], + relationships: [], + metadata: {}, + }; + + // Store + if (!this.entityStore.has(goalRunId)) { + this.entityStore.set(goalRunId, new Map()); + } + this.entityStore.get(goalRunId)!.set(entityId, resolved); + + return { + resolved: true, + entity: resolved, + matchedExisting: false, + matchConfidence: 1.0, + }; + } + + private mergeEntity( + existing: ResolvedEntity, + newEntity: ExtractedEntity, + stepNumber: number, + goalRunId: string, + ): ResolvedEntity { + // Add alias if different from canonical + const newName = this.normalizeEntityName(newEntity.name); + if (newName !== existing.canonicalName && !existing.aliases.includes(newEntity.name)) { + existing.aliases.push(newEntity.name); + } + + // Update mentions and timestamps + existing.mentions += newEntity.mentions; + if (newEntity.lastSeen > existing.lastSeen) { + existing.lastSeen = newEntity.lastSeen; + } + if (newEntity.firstSeen < existing.firstSeen) { + existing.firstSeen = newEntity.firstSeen; + } + + // Add source + existing.sources.push({ + goalRunId, + stepNumber, + originalText: newEntity.name, + }); + + return existing; + } + + private normalizeEntityName(name: string): string { + // Remove common prefixes/suffixes + let normalized = name.trim(); + + // Remove currency symbols for prices + normalized = normalized.replace(/^[\$€£]/, '').trim(); + + // Title case for proper nouns + if (/^[a-z]/.test(normalized)) { + normalized = normalized.charAt(0).toUpperCase() + normalized.slice(1); + } + + // Remove trailing punctuation + normalized = normalized.replace(/[.,;:!?]+$/, ''); + + return normalized; + } + + private async llmDisambiguate( + entity: ExtractedEntity, + candidates: Array<{ entity: ResolvedEntity; confidence: number }>, + context: string, + ): Promise<{ entity: ResolvedEntity; confidence: number } | null> { + if (!this.llmApiKey || candidates.length === 0) { + return null; + } + + const candidateList = candidates.slice(0, 5).map((c, i) => + `${i + 1}. "${c.entity.canonicalName}" (aliases: ${c.entity.aliases.join(', ')})` + ).join('\n'); + + const prompt = `Given the context and entity mention, determine if any candidate matches. + +CONTEXT: ${context.substring(0, 500)} + +ENTITY MENTION: "${entity.name}" (type: ${entity.type}) + +CANDIDATES: +${candidateList} + +Respond with ONLY a JSON object: +{"match": , "confidence": <0-1>}`; + + try { + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: 'claude-3-haiku-20240307', + max_tokens: 100, + messages: [{ role: 'user', content: prompt }], + }), + }); + + if (!response.ok) return null; + + const data = await response.json(); + const text = data.content?.[0]?.text || ''; + const match = text.match(/\{[^}]+\}/); + + if (match) { + const result = JSON.parse(match[0]); + if (result.match && result.match >= 1 && result.match <= candidates.length) { + return { + entity: candidates[result.match - 1].entity, + confidence: result.confidence || 0.8, + }; + } + } + } catch (error) { + this.logger.debug(`LLM disambiguation failed: ${(error as Error).message}`); + } + + return null; + } + + private detectRelationships(goalRunId: string, entities: ResolvedEntity[]): void { + // Detect relationships between entities based on co-occurrence and naming + for (let i = 0; i < entities.length; i++) { + for (let j = i + 1; j < entities.length; j++) { + const e1 = entities[i]; + const e2 = entities[j]; + + // Check for hierarchical relationship (one contains the other) + if (e1.canonicalName.toLowerCase().includes(e2.canonicalName.toLowerCase())) { + e2.relationships.push({ + type: 'part_of', + targetEntityId: e1.id, + confidence: 0.7, + }); + } else if (e2.canonicalName.toLowerCase().includes(e1.canonicalName.toLowerCase())) { + e1.relationships.push({ + type: 'part_of', + targetEntityId: e2.id, + confidence: 0.7, + }); + } + + // Check for co-occurrence in same step + const sharedSteps = e1.sources.filter(s1 => + e2.sources.some(s2 => s2.stepNumber === s1.stepNumber) + ); + if (sharedSteps.length > 0) { + e1.relationships.push({ + type: 'related_to', + targetEntityId: e2.id, + confidence: 0.5 + (sharedSteps.length * 0.1), + }); + } + } + } + } + + private buildCluster( + entity: ResolvedEntity, + allEntities: ResolvedEntity[], + visited: Set, + ): EntityCluster { + visited.add(entity.id); + + const relatedEntities: ResolvedEntity[] = []; + let totalMentions = entity.mentions; + + // Find directly related entities + for (const rel of entity.relationships) { + const related = allEntities.find(e => e.id === rel.targetEntityId); + if (related && !visited.has(related.id)) { + visited.add(related.id); + relatedEntities.push(related); + totalMentions += related.mentions; + } + } + + // Determine cluster type + let clusterType: EntityCluster['clusterType'] = 'association'; + if (entity.relationships.some(r => r.type === 'same_as')) { + clusterType = 'identity'; + } else if (entity.relationships.some(r => r.type === 'part_of')) { + clusterType = 'hierarchy'; + } + + return { + id: `cluster-${entity.id}`, + primaryEntity: entity, + relatedEntities, + clusterType, + totalMentions, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/failure-analysis.service.ts b/packages/bytebot-workflow-orchestrator/src/services/failure-analysis.service.ts new file mode 100644 index 000000000..e0f8e5fa1 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/failure-analysis.service.ts @@ -0,0 +1,830 @@ +/** + * Failure Analysis Service + * Phase 9 (v5.4.0): Advanced AI Features + * + * Responsibilities: + * - Analyze failed goal runs to identify root causes + * - Cluster similar failures to detect patterns + * - Generate remediation suggestions + * - Predict potential failures before they occur + * - Track failure trends over time + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { z } from 'zod'; + +// Zod schemas for LLM output validation +const RootCauseSchema = z.object({ + category: z.enum([ + 'configuration', + 'network', + 'authentication', + 'timeout', + 'resource', + 'validation', + 'dependency', + 'user_input', + 'system', + 'unknown', + ]), + description: z.string(), + confidence: z.number().min(0).max(1), + evidence: z.array(z.string()).optional(), +}); + +const RemediationSchema = z.object({ + action: z.string(), + priority: z.enum(['high', 'medium', 'low']), + estimatedEffort: z.enum(['quick', 'moderate', 'significant']), + reasoning: z.string().optional(), +}); + +const FailureAnalysisOutputSchema = z.object({ + rootCauses: z.array(RootCauseSchema).min(1), + remediations: z.array(RemediationSchema), + similarFailures: z.array(z.string()).optional(), + preventionSuggestions: z.array(z.string()).optional(), + severity: z.enum(['critical', 'high', 'medium', 'low']), +}); + +type FailureAnalysisOutput = z.infer; + +// Failure categories for clustering +export enum FailureCategory { + CONFIGURATION = 'configuration', + NETWORK = 'network', + AUTHENTICATION = 'authentication', + TIMEOUT = 'timeout', + RESOURCE = 'resource', + VALIDATION = 'validation', + DEPENDENCY = 'dependency', + USER_INPUT = 'user_input', + SYSTEM = 'system', + UNKNOWN = 'unknown', +} + +// Public interfaces +export interface FailureAnalysisRequest { + tenantId: string; + goalRunId: string; + includeHistory?: boolean; +} + +export interface FailureAnalysisResult { + goalRunId: string; + goal: string; + failedAt: Date; + rootCauses: Array<{ + category: FailureCategory; + description: string; + confidence: number; + evidence?: string[]; + }>; + remediations: Array<{ + action: string; + priority: 'high' | 'medium' | 'low'; + estimatedEffort: 'quick' | 'moderate' | 'significant'; + reasoning?: string; + }>; + severity: 'critical' | 'high' | 'medium' | 'low'; + similarFailuresCount: number; + preventionSuggestions: string[]; + analysisConfidence: number; +} + +export interface FailurePattern { + id: string; + category: FailureCategory; + pattern: string; + occurrenceCount: number; + firstSeen: Date; + lastSeen: Date; + affectedGoalRunIds: string[]; + commonRemediations: string[]; + resolutionRate: number; +} + +export interface FailureTrend { + period: string; + totalFailures: number; + byCategory: Record; + topPatterns: Array<{ pattern: string; count: number }>; + resolutionRate: number; +} + +export interface PredictiveAnalysis { + goalRunId: string; + riskScore: number; + potentialFailurePoints: Array<{ + step: number; + description: string; + riskLevel: 'high' | 'medium' | 'low'; + mitigationSuggestion?: string; + }>; + historicalSuccessRate: number; +} + +@Injectable() +export class FailureAnalysisService { + private readonly logger = new Logger(FailureAnalysisService.name); + private readonly llmModel: string; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.llmModel = this.configService.get('LLM_MODEL', 'claude-3-5-sonnet-20241022'); + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + + this.logger.log('FailureAnalysisService initialized'); + } + + /** + * Analyze a specific failed goal run + */ + async analyzeFailure(request: FailureAnalysisRequest): Promise { + const { tenantId, goalRunId, includeHistory = true } = request; + + this.logger.log(`Analyzing failure for goal run ${goalRunId}`); + + // Fetch the failed goal run with context + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + activityEvents: { + orderBy: { createdAt: 'desc' }, + take: 20, + }, + }, + }); + + if (!goalRun) { + throw new Error(`Goal run ${goalRunId} not found`); + } + + // Get similar historical failures if requested + let historicalContext: any[] = []; + if (includeHistory) { + historicalContext = await this.getSimilarFailures(tenantId, goalRun.goal, goalRun.error || ''); + } + + // Build analysis prompt + const prompt = this.buildAnalysisPrompt(goalRun, historicalContext); + + try { + const response = await this.callLLM(prompt); + const parsed = this.parseLLMResponse(response); + const validated = FailureAnalysisOutputSchema.safeParse(parsed); + + let result: FailureAnalysisResult; + + if (validated.success) { + result = this.transformToResult(goalRun, validated.data, historicalContext.length); + } else { + this.logger.warn(`LLM response validation failed: ${validated.error.message}`); + result = this.generateFallbackAnalysis(goalRun); + } + + // Store analysis result + await this.storeAnalysisResult(goalRunId, result); + + // Emit event + this.eventEmitter.emit('failure-analysis.completed', { + tenantId, + goalRunId, + severity: result.severity, + rootCauseCount: result.rootCauses.length, + }); + + return result; + } catch (error: any) { + this.logger.error(`Failure analysis failed: ${error.message}`); + return this.generateFallbackAnalysis(goalRun); + } + } + + /** + * Get failure patterns for a tenant + */ + async getFailurePatterns( + tenantId: string, + options?: { days?: number; limit?: number }, + ): Promise { + const days = options?.days || 30; + const limit = options?.limit || 10; + const since = new Date(Date.now() - days * 24 * 60 * 60 * 1000); + + // Fetch failed goal runs + const failedRuns = await this.prisma.goalRun.findMany({ + where: { + tenantId, + status: 'FAILED', + createdAt: { gte: since }, + }, + select: { + id: true, + goal: true, + error: true, + createdAt: true, + }, + orderBy: { createdAt: 'desc' }, + take: 500, + }); + + // Cluster failures by error similarity + const patterns = this.clusterFailures(failedRuns); + + return patterns.slice(0, limit); + } + + /** + * Get failure trends over time + */ + async getFailureTrends( + tenantId: string, + options?: { days?: number; granularity?: 'day' | 'week' | 'month' }, + ): Promise { + const days = options?.days || 30; + const granularity = options?.granularity || 'day'; + const since = new Date(Date.now() - days * 24 * 60 * 60 * 1000); + + // Fetch failed and completed goal runs + const runs = await this.prisma.goalRun.findMany({ + where: { + tenantId, + createdAt: { gte: since }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + select: { + id: true, + status: true, + error: true, + createdAt: true, + }, + orderBy: { createdAt: 'asc' }, + }); + + // Group by period + const trends = this.groupByPeriod(runs, granularity); + + return trends; + } + + /** + * Predictive analysis for a goal before execution + */ + async predictFailureRisk( + tenantId: string, + goal: string, + constraints?: Record, + ): Promise { + // Find similar historical goals + const similarGoals = await this.prisma.goalRun.findMany({ + where: { + tenantId, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + select: { + id: true, + goal: true, + status: true, + error: true, + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + orderBy: { createdAt: 'desc' }, + take: 100, + }); + + // Calculate similarity and success rate + const similar = similarGoals + .map(g => ({ + ...g, + similarity: this.calculateSimilarity(goal, g.goal), + })) + .filter(g => g.similarity > 0.5) + .sort((a, b) => b.similarity - a.similarity) + .slice(0, 20); + + const successCount = similar.filter(g => g.status === 'COMPLETED').length; + const historicalSuccessRate = similar.length > 0 ? successCount / similar.length : 0.5; + + // Calculate risk score (inverse of success rate with adjustments) + let riskScore = 1 - historicalSuccessRate; + + // Adjust for goal complexity + const wordCount = goal.split(' ').length; + if (wordCount > 50) riskScore += 0.1; + if (wordCount < 5) riskScore += 0.15; + + // Check for high-risk keywords + const highRiskKeywords = ['delete', 'remove', 'transfer', 'payment', 'password']; + if (highRiskKeywords.some(k => goal.toLowerCase().includes(k))) { + riskScore += 0.1; + } + + riskScore = Math.min(1, Math.max(0, riskScore)); + + // Identify potential failure points + const failedSimilar = similar.filter(g => g.status === 'FAILED'); + const potentialFailurePoints: PredictiveAnalysis['potentialFailurePoints'] = []; + + // Analyze common failure steps + const failureSteps = new Map(); + for (const failed of failedSimilar) { + const items = failed.planVersions?.[0]?.checklistItems || []; + items.forEach((item: any, idx: number) => { + if (item.status === 'FAILED') { + const key = `${idx}:${item.description.substring(0, 50)}`; + failureSteps.set(key, (failureSteps.get(key) || 0) + 1); + } + }); + } + + // Add top failure points + const sortedSteps = [...failureSteps.entries()].sort((a, b) => b[1] - a[1]); + for (const [step, count] of sortedSteps.slice(0, 3)) { + const [idx, desc] = step.split(':'); + potentialFailurePoints.push({ + step: parseInt(idx) + 1, + description: desc, + riskLevel: count >= 3 ? 'high' : count >= 2 ? 'medium' : 'low', + mitigationSuggestion: `This step has failed ${count} times in similar goals. Consider adding verification.`, + }); + } + + return { + goalRunId: '', // Not yet created + riskScore: Math.round(riskScore * 100) / 100, + potentialFailurePoints, + historicalSuccessRate: Math.round(historicalSuccessRate * 100) / 100, + }; + } + + /** + * Get remediation suggestions for a failure category + */ + async getRemediationSuggestions( + category: FailureCategory, + ): Promise> { + // Common remediations by category + const commonRemediations: Record> = { + [FailureCategory.CONFIGURATION]: [ + { action: 'Verify environment variables are set correctly', successRate: 0.85 }, + { action: 'Check configuration file syntax', successRate: 0.75 }, + { action: 'Ensure required dependencies are installed', successRate: 0.8 }, + ], + [FailureCategory.NETWORK]: [ + { action: 'Check network connectivity to target host', successRate: 0.7 }, + { action: 'Verify firewall rules allow the connection', successRate: 0.65 }, + { action: 'Test with increased timeout values', successRate: 0.6 }, + ], + [FailureCategory.AUTHENTICATION]: [ + { action: 'Verify credentials are valid and not expired', successRate: 0.9 }, + { action: 'Check API key permissions', successRate: 0.85 }, + { action: 'Ensure authentication tokens are refreshed', successRate: 0.75 }, + ], + [FailureCategory.TIMEOUT]: [ + { action: 'Increase operation timeout', successRate: 0.7 }, + { action: 'Break operation into smaller chunks', successRate: 0.65 }, + { action: 'Check for resource bottlenecks', successRate: 0.6 }, + ], + [FailureCategory.RESOURCE]: [ + { action: 'Check available disk space', successRate: 0.8 }, + { action: 'Verify memory limits are sufficient', successRate: 0.75 }, + { action: 'Review resource quotas', successRate: 0.7 }, + ], + [FailureCategory.VALIDATION]: [ + { action: 'Verify input data format', successRate: 0.85 }, + { action: 'Check for required fields', successRate: 0.8 }, + { action: 'Validate data types match expected schema', successRate: 0.75 }, + ], + [FailureCategory.DEPENDENCY]: [ + { action: 'Update dependencies to compatible versions', successRate: 0.7 }, + { action: 'Check dependency service health', successRate: 0.75 }, + { action: 'Verify dependency API compatibility', successRate: 0.65 }, + ], + [FailureCategory.USER_INPUT]: [ + { action: 'Clarify goal requirements with user', successRate: 0.8 }, + { action: 'Add input validation before processing', successRate: 0.75 }, + { action: 'Provide better error messages for invalid input', successRate: 0.7 }, + ], + [FailureCategory.SYSTEM]: [ + { action: 'Check system logs for errors', successRate: 0.6 }, + { action: 'Restart affected services', successRate: 0.55 }, + { action: 'Verify system resources are available', successRate: 0.65 }, + ], + [FailureCategory.UNKNOWN]: [ + { action: 'Review detailed logs for more context', successRate: 0.5 }, + { action: 'Try breaking down the goal into smaller steps', successRate: 0.6 }, + { action: 'Contact support if issue persists', successRate: 0.4 }, + ], + }; + + return commonRemediations[category] || commonRemediations[FailureCategory.UNKNOWN]; + } + + // Private methods + + private async getSimilarFailures( + tenantId: string, + goal: string, + error: string, + ): Promise { + const recentFailures = await this.prisma.goalRun.findMany({ + where: { + tenantId, + status: 'FAILED', + }, + select: { + id: true, + goal: true, + error: true, + createdAt: true, + }, + orderBy: { createdAt: 'desc' }, + take: 50, + }); + + // Find similar by goal or error + return recentFailures.filter(f => { + const goalSimilarity = this.calculateSimilarity(goal, f.goal); + const errorSimilarity = error && f.error ? this.calculateSimilarity(error, f.error) : 0; + return goalSimilarity > 0.5 || errorSimilarity > 0.6; + }); + } + + private calculateSimilarity(text1: string, text2: string): number { + const words1 = new Set(text1.toLowerCase().split(/\s+/).filter(w => w.length > 2)); + const words2 = new Set(text2.toLowerCase().split(/\s+/).filter(w => w.length > 2)); + + const intersection = [...words1].filter(w => words2.has(w)).length; + const union = new Set([...words1, ...words2]).size; + + return union > 0 ? intersection / union : 0; + } + + private buildAnalysisPrompt(goalRun: any, historicalContext: any[]): string { + const errorInfo = goalRun.error || 'No error message recorded'; + const activities = goalRun.activityEvents + ?.filter((e: any) => e.severity === 'error' || e.eventType.includes('FAIL')) + .map((e: any) => `[${e.eventType}] ${e.title}: ${e.description || ''}`) + .join('\n') || 'No error events'; + + const checklistItems = goalRun.planVersions?.[0]?.checklistItems || []; + const failedSteps = checklistItems + .filter((i: any) => i.status === 'FAILED') + .map((i: any) => `Step ${i.order}: ${i.description}`) + .join('\n') || 'No failed steps recorded'; + + const historicalInfo = historicalContext.length > 0 + ? `\nSIMILAR HISTORICAL FAILURES (${historicalContext.length}):\n${ + historicalContext.slice(0, 3).map(f => `- ${f.error || f.goal.substring(0, 50)}`).join('\n') + }` + : ''; + + return `Analyze this failed goal run and identify root causes: + +GOAL: "${goalRun.goal}" + +ERROR: ${errorInfo} + +FAILED STEPS: +${failedSteps} + +ERROR EVENTS: +${activities} +${historicalInfo} + +TASK: +1. Identify likely root causes with confidence scores +2. Categorize into: configuration, network, authentication, timeout, resource, validation, dependency, user_input, system, or unknown +3. Suggest specific remediations with priority +4. Assess severity: critical, high, medium, or low +5. Suggest prevention strategies + +OUTPUT FORMAT (JSON): +{ + "rootCauses": [ + { + "category": "category_name", + "description": "What went wrong", + "confidence": 0.0-1.0, + "evidence": ["evidence1", "evidence2"] + } + ], + "remediations": [ + { + "action": "What to do", + "priority": "high|medium|low", + "estimatedEffort": "quick|moderate|significant", + "reasoning": "Why this will help" + } + ], + "preventionSuggestions": ["suggestion1", "suggestion2"], + "severity": "critical|high|medium|low" +} + +Analyze the failure:`; + } + + private async callLLM(prompt: string): Promise { + if (!this.llmApiKey) { + this.logger.warn('No LLM API key configured, using heuristic analysis'); + return this.getHeuristicAnalysis(prompt); + } + + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: this.llmModel, + max_tokens: 2000, + messages: [{ role: 'user', content: prompt }], + }), + }); + + if (!response.ok) { + throw new Error(`LLM API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json(); + return data.content[0].text; + } + + private parseLLMResponse(response: string): any { + const jsonMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/); + const jsonStr = jsonMatch ? jsonMatch[1] : response; + + try { + return JSON.parse(jsonStr.trim()); + } catch { + const objectMatch = response.match(/\{[\s\S]*\}/); + if (objectMatch) { + return JSON.parse(objectMatch[0]); + } + throw new Error('Failed to parse LLM response as JSON'); + } + } + + private transformToResult( + goalRun: any, + output: FailureAnalysisOutput, + similarCount: number, + ): FailureAnalysisResult { + return { + goalRunId: goalRun.id, + goal: goalRun.goal, + failedAt: goalRun.updatedAt, + rootCauses: output.rootCauses.map(rc => ({ + category: rc.category as FailureCategory, + description: rc.description, + confidence: rc.confidence, + evidence: rc.evidence, + })), + remediations: output.remediations, + severity: output.severity, + similarFailuresCount: similarCount, + preventionSuggestions: output.preventionSuggestions || [], + analysisConfidence: output.rootCauses.reduce((sum, rc) => sum + rc.confidence, 0) / output.rootCauses.length, + }; + } + + private generateFallbackAnalysis(goalRun: any): FailureAnalysisResult { + const error = goalRun.error?.toLowerCase() || ''; + + // Heuristic categorization + let category = FailureCategory.UNKNOWN; + let description = 'Unable to determine specific root cause'; + + if (error.includes('timeout') || error.includes('timed out')) { + category = FailureCategory.TIMEOUT; + description = 'Operation timed out before completion'; + } else if (error.includes('network') || error.includes('connection') || error.includes('econnrefused')) { + category = FailureCategory.NETWORK; + description = 'Network connectivity issue'; + } else if (error.includes('auth') || error.includes('permission') || error.includes('unauthorized')) { + category = FailureCategory.AUTHENTICATION; + description = 'Authentication or permission failure'; + } else if (error.includes('not found') || error.includes('missing')) { + category = FailureCategory.CONFIGURATION; + description = 'Required resource or configuration not found'; + } else if (error.includes('invalid') || error.includes('validation')) { + category = FailureCategory.VALIDATION; + description = 'Input validation failed'; + } + + return { + goalRunId: goalRun.id, + goal: goalRun.goal, + failedAt: goalRun.updatedAt, + rootCauses: [{ + category, + description, + confidence: 0.5, + }], + remediations: [{ + action: 'Review error logs for more details', + priority: 'high', + estimatedEffort: 'quick', + }], + severity: 'medium', + similarFailuresCount: 0, + preventionSuggestions: ['Add better error handling', 'Include more detailed logging'], + analysisConfidence: 0.5, + }; + } + + private clusterFailures( + failures: Array<{ id: string; goal: string; error: string | null; createdAt: Date }>, + ): FailurePattern[] { + const patterns: FailurePattern[] = []; + const processed = new Set(); + + for (const failure of failures) { + if (processed.has(failure.id)) continue; + + const errorKey = failure.error?.substring(0, 100) || failure.goal.substring(0, 50); + const cluster = failures.filter(f => { + if (processed.has(f.id)) return false; + const fKey = f.error?.substring(0, 100) || f.goal.substring(0, 50); + return this.calculateSimilarity(errorKey, fKey) > 0.6; + }); + + if (cluster.length >= 2) { + cluster.forEach(c => processed.add(c.id)); + + const category = this.categorizeError(failure.error || failure.goal); + + patterns.push({ + id: `fp-${Date.now()}-${patterns.length}`, + category, + pattern: errorKey, + occurrenceCount: cluster.length, + firstSeen: cluster[cluster.length - 1].createdAt, + lastSeen: cluster[0].createdAt, + affectedGoalRunIds: cluster.map(c => c.id), + commonRemediations: [], + resolutionRate: 0, + }); + } + } + + patterns.sort((a, b) => b.occurrenceCount - a.occurrenceCount); + return patterns; + } + + private categorizeError(text: string): FailureCategory { + const lower = text.toLowerCase(); + + if (lower.includes('timeout')) return FailureCategory.TIMEOUT; + if (lower.includes('network') || lower.includes('connection')) return FailureCategory.NETWORK; + if (lower.includes('auth') || lower.includes('permission')) return FailureCategory.AUTHENTICATION; + if (lower.includes('config') || lower.includes('not found')) return FailureCategory.CONFIGURATION; + if (lower.includes('valid') || lower.includes('format')) return FailureCategory.VALIDATION; + if (lower.includes('depend') || lower.includes('require')) return FailureCategory.DEPENDENCY; + if (lower.includes('resource') || lower.includes('memory') || lower.includes('disk')) return FailureCategory.RESOURCE; + + return FailureCategory.UNKNOWN; + } + + private groupByPeriod( + runs: Array<{ id: string; status: string; error: string | null; createdAt: Date }>, + granularity: 'day' | 'week' | 'month', + ): FailureTrend[] { + const groups = new Map(); + + for (const run of runs) { + const date = new Date(run.createdAt); + let key: string; + + if (granularity === 'day') { + key = date.toISOString().split('T')[0]; + } else if (granularity === 'week') { + const weekStart = new Date(date); + weekStart.setDate(date.getDate() - date.getDay()); + key = `Week of ${weekStart.toISOString().split('T')[0]}`; + } else { + key = `${date.getFullYear()}-${String(date.getMonth() + 1).padStart(2, '0')}`; + } + + if (!groups.has(key)) { + groups.set(key, []); + } + groups.get(key)!.push(run); + } + + const trends: FailureTrend[] = []; + + for (const [period, periodRuns] of groups) { + const failures = periodRuns.filter(r => r.status === 'FAILED'); + const completed = periodRuns.filter(r => r.status === 'COMPLETED'); + + const byCategory: Record = {} as any; + for (const cat of Object.values(FailureCategory)) { + byCategory[cat] = 0; + } + + for (const failure of failures) { + const cat = this.categorizeError(failure.error || ''); + byCategory[cat]++; + } + + // Top patterns + const patternCounts = new Map(); + for (const failure of failures) { + const pattern = (failure.error || 'Unknown').substring(0, 50); + patternCounts.set(pattern, (patternCounts.get(pattern) || 0) + 1); + } + + const topPatterns = [...patternCounts.entries()] + .sort((a, b) => b[1] - a[1]) + .slice(0, 3) + .map(([pattern, count]) => ({ pattern, count })); + + trends.push({ + period, + totalFailures: failures.length, + byCategory, + topPatterns, + resolutionRate: periodRuns.length > 0 + ? completed.length / periodRuns.length + : 0, + }); + } + + return trends; + } + + private async storeAnalysisResult(goalRunId: string, result: FailureAnalysisResult): Promise { + try { + await this.prisma.failureAnalysisResult.create({ + data: { + goalRunId, + primaryCategory: result.rootCauses[0]?.category || 'unknown', + severity: result.severity, + rootCauses: result.rootCauses as any, + remediations: result.remediations as any, + analysisConfidence: result.analysisConfidence, + }, + }); + } catch (error: any) { + this.logger.warn(`Failed to store analysis result: ${error.message}`); + } + } + + private getHeuristicAnalysis(prompt: string): string { + // Extract error from prompt + const errorMatch = prompt.match(/ERROR: (.+?)(?:\n|FAILED)/s); + const error = errorMatch ? errorMatch[1].trim() : 'Unknown error'; + + const category = this.categorizeError(error); + + return JSON.stringify({ + rootCauses: [{ + category, + description: `Detected ${category} issue based on error pattern`, + confidence: 0.6, + evidence: [error.substring(0, 100)], + }], + remediations: [{ + action: 'Review the error message and check related configuration', + priority: 'high', + estimatedEffort: 'moderate', + reasoning: 'Standard troubleshooting approach', + }], + preventionSuggestions: [ + 'Add more detailed logging', + 'Implement retry logic for transient failures', + ], + severity: 'medium', + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/failure-classification.service.ts b/packages/bytebot-workflow-orchestrator/src/services/failure-classification.service.ts new file mode 100644 index 000000000..f4d0d7ba9 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/failure-classification.service.ts @@ -0,0 +1,702 @@ +/** + * Failure Classification Service + * v1.0.0: Google-style Failure Classification (Option C Industry Standard) + * + * Implements a hierarchical failure classification system following Google's + * SRE practices for distinguishing between transient and permanent failures. + * + * Failure Types: + * - TRANSIENT: Temporary issues that may resolve with retry (network, heartbeat) + * - SEMANTIC: Task logic failures requiring replanning (wrong approach) + * - PERMANENT: Unrecoverable failures requiring immediate termination + * + * This service enables: + * - Separate retry budgets for different failure types + * - Intelligent escalation paths (transient → semantic → permanent) + * - Error preservation for diagnostics (Manus-style) + * - Checkpoint/resume support (Anthropic-style) + * + * @see docs/OPTION_C_INDUSTRY_STANDARD_FIX.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; + +// ============================================================================= +// Failure Classification Types +// ============================================================================= + +/** + * Failure category hierarchy (Google SRE pattern) + */ +export enum FailureCategory { + /** + * Transient failures - temporary issues that may self-resolve + * Examples: network timeout, heartbeat gap, service unavailable + * Action: Retry with exponential backoff + */ + TRANSIENT = 'TRANSIENT', + + /** + * Semantic failures - task logic or approach failures + * Examples: wrong selector, invalid input, assertion failed + * Action: Replan with alternative approach + */ + SEMANTIC = 'SEMANTIC', + + /** + * Permanent failures - unrecoverable issues + * Examples: resource deleted, permission denied, budget exhausted + * Action: Fail immediately with diagnostic info + */ + PERMANENT = 'PERMANENT', +} + +/** + * Specific failure type within a category + */ +export enum FailureType { + // Transient failures + HEARTBEAT_TIMEOUT = 'HEARTBEAT_TIMEOUT', + NETWORK_ERROR = 'NETWORK_ERROR', + SERVICE_UNAVAILABLE = 'SERVICE_UNAVAILABLE', + RESOURCE_CONTENTION = 'RESOURCE_CONTENTION', + CAPACITY_EXHAUSTED = 'CAPACITY_EXHAUSTED', + AGENT_UNREACHABLE = 'AGENT_UNREACHABLE', + + // Semantic failures + STEP_FAILED = 'STEP_FAILED', + VALIDATION_ERROR = 'VALIDATION_ERROR', + ASSERTION_FAILED = 'ASSERTION_FAILED', + NEEDS_HELP = 'NEEDS_HELP', + WRONG_APPROACH = 'WRONG_APPROACH', + + // Permanent failures + RESOURCE_DELETED = 'RESOURCE_DELETED', + PERMISSION_DENIED = 'PERMISSION_DENIED', + BUDGET_EXHAUSTED = 'BUDGET_EXHAUSTED', + GOAL_CANCELLED = 'GOAL_CANCELLED', + FATAL_ERROR = 'FATAL_ERROR', + + // Unknown (default) + UNKNOWN = 'UNKNOWN', +} + +/** + * Complete failure classification result + */ +export interface FailureClassification { + category: FailureCategory; + type: FailureType; + isRetryable: boolean; + isReplannable: boolean; + suggestedAction: 'RETRY' | 'REPLAN' | 'FAIL' | 'WAIT_PROVIDER'; + reasoning: string; + diagnosticInfo: DiagnosticInfo; +} + +/** + * Diagnostic information for error preservation (Manus-style) + */ +export interface DiagnosticInfo { + timestamp: Date; + errorMessage: string; + errorCode?: string; + stackTrace?: string; + context: Record; + previousAttempts: AttemptRecord[]; + suggestedRecovery?: string; +} + +/** + * Record of a previous attempt (for error preservation) + */ +export interface AttemptRecord { + attemptNumber: number; + timestamp: Date; + failureType: FailureType; + errorMessage: string; + duration?: number; +} + +/** + * Retry budget tracking per failure type + */ +export interface RetryBudget { + maxRetries: number; + currentRetries: number; + baseDelayMs: number; + maxDelayMs: number; + lastRetryAt?: Date; + nextRetryAt?: Date; +} + +// ============================================================================= +// Service Implementation +// ============================================================================= + +@Injectable() +export class FailureClassificationService { + private readonly logger = new Logger(FailureClassificationService.name); + + // Retry budgets per checklist item and failure category + // Key: `${checklistItemId}:${FailureCategory}` + private readonly retryBudgets = new Map(); + + // Error history per checklist item (Manus-style preservation) + // Key: checklistItemId + private readonly errorHistory = new Map(); + + // Default retry configuration + private readonly defaults: { + transientMaxRetries: number; + semanticMaxRetries: number; + heartbeatMaxRetries: number; + transientBaseDelayMs: number; + transientMaxDelayMs: number; + heartbeatBaseDelayMs: number; + heartbeatMaxDelayMs: number; + }; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.defaults = { + // Transient failures get more retries (infrastructure is usually flaky temporarily) + transientMaxRetries: parseInt( + this.configService.get('TRANSIENT_MAX_RETRIES', '5'), + 10, + ), + // Semantic failures get replan attempts (already tracked by orchestrator) + semanticMaxRetries: parseInt( + this.configService.get('SEMANTIC_MAX_RETRIES', '3'), + 10, + ), + // Heartbeat timeouts get their own budget (separate from infra failures) + heartbeatMaxRetries: parseInt( + this.configService.get('HEARTBEAT_MAX_RETRIES', '5'), + 10, + ), + // Transient retry delays + transientBaseDelayMs: parseInt( + this.configService.get('TRANSIENT_BASE_DELAY_MS', '10000'), + 10, + ), + transientMaxDelayMs: parseInt( + this.configService.get('TRANSIENT_MAX_DELAY_MS', '120000'), + 10, + ), + // Heartbeat retry delays (shorter, as agent might recover quickly) + heartbeatBaseDelayMs: parseInt( + this.configService.get('HEARTBEAT_BASE_DELAY_MS', '15000'), + 10, + ), + heartbeatMaxDelayMs: parseInt( + this.configService.get('HEARTBEAT_MAX_DELAY_MS', '60000'), + 10, + ), + }; + + this.logger.log( + `FailureClassificationService initialized ` + + `(transient: ${this.defaults.transientMaxRetries} retries, ` + + `heartbeat: ${this.defaults.heartbeatMaxRetries} retries, ` + + `semantic: ${this.defaults.semanticMaxRetries} replans)`, + ); + } + + // =========================================================================== + // Public API + // =========================================================================== + + /** + * Classify a failure and determine the appropriate action + * + * @param itemId - Checklist item ID + * @param errorMessage - Error message or outcome + * @param context - Additional context for classification + * @returns Classification result with suggested action + */ + classifyFailure( + itemId: string, + errorMessage: string, + context: Record = {}, + ): FailureClassification { + const type = this.detectFailureType(errorMessage, context); + const category = this.getCategory(type); + + // Get or create retry budget for this item+category + const budgetKey = `${itemId}:${category}`; + let budget = this.retryBudgets.get(budgetKey); + if (!budget) { + budget = this.createBudget(category, type); + this.retryBudgets.set(budgetKey, budget); + } + + // Record this attempt in error history + this.recordAttempt(itemId, type, errorMessage); + + // Determine if we can retry/replan based on budget + const isRetryable = category === FailureCategory.TRANSIENT && + budget.currentRetries < budget.maxRetries; + const isReplannable = category === FailureCategory.SEMANTIC; + + // Determine suggested action + let suggestedAction: 'RETRY' | 'REPLAN' | 'FAIL'; + let reasoning: string; + + if (category === FailureCategory.PERMANENT) { + suggestedAction = 'FAIL'; + reasoning = `Permanent failure (${type}): cannot recover`; + } else if (category === FailureCategory.TRANSIENT) { + if (isRetryable) { + suggestedAction = 'RETRY'; + reasoning = `Transient failure (${type}): retry ${budget.currentRetries + 1}/${budget.maxRetries}`; + } else { + // Exhausted transient retries - escalate to replan + suggestedAction = 'REPLAN'; + reasoning = `Transient retries exhausted (${budget.maxRetries}): escalating to replan`; + } + } else { + // Semantic failure + suggestedAction = 'REPLAN'; + reasoning = `Semantic failure (${type}): requires replanning`; + } + + const classification: FailureClassification = { + category, + type, + isRetryable, + isReplannable, + suggestedAction, + reasoning, + diagnosticInfo: this.buildDiagnosticInfo(itemId, errorMessage, context), + }; + + // Emit event for observability + this.eventEmitter.emit('failure.classified', { + itemId, + classification: { + category, + type, + suggestedAction, + retryCount: budget.currentRetries, + }, + }); + + this.logger.log( + `Classified failure for ${itemId}: ${category}/${type} → ${suggestedAction}`, + ); + + return classification; + } + + /** + * Classify a heartbeat timeout specifically + * This has its own retry budget separate from other transient failures + */ + classifyHeartbeatTimeout( + itemId: string, + consecutiveUnhealthy: number, + lastHeartbeat: Date | null, + context: Record = {}, + ): FailureClassification { + const type = FailureType.HEARTBEAT_TIMEOUT; + const category = FailureCategory.TRANSIENT; + + // Use heartbeat-specific budget key + const budgetKey = `${itemId}:HEARTBEAT`; + let budget = this.retryBudgets.get(budgetKey); + if (!budget) { + budget = { + maxRetries: this.defaults.heartbeatMaxRetries, + currentRetries: 0, + baseDelayMs: this.defaults.heartbeatBaseDelayMs, + maxDelayMs: this.defaults.heartbeatMaxDelayMs, + }; + this.retryBudgets.set(budgetKey, budget); + } + + const errorMessage = `Heartbeat timeout: ${consecutiveUnhealthy} consecutive unhealthy checks, ` + + `last heartbeat: ${lastHeartbeat?.toISOString() || 'never'}`; + + // Record this attempt + this.recordAttempt(itemId, type, errorMessage); + + const isRetryable = budget.currentRetries < budget.maxRetries; + + let suggestedAction: 'RETRY' | 'REPLAN' | 'FAIL' | 'WAIT_PROVIDER'; + let reasoning: string; + + if (isRetryable) { + suggestedAction = 'RETRY'; + reasoning = `Heartbeat timeout: retry ${budget.currentRetries + 1}/${budget.maxRetries} ` + + `(agent may recover)`; + } else { + // Exhausted heartbeat retries - pause safely. + // Heartbeat gaps are almost always infra/provider degradation (agent can't reach proxy/model), + // and replanning here creates churn + consumes replan budget without fixing the root cause. + suggestedAction = 'WAIT_PROVIDER'; + reasoning = `Heartbeat retries exhausted (${budget.maxRetries}): agent not responding; ` + + `pausing in WAITING_PROVIDER for recovery`; + } + + const classification: FailureClassification = { + category, + type, + isRetryable, + isReplannable: true, + suggestedAction, + reasoning, + diagnosticInfo: this.buildDiagnosticInfo(itemId, errorMessage, { + ...context, + consecutiveUnhealthy, + lastHeartbeat, + heartbeatRetries: budget.currentRetries, + }), + }; + + this.logger.log( + `Classified heartbeat timeout for ${itemId}: ${suggestedAction} ` + + `(${budget.currentRetries}/${budget.maxRetries} retries used)`, + ); + + return classification; + } + + /** + * Consume one retry from the budget and calculate delay + * + * @returns Retry delay in milliseconds, or null if budget exhausted + */ + consumeRetry( + itemId: string, + category: FailureCategory | 'HEARTBEAT', + ): { delayMs: number; retryCount: number } | null { + const budgetKey = `${itemId}:${category}`; + const budget = this.retryBudgets.get(budgetKey); + + if (!budget || budget.currentRetries >= budget.maxRetries) { + return null; + } + + budget.currentRetries++; + budget.lastRetryAt = new Date(); + + // Calculate exponential backoff with jitter + const exponentialDelay = budget.baseDelayMs * Math.pow(2, budget.currentRetries - 1); + const jitter = Math.random() * 0.3 * exponentialDelay; // 0-30% jitter + const delayMs = Math.min(exponentialDelay + jitter, budget.maxDelayMs); + + budget.nextRetryAt = new Date(Date.now() + delayMs); + + this.logger.debug( + `Consumed retry ${budget.currentRetries}/${budget.maxRetries} for ${itemId}:${category}, ` + + `delay: ${Math.round(delayMs / 1000)}s`, + ); + + return { + delayMs, + retryCount: budget.currentRetries, + }; + } + + /** + * Get current retry status for an item + */ + getRetryStatus( + itemId: string, + category: FailureCategory | 'HEARTBEAT', + ): RetryBudget | null { + const budgetKey = `${itemId}:${category}`; + return this.retryBudgets.get(budgetKey) || null; + } + + /** + * Get error history for an item (Manus-style error preservation) + */ + getErrorHistory(itemId: string): AttemptRecord[] { + return this.errorHistory.get(itemId) || []; + } + + /** + * Clear retry budgets and error history for an item + * Call this when a step succeeds or is skipped + */ + clearItemTracking(itemId: string): void { + // Clear all budget keys for this item + for (const key of this.retryBudgets.keys()) { + if (key.startsWith(`${itemId}:`)) { + this.retryBudgets.delete(key); + } + } + this.errorHistory.delete(itemId); + this.logger.debug(`Cleared tracking for item ${itemId}`); + } + + /** + * Clear all tracking for a goal run + * Call this when a goal run completes or fails + */ + clearGoalRunTracking(goalRunId: string, itemIds: string[]): void { + for (const itemId of itemIds) { + this.clearItemTracking(itemId); + } + this.logger.debug(`Cleared tracking for goal run ${goalRunId}`); + } + + // =========================================================================== + // Private Methods + // =========================================================================== + + /** + * Detect failure type from error message and context + */ + private detectFailureType( + errorMessage: string, + context: Record, + ): FailureType { + const msg = errorMessage.toLowerCase(); + + // Check for explicit type markers + if (context.failureType) { + return context.failureType as FailureType; + } + + // Heartbeat timeout patterns + if ( + msg.includes('heartbeat') || + msg.includes('consecutive unhealthy') || + msg.includes('heartbeat stopped') + ) { + return FailureType.HEARTBEAT_TIMEOUT; + } + + // Network error patterns + if ( + msg.includes('econnrefused') || + msg.includes('etimedout') || + msg.includes('enotfound') || + msg.includes('socket hang up') || + msg.includes('network error') || + msg.includes('fetch failed') + ) { + return FailureType.NETWORK_ERROR; + } + + // Service unavailable patterns + if ( + msg.includes('503') || + msg.includes('service unavailable') || + msg.includes('temporarily unavailable') + ) { + return FailureType.SERVICE_UNAVAILABLE; + } + + // Agent unreachable patterns + if ( + msg.includes('agent unreachable') || + msg.includes('task not found') || + msg.includes('404') || + msg.includes('[infra]') + ) { + return FailureType.AGENT_UNREACHABLE; + } + + // Capacity patterns + if ( + msg.includes('capacity') || + msg.includes('no available') || + msg.includes('pool exhausted') + ) { + return FailureType.CAPACITY_EXHAUSTED; + } + + // Resource contention patterns + if ( + msg.includes('workspace not ready') || + msg.includes('resource busy') || + msg.includes('lock') + ) { + return FailureType.RESOURCE_CONTENTION; + } + + // Permission denied (permanent) + if ( + msg.includes('permission denied') || + msg.includes('unauthorized') || + msg.includes('forbidden') || + msg.includes('403') + ) { + return FailureType.PERMISSION_DENIED; + } + + // Resource deleted (permanent) + if ( + msg.includes('not found') && + (msg.includes('deleted') || msg.includes('no longer exists')) + ) { + return FailureType.RESOURCE_DELETED; + } + + // Budget exhausted (permanent) + if ( + msg.includes('budget') || + msg.includes('quota') || + msg.includes('limit exceeded') + ) { + return FailureType.BUDGET_EXHAUSTED; + } + + // NEEDS_HELP pattern (semantic) + if ( + msg.includes('needs_help') || + msg.includes('needs help') || + msg.includes('user intervention') + ) { + return FailureType.NEEDS_HELP; + } + + // Validation error (semantic) + if ( + msg.includes('validation') || + msg.includes('invalid') || + msg.includes('malformed') + ) { + return FailureType.VALIDATION_ERROR; + } + + // Default to step failed (semantic) + return FailureType.STEP_FAILED; + } + + /** + * Get category for a failure type + */ + private getCategory(type: FailureType): FailureCategory { + switch (type) { + // Transient failures + case FailureType.HEARTBEAT_TIMEOUT: + case FailureType.NETWORK_ERROR: + case FailureType.SERVICE_UNAVAILABLE: + case FailureType.RESOURCE_CONTENTION: + case FailureType.CAPACITY_EXHAUSTED: + case FailureType.AGENT_UNREACHABLE: + return FailureCategory.TRANSIENT; + + // Permanent failures + case FailureType.RESOURCE_DELETED: + case FailureType.PERMISSION_DENIED: + case FailureType.BUDGET_EXHAUSTED: + case FailureType.GOAL_CANCELLED: + case FailureType.FATAL_ERROR: + return FailureCategory.PERMANENT; + + // Semantic failures (default) + case FailureType.STEP_FAILED: + case FailureType.VALIDATION_ERROR: + case FailureType.ASSERTION_FAILED: + case FailureType.NEEDS_HELP: + case FailureType.WRONG_APPROACH: + case FailureType.UNKNOWN: + default: + return FailureCategory.SEMANTIC; + } + } + + /** + * Create a retry budget for a category + */ + private createBudget( + category: FailureCategory, + type: FailureType, + ): RetryBudget { + if (type === FailureType.HEARTBEAT_TIMEOUT) { + return { + maxRetries: this.defaults.heartbeatMaxRetries, + currentRetries: 0, + baseDelayMs: this.defaults.heartbeatBaseDelayMs, + maxDelayMs: this.defaults.heartbeatMaxDelayMs, + }; + } + + if (category === FailureCategory.TRANSIENT) { + return { + maxRetries: this.defaults.transientMaxRetries, + currentRetries: 0, + baseDelayMs: this.defaults.transientBaseDelayMs, + maxDelayMs: this.defaults.transientMaxDelayMs, + }; + } + + // Semantic failures don't use retry budget (they use replan budget) + return { + maxRetries: 0, + currentRetries: 0, + baseDelayMs: 0, + maxDelayMs: 0, + }; + } + + /** + * Record an attempt in error history (Manus-style preservation) + */ + private recordAttempt( + itemId: string, + type: FailureType, + errorMessage: string, + ): void { + let history = this.errorHistory.get(itemId); + if (!history) { + history = []; + this.errorHistory.set(itemId, history); + } + + history.push({ + attemptNumber: history.length + 1, + timestamp: new Date(), + failureType: type, + errorMessage, + }); + + // Keep only last 10 attempts to avoid memory bloat + if (history.length > 10) { + history.shift(); + } + } + + /** + * Build diagnostic info for error preservation + */ + private buildDiagnosticInfo( + itemId: string, + errorMessage: string, + context: Record, + ): DiagnosticInfo { + const previousAttempts = this.getErrorHistory(itemId); + + // Generate suggested recovery based on error patterns + let suggestedRecovery: string | undefined; + if (previousAttempts.length >= 3) { + const failureTypes = previousAttempts.map((a) => a.failureType); + const allSameType = failureTypes.every((t) => t === failureTypes[0]); + if (allSameType) { + suggestedRecovery = `Repeated ${failureTypes[0]} failures. Consider: ` + + `1) Checking infrastructure status, 2) Reducing concurrency, ` + + `3) Manual intervention.`; + } + } + + return { + timestamp: new Date(), + errorMessage, + errorCode: context.errorCode, + stackTrace: context.stackTrace, + context, + previousAttempts, + suggestedRecovery, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/failure-prediction.service.ts b/packages/bytebot-workflow-orchestrator/src/services/failure-prediction.service.ts new file mode 100644 index 000000000..9eaa213cc --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/failure-prediction.service.ts @@ -0,0 +1,687 @@ +/** + * Failure Prediction Service + * v1.0.0: ML-Based Predictive Failure Prevention + * + * Implements industry-standard patterns for failure prediction: + * - Netflix: Anomaly detection with statistical models + * - Uber: Time-series analysis for degradation detection + * - Google SRE: Error budget and burndown tracking + * + * Key Features: + * 1. Real-time failure risk scoring (0-100) + * 2. Anomaly detection on step durations and error rates + * 3. Pattern-based failure prediction + * 4. Proactive alerting before failures occur + * 5. Historical pattern analysis for risk assessment + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS_V2.md + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { SchedulerRegistry } from '@nestjs/schedule'; + +// Risk Assessment Result +export interface RiskAssessment { + goalRunId: string; + timestamp: Date; + overallRisk: number; // 0-100 + riskLevel: 'low' | 'medium' | 'high' | 'critical'; + factors: RiskFactor[]; + recommendations: string[]; + predictedOutcome: 'likely_success' | 'uncertain' | 'likely_failure'; + confidenceScore: number; // 0-1 +} + +// Individual Risk Factor +export interface RiskFactor { + name: string; + score: number; // 0-100 contribution to overall risk + weight: number; // 0-1 importance + description: string; + trend: 'improving' | 'stable' | 'degrading'; + historicalAverage?: number; +} + +// Anomaly Detection Result +export interface Anomaly { + id: string; + type: 'duration' | 'error_rate' | 'pattern' | 'resource'; + severity: 'low' | 'medium' | 'high'; + detectedAt: Date; + value: number; + expectedRange: { min: number; max: number }; + deviation: number; // Standard deviations from mean + goalRunId?: string; + stepNumber?: number; + description: string; +} + +// Historical Pattern +export interface HistoricalPattern { + patternId: string; + patternType: 'failure_sequence' | 'success_pattern' | 'degradation'; + occurrences: number; + lastSeen: Date; + indicators: string[]; + outcome: 'failure' | 'success' | 'partial'; + avgDurationToOutcome: number; // ms +} + +// Metrics Window for analysis +interface MetricsWindow { + stepDurations: number[]; + stepErrors: number[]; + replanCount: number; + avgResponseTime: number; + errorRate: number; + lastStepStatus: string; +} + +@Injectable() +export class FailurePredictionService implements OnModuleInit { + private readonly logger = new Logger(FailurePredictionService.name); + private readonly enabled: boolean; + + // Metrics storage per goal + private goalMetrics: Map = new Map(); + + // Historical patterns learned from past executions + private learnedPatterns: HistoricalPattern[] = []; + + // Anomaly history + private recentAnomalies: Anomaly[] = []; + private readonly maxAnomalies = 100; + + // Statistical baselines (learned from historical data) + private baselines: { + avgStepDuration: number; + stdStepDuration: number; + avgErrorRate: number; + avgReplanRate: number; + avgStepsToCompletion: number; + } = { + avgStepDuration: 30000, // 30 seconds default + stdStepDuration: 15000, + avgErrorRate: 0.15, + avgReplanRate: 0.1, + avgStepsToCompletion: 5, + }; + + // Configuration + private readonly riskThresholds: { + low: number; + medium: number; + high: number; + }; + private readonly anomalyZScore: number; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly prisma: PrismaService, + private readonly schedulerRegistry: SchedulerRegistry, + ) { + this.enabled = this.configService.get('FAILURE_PREDICTION_ENABLED', 'true') === 'true'; + this.anomalyZScore = parseFloat(this.configService.get('ANOMALY_Z_SCORE_THRESHOLD', '2.0')); + this.riskThresholds = { + low: 30, + medium: 60, + high: 80, + }; + + this.logger.log(`Failure prediction ${this.enabled ? 'enabled' : 'disabled'}`); + } + + async onModuleInit(): Promise { + if (!this.enabled) return; + + // Learn baselines from historical data + await this.learnBaselines(); + + // Learn patterns from past failures + await this.learnPatterns(); + + // Start periodic risk assessment + this.startPeriodicAssessment(); + + this.logger.log('Failure prediction service initialized'); + } + + /** + * Assess failure risk for a goal run + */ + async assessRisk(goalRunId: string): Promise { + const metrics = this.goalMetrics.get(goalRunId) || this.createEmptyMetrics(); + const factors: RiskFactor[] = []; + let totalWeightedRisk = 0; + let totalWeight = 0; + + // Factor 1: Step Duration Anomaly + const durationRisk = this.assessDurationRisk(metrics); + factors.push(durationRisk); + totalWeightedRisk += durationRisk.score * durationRisk.weight; + totalWeight += durationRisk.weight; + + // Factor 2: Error Rate + const errorRisk = this.assessErrorRisk(metrics); + factors.push(errorRisk); + totalWeightedRisk += errorRisk.score * errorRisk.weight; + totalWeight += errorRisk.weight; + + // Factor 3: Replan Frequency + const replanRisk = this.assessReplanRisk(metrics); + factors.push(replanRisk); + totalWeightedRisk += replanRisk.score * replanRisk.weight; + totalWeight += replanRisk.weight; + + // Factor 4: Pattern Matching + const patternRisk = await this.assessPatternRisk(goalRunId, metrics); + factors.push(patternRisk); + totalWeightedRisk += patternRisk.score * patternRisk.weight; + totalWeight += patternRisk.weight; + + // Factor 5: Recent Anomalies + const anomalyRisk = this.assessAnomalyRisk(goalRunId); + factors.push(anomalyRisk); + totalWeightedRisk += anomalyRisk.score * anomalyRisk.weight; + totalWeight += anomalyRisk.weight; + + // Calculate overall risk + const overallRisk = totalWeight > 0 ? totalWeightedRisk / totalWeight : 0; + + // Determine risk level + let riskLevel: RiskAssessment['riskLevel'] = 'low'; + if (overallRisk >= this.riskThresholds.high) { + riskLevel = 'critical'; + } else if (overallRisk >= this.riskThresholds.medium) { + riskLevel = 'high'; + } else if (overallRisk >= this.riskThresholds.low) { + riskLevel = 'medium'; + } + + // Generate recommendations + const recommendations = this.generateRecommendations(factors, riskLevel); + + // Predict outcome + const predictedOutcome = this.predictOutcome(overallRisk, factors); + + // Calculate confidence based on available data + const confidenceScore = this.calculateConfidence(metrics, factors); + + const assessment: RiskAssessment = { + goalRunId, + timestamp: new Date(), + overallRisk: Math.round(overallRisk), + riskLevel, + factors, + recommendations, + predictedOutcome, + confidenceScore, + }; + + // Emit event for monitoring + this.eventEmitter.emit('prediction.risk.assessed', assessment); + + // Alert if high risk + if (riskLevel === 'critical' || riskLevel === 'high') { + this.eventEmitter.emit('prediction.risk.alert', { + goalRunId, + riskLevel, + overallRisk, + message: `High failure risk detected: ${recommendations[0] || 'Review execution'}`, + }); + } + + return assessment; + } + + /** + * Detect anomalies in current execution + */ + detectAnomalies(goalRunId: string): Anomaly[] { + const metrics = this.goalMetrics.get(goalRunId); + if (!metrics) return []; + + const anomalies: Anomaly[] = []; + + // Check step duration anomaly + if (metrics.stepDurations.length > 0) { + const latestDuration = metrics.stepDurations[metrics.stepDurations.length - 1]; + const zScore = (latestDuration - this.baselines.avgStepDuration) / this.baselines.stdStepDuration; + + if (Math.abs(zScore) > this.anomalyZScore) { + anomalies.push({ + id: `anom-${Date.now()}-dur`, + type: 'duration', + severity: zScore > 3 ? 'high' : 'medium', + detectedAt: new Date(), + value: latestDuration, + expectedRange: { + min: this.baselines.avgStepDuration - this.baselines.stdStepDuration * 2, + max: this.baselines.avgStepDuration + this.baselines.stdStepDuration * 2, + }, + deviation: zScore, + goalRunId, + stepNumber: metrics.stepDurations.length, + description: zScore > 0 + ? `Step took ${Math.round(latestDuration / 1000)}s, ${Math.abs(zScore).toFixed(1)} std devs above average` + : `Step completed unusually fast`, + }); + } + } + + // Check error rate anomaly + if (metrics.errorRate > this.baselines.avgErrorRate * 2) { + anomalies.push({ + id: `anom-${Date.now()}-err`, + type: 'error_rate', + severity: metrics.errorRate > 0.5 ? 'high' : 'medium', + detectedAt: new Date(), + value: metrics.errorRate, + expectedRange: { min: 0, max: this.baselines.avgErrorRate }, + deviation: (metrics.errorRate - this.baselines.avgErrorRate) / this.baselines.avgErrorRate, + goalRunId, + description: `Error rate ${(metrics.errorRate * 100).toFixed(0)}% exceeds expected ${(this.baselines.avgErrorRate * 100).toFixed(0)}%`, + }); + } + + // Store and emit anomalies + for (const anomaly of anomalies) { + this.recentAnomalies.unshift(anomaly); + this.eventEmitter.emit('prediction.anomaly.detected', anomaly); + } + + // Trim anomaly history + if (this.recentAnomalies.length > this.maxAnomalies) { + this.recentAnomalies = this.recentAnomalies.slice(0, this.maxAnomalies); + } + + return anomalies; + } + + /** + * Record step completion for metrics + */ + @OnEvent('activity.STEP_COMPLETED') + handleStepCompleted(payload: { + goalRunId: string; + duration?: number; + stepNumber?: number; + }): void { + if (!this.enabled) return; + + let metrics = this.goalMetrics.get(payload.goalRunId); + if (!metrics) { + metrics = this.createEmptyMetrics(); + this.goalMetrics.set(payload.goalRunId, metrics); + } + + if (payload.duration) { + metrics.stepDurations.push(payload.duration); + metrics.avgResponseTime = this.calculateAverage(metrics.stepDurations); + } + + metrics.lastStepStatus = 'completed'; + + // Check for anomalies + this.detectAnomalies(payload.goalRunId); + } + + /** + * Record step failure for metrics + */ + @OnEvent('activity.STEP_FAILED') + handleStepFailed(payload: { + goalRunId: string; + error?: string; + stepNumber?: number; + }): void { + if (!this.enabled) return; + + let metrics = this.goalMetrics.get(payload.goalRunId); + if (!metrics) { + metrics = this.createEmptyMetrics(); + this.goalMetrics.set(payload.goalRunId, metrics); + } + + metrics.stepErrors.push(Date.now()); + metrics.errorRate = this.calculateErrorRate(metrics); + metrics.lastStepStatus = 'failed'; + + // Check for anomalies + this.detectAnomalies(payload.goalRunId); + + // Immediate risk assessment on failure + this.assessRisk(payload.goalRunId); + } + + /** + * Record replan event + */ + @OnEvent('plan.replanned') + handleReplan(payload: { goalRunId: string }): void { + if (!this.enabled) return; + + let metrics = this.goalMetrics.get(payload.goalRunId); + if (!metrics) { + metrics = this.createEmptyMetrics(); + this.goalMetrics.set(payload.goalRunId, metrics); + } + + metrics.replanCount++; + } + + /** + * Get recent anomalies + */ + getRecentAnomalies(limit: number = 20): Anomaly[] { + return this.recentAnomalies.slice(0, limit); + } + + /** + * Get anomalies for a specific goal + */ + getGoalAnomalies(goalRunId: string): Anomaly[] { + return this.recentAnomalies.filter(a => a.goalRunId === goalRunId); + } + + /** + * Clear metrics for a completed/failed goal + */ + clearGoalMetrics(goalRunId: string): void { + this.goalMetrics.delete(goalRunId); + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + private async learnBaselines(): Promise { + try { + // Learn from historical goal runs + const recentGoals = await this.prisma.goalRun.findMany({ + where: { + status: { in: ['COMPLETED', 'FAILED'] }, + completedAt: { gte: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000) }, // Last 7 days + }, + include: { + planVersions: { + include: { + checklistItems: true, + }, + }, + }, + take: 100, + }); + + if (recentGoals.length < 10) { + this.logger.debug('Not enough historical data for baseline learning'); + return; + } + + // Calculate step duration statistics + const allDurations: number[] = []; + let totalReplans = 0; + let failedGoals = 0; + + for (const goal of recentGoals) { + if (goal.status === 'FAILED') failedGoals++; + totalReplans += goal.planVersions.length - 1; + + for (const plan of goal.planVersions) { + for (const item of plan.checklistItems) { + if (item.startedAt && item.completedAt) { + const duration = new Date(item.completedAt).getTime() - new Date(item.startedAt).getTime(); + if (duration > 0 && duration < 600000) { // Exclude outliers > 10 min + allDurations.push(duration); + } + } + } + } + } + + if (allDurations.length > 10) { + this.baselines.avgStepDuration = this.calculateAverage(allDurations); + this.baselines.stdStepDuration = this.calculateStdDev(allDurations); + } + + this.baselines.avgErrorRate = failedGoals / recentGoals.length; + this.baselines.avgReplanRate = totalReplans / recentGoals.length; + + this.logger.log( + `Learned baselines: avgDuration=${Math.round(this.baselines.avgStepDuration)}ms, ` + + `errorRate=${(this.baselines.avgErrorRate * 100).toFixed(1)}%` + ); + } catch (error) { + this.logger.warn(`Failed to learn baselines: ${(error as Error).message}`); + } + } + + private async learnPatterns(): Promise { + // Learn failure patterns from historical data + // This would analyze sequences of events that led to failures + this.logger.debug('Pattern learning not yet implemented'); + } + + private startPeriodicAssessment(): void { + const interval = setInterval(async () => { + for (const goalRunId of this.goalMetrics.keys()) { + await this.assessRisk(goalRunId); + } + }, 30000); // Every 30 seconds + + this.schedulerRegistry.addInterval('failure-prediction-assessment', interval); + } + + private createEmptyMetrics(): MetricsWindow { + return { + stepDurations: [], + stepErrors: [], + replanCount: 0, + avgResponseTime: 0, + errorRate: 0, + lastStepStatus: 'pending', + }; + } + + private assessDurationRisk(metrics: MetricsWindow): RiskFactor { + if (metrics.stepDurations.length === 0) { + return { + name: 'Step Duration', + score: 0, + weight: 0.2, + description: 'No duration data available', + trend: 'stable', + }; + } + + const recent = metrics.stepDurations.slice(-3); + const avgRecent = this.calculateAverage(recent); + const deviation = (avgRecent - this.baselines.avgStepDuration) / this.baselines.stdStepDuration; + + const score = Math.min(100, Math.max(0, deviation * 25)); + + // Detect trend + let trend: RiskFactor['trend'] = 'stable'; + if (recent.length >= 2) { + const change = recent[recent.length - 1] - recent[0]; + if (change > this.baselines.stdStepDuration) trend = 'degrading'; + else if (change < -this.baselines.stdStepDuration) trend = 'improving'; + } + + return { + name: 'Step Duration', + score, + weight: 0.2, + description: `Average step takes ${Math.round(avgRecent / 1000)}s (baseline: ${Math.round(this.baselines.avgStepDuration / 1000)}s)`, + trend, + historicalAverage: this.baselines.avgStepDuration, + }; + } + + private assessErrorRisk(metrics: MetricsWindow): RiskFactor { + const score = Math.min(100, metrics.errorRate * 200); // 50% error rate = 100 score + + return { + name: 'Error Rate', + score, + weight: 0.3, + description: `Current error rate: ${(metrics.errorRate * 100).toFixed(0)}%`, + trend: metrics.lastStepStatus === 'failed' ? 'degrading' : 'stable', + historicalAverage: this.baselines.avgErrorRate, + }; + } + + private assessReplanRisk(metrics: MetricsWindow): RiskFactor { + const score = Math.min(100, metrics.replanCount * 25); // 4 replans = 100 score + + return { + name: 'Replan Frequency', + score, + weight: 0.2, + description: `${metrics.replanCount} replans (avg: ${this.baselines.avgReplanRate.toFixed(1)})`, + trend: metrics.replanCount > this.baselines.avgReplanRate * 2 ? 'degrading' : 'stable', + historicalAverage: this.baselines.avgReplanRate, + }; + } + + private async assessPatternRisk( + goalRunId: string, + metrics: MetricsWindow, + ): Promise { + // Match against learned failure patterns + // For now, use simple heuristics + + let score = 0; + const patterns: string[] = []; + + // Pattern: Multiple consecutive failures + if (metrics.stepErrors.length >= 2) { + const lastTwo = metrics.stepErrors.slice(-2); + if (lastTwo[1] - lastTwo[0] < 60000) { // Within 1 minute + score += 30; + patterns.push('consecutive failures'); + } + } + + // Pattern: High replan + high error + if (metrics.replanCount >= 2 && metrics.errorRate > 0.3) { + score += 40; + patterns.push('replan-error spiral'); + } + + // Pattern: Increasing step durations + if (metrics.stepDurations.length >= 3) { + const recent = metrics.stepDurations.slice(-3); + if (recent[2] > recent[1] && recent[1] > recent[0]) { + score += 20; + patterns.push('increasing latency'); + } + } + + return { + name: 'Pattern Matching', + score: Math.min(100, score), + weight: 0.2, + description: patterns.length > 0 + ? `Matched patterns: ${patterns.join(', ')}` + : 'No concerning patterns detected', + trend: score > 30 ? 'degrading' : 'stable', + }; + } + + private assessAnomalyRisk(goalRunId: string): RiskFactor { + const goalAnomalies = this.getGoalAnomalies(goalRunId); + const recentAnomalies = goalAnomalies.filter( + a => Date.now() - a.detectedAt.getTime() < 300000 // Last 5 minutes + ); + + const score = Math.min(100, recentAnomalies.length * 20); + const highSeverity = recentAnomalies.filter(a => a.severity === 'high').length; + + return { + name: 'Recent Anomalies', + score: score + highSeverity * 15, + weight: 0.1, + description: `${recentAnomalies.length} anomalies detected (${highSeverity} high severity)`, + trend: recentAnomalies.length > 0 ? 'degrading' : 'stable', + }; + } + + private generateRecommendations( + factors: RiskFactor[], + riskLevel: RiskAssessment['riskLevel'], + ): string[] { + const recommendations: string[] = []; + + const highRiskFactors = factors.filter(f => f.score > 50); + + for (const factor of highRiskFactors) { + switch (factor.name) { + case 'Step Duration': + recommendations.push('Consider increasing step timeout or breaking into smaller steps'); + break; + case 'Error Rate': + recommendations.push('Review recent failures and consider adjusting approach'); + break; + case 'Replan Frequency': + recommendations.push('Goal may need clearer requirements or simpler steps'); + break; + case 'Pattern Matching': + recommendations.push('Similar patterns have led to failures - consider manual intervention'); + break; + case 'Recent Anomalies': + recommendations.push('Unusual behavior detected - monitor closely'); + break; + } + } + + if (riskLevel === 'critical') { + recommendations.unshift('CRITICAL: Consider pausing execution for manual review'); + } + + return recommendations.slice(0, 3); + } + + private predictOutcome( + overallRisk: number, + factors: RiskFactor[], + ): RiskAssessment['predictedOutcome'] { + if (overallRisk >= 70) return 'likely_failure'; + if (overallRisk <= 30) return 'likely_success'; + return 'uncertain'; + } + + private calculateConfidence(metrics: MetricsWindow, factors: RiskFactor[]): number { + // More data = higher confidence + const dataPoints = metrics.stepDurations.length + metrics.stepErrors.length; + const dataConfidence = Math.min(1, dataPoints / 10); + + // More degrading trends = lower confidence + const degradingCount = factors.filter(f => f.trend === 'degrading').length; + const trendConfidence = 1 - (degradingCount * 0.15); + + return Math.max(0.3, Math.min(1, (dataConfidence + trendConfidence) / 2)); + } + + private calculateAverage(values: number[]): number { + if (values.length === 0) return 0; + return values.reduce((a, b) => a + b, 0) / values.length; + } + + private calculateStdDev(values: number[]): number { + if (values.length < 2) return 0; + const avg = this.calculateAverage(values); + const squaredDiffs = values.map(v => Math.pow(v - avg, 2)); + return Math.sqrt(this.calculateAverage(squaredDiffs)); + } + + private calculateErrorRate(metrics: MetricsWindow): number { + const totalSteps = metrics.stepDurations.length + metrics.stepErrors.length; + if (totalSteps === 0) return 0; + return metrics.stepErrors.length / totalSteps; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/git-integration.service.ts b/packages/bytebot-workflow-orchestrator/src/services/git-integration.service.ts new file mode 100644 index 000000000..35d1e1e2a --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/git-integration.service.ts @@ -0,0 +1,758 @@ +/** + * Git Integration Service + * Phase 8 (v5.3.0): External Integrations - GitHub/GitLab webhook handling + * + * Features: + * - GitHub webhook verification (HMAC-SHA256) + * - GitLab webhook verification (secret token) + * - Event processing (push, pull_request, merge_request) + * - Goal run triggering from Git events + * - Repository management + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import * as crypto from 'crypto'; +import { PrismaService } from './prisma.service'; +import { GoalRunService } from './goal-run.service'; + +/** + * Git provider types + */ +export enum GitProvider { + GITHUB = 'GITHUB', + GITLAB = 'GITLAB', +} + +/** + * Git event types we support + */ +export enum GitEventType { + // GitHub events + PUSH = 'push', + PULL_REQUEST = 'pull_request', + PULL_REQUEST_REVIEW = 'pull_request_review', + ISSUES = 'issues', + ISSUE_COMMENT = 'issue_comment', + RELEASE = 'release', + WORKFLOW_RUN = 'workflow_run', + // GitLab events + MERGE_REQUEST = 'merge_request', + PIPELINE = 'pipeline', + TAG_PUSH = 'tag_push', + NOTE = 'note', +} + +/** + * Git event actions + */ +export enum GitEventAction { + OPENED = 'opened', + CLOSED = 'closed', + MERGED = 'merged', + REOPENED = 'reopened', + SYNCHRONIZE = 'synchronize', + CREATED = 'created', + EDITED = 'edited', + DELETED = 'deleted', + APPROVED = 'approved', + CHANGES_REQUESTED = 'changes_requested', + PUBLISHED = 'published', + COMPLETED = 'completed', + SUCCESS = 'success', + FAILED = 'failed', +} + +/** + * Trigger configuration for goal runs + */ +interface TriggerConfig { + enabled: boolean; + events: string[]; + branches?: string[]; + paths?: string[]; + goalTemplateId?: string; + goalPattern?: string; + constraints?: Record; + variableMapping?: Record; +} + +/** + * Parsed webhook event + */ +interface ParsedGitEvent { + provider: GitProvider; + eventType: string; + eventAction?: string; + repository: { + owner: string; + name: string; + fullName: string; + url: string; + }; + ref?: string; + branch?: string; + commitSha?: string; + prNumber?: number; + prTitle?: string; + prBody?: string; + sender: { + login: string; + avatarUrl?: string; + }; + payload: Record; +} + +/** + * Integration creation params + */ +interface CreateIntegrationParams { + tenantId: string; + provider: GitProvider; + name: string; + owner: string; + repository: string; + branch?: string; + webhookSecret: string; + subscribedEvents: string[]; + triggerConfig: TriggerConfig; +} + +@Injectable() +export class GitIntegrationService { + private readonly logger = new Logger(GitIntegrationService.name); + + constructor( + private readonly prisma: PrismaService, + private readonly eventEmitter: EventEmitter2, + private readonly goalRunService: GoalRunService, + ) { + this.logger.log('GitIntegrationService initialized'); + } + + /** + * Create a new Git integration + */ + async createIntegration(params: CreateIntegrationParams) { + const { + tenantId, + provider, + name, + owner, + repository, + branch = 'main', + webhookSecret, + subscribedEvents, + triggerConfig, + } = params; + + // Generate a unique webhook ID for routing + const webhookId = crypto.randomUUID(); + + const integration = await this.prisma.gitIntegration.create({ + data: { + tenantId, + provider, + name, + owner, + repository, + branch, + webhookId, + webhookSecret, + subscribedEvents, + triggerConfig: triggerConfig as any, + enabled: true, + }, + }); + + this.logger.log( + `Created ${provider} integration ${integration.id} for ${owner}/${repository}`, + ); + + return { + integration, + webhookUrl: this.getWebhookUrl(integration.id), + }; + } + + /** + * Get webhook URL for an integration + */ + getWebhookUrl(integrationId: string): string { + const baseUrl = process.env.API_BASE_URL || 'http://localhost:8080'; + return `${baseUrl}/api/v1/git/webhooks/${integrationId}`; + } + + /** + * Update an integration + */ + async updateIntegration( + integrationId: string, + updates: Partial<{ + name: string; + branch: string; + subscribedEvents: string[]; + triggerConfig: TriggerConfig; + enabled: boolean; + }>, + ) { + const integration = await this.prisma.gitIntegration.update({ + where: { id: integrationId }, + data: { + ...(updates.name && { name: updates.name }), + ...(updates.branch && { branch: updates.branch }), + ...(updates.subscribedEvents && { + subscribedEvents: updates.subscribedEvents, + }), + ...(updates.triggerConfig && { + triggerConfig: updates.triggerConfig as any, + }), + ...(updates.enabled !== undefined && { enabled: updates.enabled }), + }, + }); + + this.logger.log(`Updated Git integration ${integrationId}`); + return integration; + } + + /** + * Delete an integration + */ + async deleteIntegration(integrationId: string) { + await this.prisma.gitIntegration.delete({ + where: { id: integrationId }, + }); + + this.logger.log(`Deleted Git integration ${integrationId}`); + } + + /** + * Get integration by ID + */ + async getIntegration(integrationId: string) { + return this.prisma.gitIntegration.findUnique({ + where: { id: integrationId }, + include: { + events: { + orderBy: { receivedAt: 'desc' }, + take: 10, + }, + }, + }); + } + + /** + * List integrations for a tenant + */ + async listIntegrations( + tenantId: string, + options: { limit?: number; offset?: number; provider?: GitProvider } = {}, + ) { + const { limit = 20, offset = 0, provider } = options; + + const where = { + tenantId, + ...(provider && { provider }), + }; + + const [integrations, total] = await Promise.all([ + this.prisma.gitIntegration.findMany({ + where, + orderBy: { createdAt: 'desc' }, + skip: offset, + take: limit, + }), + this.prisma.gitIntegration.count({ where }), + ]); + + return { + integrations, + pagination: { + total, + limit, + offset, + hasMore: offset + integrations.length < total, + }, + }; + } + + /** + * Verify GitHub webhook signature + */ + verifyGitHubSignature( + payload: string, + signature: string, + secret: string, + ): boolean { + if (!signature || !signature.startsWith('sha256=')) { + return false; + } + + const expectedSignature = `sha256=${crypto + .createHmac('sha256', secret) + .update(payload) + .digest('hex')}`; + + return crypto.timingSafeEqual( + Buffer.from(signature), + Buffer.from(expectedSignature), + ); + } + + /** + * Verify GitLab webhook token + */ + verifyGitLabToken(token: string, secret: string): boolean { + return crypto.timingSafeEqual(Buffer.from(token), Buffer.from(secret)); + } + + /** + * Process incoming webhook + */ + async processWebhook( + integrationId: string, + headers: Record, + rawPayload: string, + parsedPayload: Record, + ): Promise<{ success: boolean; eventId?: string; goalRunId?: string; error?: string }> { + // Get the integration + const integration = await this.prisma.gitIntegration.findUnique({ + where: { id: integrationId }, + }); + + if (!integration) { + return { success: false, error: 'Integration not found' }; + } + + if (!integration.enabled) { + return { success: false, error: 'Integration is disabled' }; + } + + // Verify signature based on provider + const isValid = + integration.provider === GitProvider.GITHUB + ? this.verifyGitHubSignature( + rawPayload, + headers['x-hub-signature-256'] || '', + integration.webhookSecret || '', + ) + : this.verifyGitLabToken( + headers['x-gitlab-token'] || '', + integration.webhookSecret || '', + ); + + if (!isValid) { + this.logger.warn( + `Invalid webhook signature for integration ${integrationId}`, + ); + return { success: false, error: 'Invalid signature' }; + } + + // Parse the event + const event = this.parseWebhookEvent( + integration.provider as GitProvider, + headers, + parsedPayload, + ); + + // Check if we're subscribed to this event + if (!integration.subscribedEvents.includes(event.eventType)) { + this.logger.debug( + `Ignoring unsubscribed event ${event.eventType} for integration ${integrationId}`, + ); + return { success: true, error: 'Event not subscribed' }; + } + + // Store the event + const storedEvent = await this.prisma.gitIntegrationEvent.create({ + data: { + integrationId, + eventType: event.eventType, + eventAction: event.eventAction, + ref: event.ref, + commitSha: event.commitSha, + prNumber: event.prNumber, + payload: event.payload as any, + processed: false, + }, + }); + + // Check if we should trigger a goal run + const triggerConfig = integration.triggerConfig as unknown as TriggerConfig; + + if (triggerConfig?.enabled && this.shouldTriggerGoalRun(event, triggerConfig)) { + try { + const goalRunId = await this.triggerGoalRun( + integration.tenantId, + event, + triggerConfig, + ); + + // Update event with goal run ID + await this.prisma.gitIntegrationEvent.update({ + where: { id: storedEvent.id }, + data: { + processed: true, + goalRunId, + }, + }); + + this.logger.log( + `Triggered goal run ${goalRunId} from ${event.eventType} event`, + ); + + return { + success: true, + eventId: storedEvent.id, + goalRunId, + }; + } catch (error: any) { + this.logger.error( + `Failed to trigger goal run from event: ${error.message}`, + ); + + await this.prisma.gitIntegrationEvent.update({ + where: { id: storedEvent.id }, + data: { + processed: true, + error: error.message, + }, + }); + + return { + success: false, + eventId: storedEvent.id, + error: error.message, + }; + } + } + + // Mark as processed even if no goal run triggered + await this.prisma.gitIntegrationEvent.update({ + where: { id: storedEvent.id }, + data: { processed: true }, + }); + + // Emit event for other listeners + this.eventEmitter.emit('git.event', { + integrationId, + event, + eventId: storedEvent.id, + }); + + return { + success: true, + eventId: storedEvent.id, + }; + } + + /** + * Parse webhook event from payload + */ + private parseWebhookEvent( + provider: GitProvider, + headers: Record, + payload: Record, + ): ParsedGitEvent { + if (provider === GitProvider.GITHUB) { + return this.parseGitHubEvent(headers, payload); + } else { + return this.parseGitLabEvent(headers, payload); + } + } + + /** + * Parse GitHub webhook event + */ + private parseGitHubEvent( + headers: Record, + payload: Record, + ): ParsedGitEvent { + const eventType = headers['x-github-event'] || 'unknown'; + const repo = payload.repository || {}; + + let branch: string | undefined; + let commitSha: string | undefined; + let prNumber: number | undefined; + let prTitle: string | undefined; + let prBody: string | undefined; + + // Extract branch/ref info based on event type + if (eventType === 'push') { + const ref = payload.ref || ''; + branch = ref.replace('refs/heads/', ''); + commitSha = payload.after || payload.head_commit?.id; + } else if (eventType === 'pull_request') { + prNumber = payload.pull_request?.number; + prTitle = payload.pull_request?.title; + prBody = payload.pull_request?.body; + branch = payload.pull_request?.head?.ref; + commitSha = payload.pull_request?.head?.sha; + } + + return { + provider: GitProvider.GITHUB, + eventType, + eventAction: payload.action, + repository: { + owner: repo.owner?.login || '', + name: repo.name || '', + fullName: repo.full_name || '', + url: repo.html_url || '', + }, + ref: payload.ref, + branch, + commitSha, + prNumber, + prTitle, + prBody, + sender: { + login: payload.sender?.login || '', + avatarUrl: payload.sender?.avatar_url, + }, + payload, + }; + } + + /** + * Parse GitLab webhook event + */ + private parseGitLabEvent( + headers: Record, + payload: Record, + ): ParsedGitEvent { + const eventType = payload.object_kind || headers['x-gitlab-event'] || 'unknown'; + const project = payload.project || {}; + + let branch: string | undefined; + let commitSha: string | undefined; + let prNumber: number | undefined; + let prTitle: string | undefined; + let prBody: string | undefined; + + // Extract info based on event type + if (eventType === 'push') { + const ref = payload.ref || ''; + branch = ref.replace('refs/heads/', ''); + commitSha = payload.after || payload.checkout_sha; + } else if (eventType === 'merge_request') { + const mr = payload.object_attributes || {}; + prNumber = mr.iid; + prTitle = mr.title; + prBody = mr.description; + branch = mr.source_branch; + commitSha = mr.last_commit?.id; + } + + return { + provider: GitProvider.GITLAB, + eventType, + eventAction: payload.object_attributes?.action, + repository: { + owner: project.namespace || '', + name: project.name || '', + fullName: project.path_with_namespace || '', + url: project.web_url || '', + }, + ref: payload.ref, + branch, + commitSha, + prNumber, + prTitle, + prBody, + sender: { + login: payload.user?.username || payload.user_username || '', + avatarUrl: payload.user?.avatar_url, + }, + payload, + }; + } + + /** + * Check if event should trigger a goal run + */ + private shouldTriggerGoalRun( + event: ParsedGitEvent, + config: TriggerConfig, + ): boolean { + // Check if event type matches + const eventKey = event.eventAction + ? `${event.eventType}:${event.eventAction}` + : event.eventType; + + const eventMatches = config.events.some((e) => { + if (e === eventKey) return true; + if (e === event.eventType) return true; + if (e.includes('*')) { + const regex = new RegExp('^' + e.replace('*', '.*') + '$'); + return regex.test(eventKey); + } + return false; + }); + + if (!eventMatches) { + return false; + } + + // Check branch filter + if (config.branches && config.branches.length > 0 && event.branch) { + const branchMatches = config.branches.some((b) => { + if (b === event.branch) return true; + if (b.includes('*')) { + const regex = new RegExp('^' + b.replace('*', '.*') + '$'); + return regex.test(event.branch!); + } + return false; + }); + + if (!branchMatches) { + return false; + } + } + + // Check path filters (for push events) + if (config.paths && config.paths.length > 0 && event.payload.commits) { + const modifiedFiles = new Set(); + for (const commit of event.payload.commits) { + (commit.added || []).forEach((f: string) => modifiedFiles.add(f)); + (commit.modified || []).forEach((f: string) => modifiedFiles.add(f)); + (commit.removed || []).forEach((f: string) => modifiedFiles.add(f)); + } + + const pathMatches = config.paths.some((p) => { + const regex = new RegExp('^' + p.replace('**', '.*').replace('*', '[^/]*') + '$'); + return Array.from(modifiedFiles).some((f) => regex.test(f)); + }); + + if (!pathMatches) { + return false; + } + } + + return true; + } + + /** + * Trigger a goal run from Git event + */ + private async triggerGoalRun( + tenantId: string, + event: ParsedGitEvent, + config: TriggerConfig, + ): Promise { + // Build goal text from pattern or template + let goal = config.goalPattern || ''; + + // Replace variables in goal pattern + const variables: Record = { + repo: event.repository.fullName, + owner: event.repository.owner, + repository: event.repository.name, + branch: event.branch || '', + commit: event.commitSha || '', + pr_number: event.prNumber?.toString() || '', + pr_title: event.prTitle || '', + sender: event.sender.login, + event_type: event.eventType, + event_action: event.eventAction || '', + }; + + // Apply variable mapping if provided + if (config.variableMapping) { + for (const [key, path] of Object.entries(config.variableMapping)) { + const value = this.getNestedValue(event.payload, path); + if (value !== undefined) { + variables[key] = String(value); + } + } + } + + // Replace {{variable}} patterns + goal = goal.replace(/\{\{(\w+)\}\}/g, (_, key) => variables[key] || ''); + + // Include git event info in goal text for context + const goalWithContext = `${goal}\n\n[Git Event: ${event.provider} ${event.eventType}${event.eventAction ? ':' + event.eventAction : ''} on ${event.repository.fullName}${event.branch ? ' branch ' + event.branch : ''}${event.commitSha ? ' commit ' + event.commitSha.substring(0, 7) : ''}]`; + + // Create the goal run + const goalRun = await this.goalRunService.createFromGoal({ + tenantId, + goal: goalWithContext, + constraints: config.constraints || {}, + autoStart: true, + }); + + return goalRun.id; + } + + /** + * Get nested value from object using dot notation + */ + private getNestedValue(obj: Record, path: string): any { + return path.split('.').reduce((acc, part) => acc?.[part], obj); + } + + /** + * Rotate webhook secret for an integration + */ + async rotateWebhookSecret(integrationId: string): Promise<{ secret: string }> { + const newSecret = crypto.randomBytes(32).toString('hex'); + + await this.prisma.gitIntegration.update({ + where: { id: integrationId }, + data: { webhookSecret: newSecret }, + }); + + this.logger.log(`Rotated webhook secret for integration ${integrationId}`); + + return { secret: newSecret }; + } + + /** + * Get event history for an integration + */ + async getEventHistory( + integrationId: string, + options: { limit?: number; offset?: number } = {}, + ) { + const { limit = 50, offset = 0 } = options; + + const [events, total] = await Promise.all([ + this.prisma.gitIntegrationEvent.findMany({ + where: { integrationId }, + orderBy: { receivedAt: 'desc' }, + skip: offset, + take: limit, + }), + this.prisma.gitIntegrationEvent.count({ where: { integrationId } }), + ]); + + return { + events, + pagination: { + total, + limit, + offset, + hasMore: offset + events.length < total, + }, + }; + } + + /** + * Test an integration by sending a ping + */ + async testIntegration(integrationId: string): Promise<{ success: boolean; message: string }> { + const integration = await this.prisma.gitIntegration.findUnique({ + where: { id: integrationId }, + }); + + if (!integration) { + return { success: false, message: 'Integration not found' }; + } + + // Just verify the configuration is valid + return { + success: true, + message: `Integration ${integration.name} is configured for ${integration.provider} repository ${integration.owner}/${integration.repository}`, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-checkpoint.service.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-checkpoint.service.ts new file mode 100644 index 000000000..bcb229322 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-checkpoint.service.ts @@ -0,0 +1,406 @@ +/** + * Goal Checkpoint Service + * v1.0.0: Manus-style External State Management + * + * This service implements external state persistence patterns inspired by: + * - Manus AI: todo.md file that's "constantly rewritten" to keep progress visible + * - LangGraph: Checkpoint at every superstep for fault tolerance + * - OpenAI Assistants: Thread state persistence across interactions + * + * Key Features: + * 1. Maintains JSON checkpoint of goal state after each step + * 2. Provides structured context for replanning + * 3. Enables recovery from any point without re-running successful steps + * 4. Keeps completed work "in the model's recent attention span" (Manus pattern) + * + * The checkpoint is stored in the database but can optionally be + * serialized to a file for debugging/recovery. + * + * @see /documentation/2026-01-03-CONTEXT_PRESERVING_REPLAN_FIX.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; + +// Checkpoint structure - Manus-style todo.md in JSON form +export interface GoalCheckpoint { + // Metadata + goalRunId: string; + goalDescription: string; + version: number; + checkpointedAt: Date; + currentPhase: string; + + // Progress summary (Manus-style "constantly rewritten" todo) + progressSummary: { + totalSteps: number; + completedSteps: number; + failedSteps: number; + pendingSteps: number; + percentComplete: number; + }; + + // Completed work with outcomes (the key context for replanning) + completedWork: { + stepNumber: number; + description: string; + outcome: string; + completedAt: Date; + }[]; + + // Current context for the agent + currentContext: { + lastSuccessfulStep?: string; + lastSuccessfulOutcome?: string; + currentStep?: string; + failureReason?: string; + accumulatedKnowledge: string[]; // Key facts learned during execution + }; + + // Remaining work + remainingSteps: { + stepNumber: number; + description: string; + status: string; + dependsOnCompleted: boolean; + }[]; +} + +// Checkpoint storage in database +export interface CheckpointRecord { + goalRunId: string; + checkpoint: GoalCheckpoint; + createdAt: Date; +} + +@Injectable() +export class GoalCheckpointService { + private readonly logger = new Logger(GoalCheckpointService.name); + private readonly enabled: boolean; + + // In-memory cache for quick access (also persisted to DB) + private checkpointCache: Map = new Map(); + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.enabled = this.configService.get('CHECKPOINT_ENABLED', 'true') === 'true'; + this.logger.log(`Goal checkpoint service ${this.enabled ? 'enabled' : 'disabled'}`); + } + + /** + * Create or update checkpoint for a goal run + * + * Called after each step completion to maintain current state. + * Implements Manus-style "constantly rewriting the todo list". + */ + async updateCheckpoint(goalRunId: string): Promise { + if (!this.enabled) { + return null; + } + + this.logger.debug(`Updating checkpoint for goal run ${goalRunId}`); + + try { + // Fetch current goal run state with all related data + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + }); + + if (!goalRun) { + this.logger.warn(`Goal run ${goalRunId} not found for checkpoint`); + return null; + } + + const currentPlan = goalRun.planVersions[0]; + const items = currentPlan?.checklistItems || []; + + // Build checkpoint + const checkpoint = this.buildCheckpoint(goalRun, items); + + // Store in cache + this.checkpointCache.set(goalRunId, checkpoint); + + // Persist to database (store in goal run's JSON field if available) + await this.persistCheckpoint(goalRunId, checkpoint); + + // Emit event for monitoring + this.eventEmitter.emit('checkpoint.updated', { + goalRunId, + version: checkpoint.version, + percentComplete: checkpoint.progressSummary.percentComplete, + }); + + this.logger.debug( + `Checkpoint updated: ${checkpoint.progressSummary.completedSteps}/${checkpoint.progressSummary.totalSteps} complete`, + ); + + return checkpoint; + } catch (error) { + this.logger.error(`Failed to update checkpoint: ${(error as Error).message}`); + return null; + } + } + + /** + * Get the current checkpoint for a goal run + */ + async getCheckpoint(goalRunId: string): Promise { + // Check cache first + const cached = this.checkpointCache.get(goalRunId); + if (cached) { + return cached; + } + + // Load from database + return this.loadCheckpoint(goalRunId); + } + + /** + * Get checkpoint as formatted string for LLM context + * + * This implements the Manus-style "todo.md" format - a concise, + * human-readable summary of progress that can be included in prompts. + */ + async getCheckpointAsContext(goalRunId: string): Promise { + const checkpoint = await this.getCheckpoint(goalRunId); + if (!checkpoint) { + return ''; + } + + return this.formatCheckpointForLLM(checkpoint); + } + + /** + * Build checkpoint from goal run state + */ + private buildCheckpoint(goalRun: any, items: any[]): GoalCheckpoint { + const completedItems = items.filter(i => i.status === 'COMPLETED'); + const failedItems = items.filter(i => i.status === 'FAILED'); + const pendingItems = items.filter(i => i.status === 'PENDING' || i.status === 'IN_PROGRESS'); + const inProgressItem = items.find(i => i.status === 'IN_PROGRESS'); + + // Extract accumulated knowledge from completed outcomes + const accumulatedKnowledge = this.extractKnowledge(completedItems); + + // Find last successful step + const lastCompleted = completedItems[completedItems.length - 1]; + + return { + goalRunId: goalRun.id, + goalDescription: goalRun.goal, + version: goalRun.currentPlanVersion || 1, + checkpointedAt: new Date(), + currentPhase: goalRun.phase, + + progressSummary: { + totalSteps: items.length, + completedSteps: completedItems.length, + failedSteps: failedItems.length, + pendingSteps: pendingItems.length, + percentComplete: items.length > 0 + ? Math.round((completedItems.length / items.length) * 100) + : 0, + }, + + completedWork: completedItems.map(item => ({ + stepNumber: item.order, + description: item.description, + outcome: item.actualOutcome || 'Completed successfully', + completedAt: item.completedAt || new Date(), + })), + + currentContext: { + lastSuccessfulStep: lastCompleted?.description, + lastSuccessfulOutcome: lastCompleted?.actualOutcome, + currentStep: inProgressItem?.description, + failureReason: failedItems[0]?.actualOutcome, + accumulatedKnowledge, + }, + + remainingSteps: pendingItems.map(item => ({ + stepNumber: item.order, + description: item.description, + status: item.status, + dependsOnCompleted: item.order > 1 && completedItems.length > 0, + })), + }; + } + + /** + * Extract key knowledge/facts from completed step outcomes + * + * This helps maintain context even when replanning. + */ + private extractKnowledge(completedItems: any[]): string[] { + const knowledge: string[] = []; + + for (const item of completedItems) { + if (!item.actualOutcome) continue; + + try { + // Try to parse JSON outcome for structured data + const parsed = JSON.parse(item.actualOutcome); + if (typeof parsed === 'object' && parsed !== null) { + // Extract key facts from structured outcome + if (parsed.summary) knowledge.push(parsed.summary); + if (parsed.result) knowledge.push(String(parsed.result)); + if (parsed.found) knowledge.push(`Found: ${JSON.stringify(parsed.found)}`); + } + } catch { + // Plain text outcome - use directly if not too long + if (item.actualOutcome.length < 200) { + knowledge.push(`Step ${item.order}: ${item.actualOutcome}`); + } else { + knowledge.push(`Step ${item.order}: ${item.actualOutcome.substring(0, 200)}...`); + } + } + } + + return knowledge; + } + + /** + * Format checkpoint as context string for LLM prompts + * + * This is the Manus-style "todo.md" format - keeps completed work + * in the model's recent attention span. + */ + private formatCheckpointForLLM(checkpoint: GoalCheckpoint): string { + const lines: string[] = []; + + lines.push(`# Goal Progress Checkpoint (v${checkpoint.version})`); + lines.push(`Goal: ${checkpoint.goalDescription}`); + lines.push(`Progress: ${checkpoint.progressSummary.completedSteps}/${checkpoint.progressSummary.totalSteps} steps (${checkpoint.progressSummary.percentComplete}%)`); + lines.push(''); + + if (checkpoint.completedWork.length > 0) { + lines.push('## Completed Work'); + for (const work of checkpoint.completedWork) { + lines.push(`- [x] Step ${work.stepNumber}: ${work.description}`); + lines.push(` Result: ${work.outcome}`); + } + lines.push(''); + } + + if (checkpoint.currentContext.accumulatedKnowledge.length > 0) { + lines.push('## Key Information Gathered'); + for (const knowledge of checkpoint.currentContext.accumulatedKnowledge) { + lines.push(`- ${knowledge}`); + } + lines.push(''); + } + + if (checkpoint.remainingSteps.length > 0) { + lines.push('## Remaining Steps'); + for (const step of checkpoint.remainingSteps) { + const status = step.status === 'IN_PROGRESS' ? '[~]' : '[ ]'; + lines.push(`- ${status} Step ${step.stepNumber}: ${step.description}`); + } + lines.push(''); + } + + if (checkpoint.currentContext.failureReason) { + lines.push('## Last Failure'); + lines.push(`Reason: ${checkpoint.currentContext.failureReason}`); + lines.push(''); + } + + return lines.join('\n'); + } + + /** + * Persist checkpoint to database + * + * We store the checkpoint in a dedicated JSON column if available, + * or in the goal run's existing JSON fields. + */ + private async persistCheckpoint(goalRunId: string, checkpoint: GoalCheckpoint): Promise { + try { + // Store in constraints field (which is JSON) with a special key + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { constraints: true }, + }); + + const constraints = (goalRun?.constraints as any) || {}; + constraints._checkpoint = checkpoint; + + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + constraints, + }, + }); + } catch (error) { + this.logger.warn(`Failed to persist checkpoint: ${(error as Error).message}`); + } + } + + /** + * Load checkpoint from database + */ + private async loadCheckpoint(goalRunId: string): Promise { + try { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { constraints: true }, + }); + + const constraints = (goalRun?.constraints as any) || {}; + const checkpoint = constraints._checkpoint as GoalCheckpoint; + + if (checkpoint) { + // Update cache + this.checkpointCache.set(goalRunId, checkpoint); + return checkpoint; + } + + return null; + } catch (error) { + this.logger.warn(`Failed to load checkpoint: ${(error as Error).message}`); + return null; + } + } + + /** + * Clear checkpoint (on goal completion or cancellation) + */ + async clearCheckpoint(goalRunId: string): Promise { + this.checkpointCache.delete(goalRunId); + + try { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { constraints: true }, + }); + + if (goalRun) { + const constraints = (goalRun.constraints as any) || {}; + delete constraints._checkpoint; + + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { constraints }, + }); + } + } catch (error) { + this.logger.warn(`Failed to clear checkpoint: ${(error as Error).message}`); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-intake.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-intake.service.spec.ts new file mode 100644 index 000000000..62f43d4a5 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-intake.service.spec.ts @@ -0,0 +1,231 @@ +import { GoalIntakeService } from './goal-intake.service'; +import { GoalRunPhase, GoalRunWaitReason, GoalSpecStatus, UserPromptKind } from '@prisma/client'; + +describe('GoalIntakeService GoalSpec gate before planning', () => { + it('returns ready=true and creates a minimal COMPLETE GoalSpec when none exists', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + }, + goalSpec: { + findUnique: jest.fn(), + create: jest.fn(), + }, + } as any; + + prisma.goalRun.findUnique.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + phase: GoalRunPhase.INITIALIZING, + }); + prisma.goalSpec.findUnique.mockResolvedValueOnce(null); + prisma.goalSpec.create.mockResolvedValueOnce({ + id: 'gs-1', + status: GoalSpecStatus.COMPLETE, + schemaId: 'goal_intake.v1', + schemaVersion: 1, + jsonSchema: {}, + uiSchema: {}, + values: {}, + }); + + const userPromptService = { ensureOpenGoalSpecPrompt: jest.fn() } as any; + const outboxService = { enqueueOnce: jest.fn() } as any; + const goalRunService = { createActivityEvent: jest.fn() } as any; + const eventEmitter = { emit: jest.fn() } as any; + const configService = { get: jest.fn(() => '') } as any; + const goalIntakeStartedTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + + const service = new GoalIntakeService( + prisma, + userPromptService, + outboxService, + goalRunService, + eventEmitter, + configService, + goalIntakeStartedTotal, + ); + + const result = await service.ensureGoalSpecReadyForPlanning({ goalRunId: 'gr-1', tenantId: 't-1' }); + + expect(result).toEqual({ ready: true, goalSpecId: 'gs-1' }); + expect(userPromptService.ensureOpenGoalSpecPrompt).not.toHaveBeenCalled(); + expect(outboxService.enqueueOnce).not.toHaveBeenCalled(); + expect(goalRunService.createActivityEvent).not.toHaveBeenCalled(); + }); + + it('creates/ensures an OPEN GOAL_INTAKE prompt and moves run to WAITING_USER_INPUT when GoalSpec is INCOMPLETE', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + goalSpec: { + findUnique: jest.fn(), + }, + } as any; + + prisma.goalRun.findUnique.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + phase: GoalRunPhase.INITIALIZING, + }); + prisma.goalSpec.findUnique.mockResolvedValueOnce({ + id: 'gs-1', + status: GoalSpecStatus.INCOMPLETE, + schemaId: 'goal_intake.v1', + schemaVersion: 1, + jsonSchema: { title: 'Goal Intake' }, + uiSchema: { notes: { 'ui:widget': 'textarea' } }, + values: {}, + }); + + const prompt = { + id: 'p-1', + kind: UserPromptKind.GOAL_INTAKE, + dedupeKey: 'prompt:gr-1:goalSpec:gs-1:GOAL_INTAKE', + }; + + const userPromptService = { + ensureOpenGoalSpecPrompt: jest.fn().mockResolvedValueOnce(prompt), + } as any; + const outboxService = { enqueueOnce: jest.fn() } as any; + const goalRunService = { createActivityEvent: jest.fn() } as any; + const eventEmitter = { emit: jest.fn() } as any; + const configService = { get: jest.fn(() => '') } as any; + const goalIntakeStartedTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }); + + const service = new GoalIntakeService( + prisma, + userPromptService, + outboxService, + goalRunService, + eventEmitter, + configService, + goalIntakeStartedTotal, + ); + + const result = await service.ensureGoalSpecReadyForPlanning({ goalRunId: 'gr-1', tenantId: 't-1' }); + + expect(result).toEqual({ ready: false, goalSpecId: 'gs-1', promptId: 'p-1' }); + expect(goalIntakeStartedTotal.labels).toHaveBeenCalledWith('gate'); + expect(userPromptService.ensureOpenGoalSpecPrompt).toHaveBeenCalledWith( + expect.objectContaining({ + tenantId: 't-1', + goalRunId: 'gr-1', + goalSpecId: 'gs-1', + kind: UserPromptKind.GOAL_INTAKE, + }), + ); + expect(prisma.goalRun.updateMany).toHaveBeenCalledWith({ + where: { + id: 'gr-1', + phase: { in: [GoalRunPhase.INITIALIZING, GoalRunPhase.PLANNING, GoalRunPhase.EXECUTING, GoalRunPhase.REPLANNING] }, + }, + data: expect.objectContaining({ + phase: GoalRunPhase.WAITING_USER_INPUT, + waitReason: GoalRunWaitReason.USER_INPUT, + waitStartedAt: expect.any(Date), + }), + }); + expect(goalRunService.createActivityEvent).toHaveBeenCalledTimes(1); + expect(outboxService.enqueueOnce).toHaveBeenCalledWith({ + dedupeKey: prompt.dedupeKey, + aggregateId: 'gr-1', + eventType: 'user_prompt.created', + payload: expect.objectContaining({ + promptId: 'p-1', + goalRunId: 'gr-1', + tenantId: 't-1', + goalSpecId: 'gs-1', + kind: UserPromptKind.GOAL_INTAKE, + }), + }); + }); + + it('creates a default INCOMPLETE GoalSpec when GOAL_INTAKE_FORCE_TENANTS includes the tenant (so planning blocks before first plan)', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + goalSpec: { + findUnique: jest.fn(), + create: jest.fn(), + }, + } as any; + + prisma.goalRun.findUnique.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + phase: GoalRunPhase.INITIALIZING, + }); + prisma.goalSpec.findUnique.mockResolvedValueOnce(null); + prisma.goalSpec.create.mockResolvedValueOnce({ + id: 'gs-1', + status: GoalSpecStatus.INCOMPLETE, + schemaId: 'goal_intake.v1', + schemaVersion: 1, + jsonSchema: { title: 'Goal Intake' }, + uiSchema: { notes: { 'ui:widget': 'textarea' } }, + values: {}, + }); + + const prompt = { + id: 'p-1', + kind: UserPromptKind.GOAL_INTAKE, + dedupeKey: 'prompt:gr-1:goalSpec:gs-1:GOAL_INTAKE', + }; + + const userPromptService = { + ensureOpenGoalSpecPrompt: jest.fn().mockResolvedValueOnce(prompt), + } as any; + const outboxService = { enqueueOnce: jest.fn() } as any; + const goalRunService = { createActivityEvent: jest.fn() } as any; + const eventEmitter = { emit: jest.fn() } as any; + const configService = { get: jest.fn(() => 't-1') } as any; + const goalIntakeStartedTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }); + + const service = new GoalIntakeService( + prisma, + userPromptService, + outboxService, + goalRunService, + eventEmitter, + configService, + goalIntakeStartedTotal, + ); + + const result = await service.ensureGoalSpecReadyForPlanning({ goalRunId: 'gr-1', tenantId: 't-1' }); + + expect(result).toEqual({ ready: false, goalSpecId: 'gs-1', promptId: 'p-1' }); + expect(prisma.goalSpec.create).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + goalRunId: 'gr-1', + tenantId: 't-1', + status: GoalSpecStatus.INCOMPLETE, + }), + }), + ); + expect(outboxService.enqueueOnce).toHaveBeenCalledWith({ + dedupeKey: prompt.dedupeKey, + aggregateId: 'gr-1', + eventType: 'user_prompt.created', + payload: expect.objectContaining({ + promptId: 'p-1', + goalRunId: 'gr-1', + tenantId: 't-1', + goalSpecId: 'gs-1', + kind: UserPromptKind.GOAL_INTAKE, + }), + }); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-intake.service.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-intake.service.ts new file mode 100644 index 000000000..fb36b3ad3 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-intake.service.ts @@ -0,0 +1,398 @@ +/** + * Goal Intake Service (GoalSpec gate before planning) + * + * Converts "prompt-first" planner output into an External Input Request (EIR) that must + * be satisfied before planning proceeds. + * + * Design: + * - GoalSpec is the durable, typed intake state for a run. + * - A GOAL_INTAKE UserPrompt is created exactly-once (dedupe) and the run enters WAITING_USER_INPUT. + * - When the prompt is resolved, GoalSpec is marked COMPLETE and the run returns to INITIALIZING + * so planning can restart with the provided inputs. + */ + +import { Injectable, Logger, NotFoundException } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { createId } from '@paralleldrive/cuid2'; +import { GoalRunPhase, GoalRunWaitReason, GoalSpecStatus, UserPromptKind } from '@prisma/client'; +import { InjectMetric } from '@willsoto/nestjs-prometheus'; +import type { Counter } from 'prom-client'; +import { PrismaService } from './prisma.service'; +import { UserPromptService } from './user-prompt.service'; +import { OutboxService } from './outbox.service'; +import { GoalRunService } from './goal-run.service'; +import { PlannerFirstStepUserInputError } from './planner.errors'; + +@Injectable() +export class GoalIntakeService { + private readonly logger = new Logger(GoalIntakeService.name); + private cachedForceTenantsRaw = ''; + private cachedForceTenants = new Set(); + + constructor( + private readonly prisma: PrismaService, + private readonly userPromptService: UserPromptService, + private readonly outboxService: OutboxService, + private readonly goalRunService: GoalRunService, + private readonly eventEmitter: EventEmitter2, + private readonly configService: ConfigService, + @InjectMetric('goal_intake_started_total') + private readonly goalIntakeStartedTotal: Counter, + ) {} + + private getForcedTenants(): Set { + const raw = this.configService.get('GOAL_INTAKE_FORCE_TENANTS', ''); + if (raw === this.cachedForceTenantsRaw) return this.cachedForceTenants; + + const parsed = new Set( + raw + .split(',') + .map((s) => s.trim()) + .filter(Boolean), + ); + + this.cachedForceTenantsRaw = raw; + this.cachedForceTenants = parsed; + return parsed; + } + + private shouldForceGoalIntakeForNewRun(tenantId: string): boolean { + return this.getForcedTenants().has(tenantId); + } + + private buildDefaultGoalSpecSchema(): { + schemaId: string; + schemaVersion: number; + jsonSchema: Record; + uiSchema: Record; + } { + const schemaId = 'goal_intake.v1'; + const schemaVersion = 1; + const jsonSchema = { + title: 'Goal Intake', + type: 'object', + properties: { + notes: { + type: 'string', + title: 'Required details', + description: 'Provide any missing details needed to start this run (do not paste secrets).', + minLength: 1, + }, + }, + required: ['notes'], + additionalProperties: true, + } as const; + + const uiSchema = { + notes: { + 'ui:widget': 'textarea', + 'ui:options': { rows: 6 }, + }, + } as const; + + return { schemaId, schemaVersion, jsonSchema: jsonSchema as any, uiSchema: uiSchema as any }; + } + + /** + * Proactive GoalSpec gate before planning. + * + * Contract: + * - If GoalSpec is INCOMPLETE, orchestrator MUST NOT proceed to PLANNING. + * - Instead, ensure an OPEN GOAL_INTAKE prompt exists and the run is WAITING_USER_INPUT. + * - If no GoalSpec exists yet, create a minimal COMPLETE GoalSpec (minimum viable spec). + * + * This makes "planner prompt-first output" a safety net, not the primary intake mechanism. + */ + async ensureGoalSpecReadyForPlanning(request: { + goalRunId: string; + tenantId: string; + }): Promise< + | { ready: true; goalSpecId: string } + | { ready: false; goalSpecId: string; promptId: string } + > { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: request.goalRunId }, + select: { id: true, tenantId: true, goal: true, phase: true }, + }); + if (!goalRun) throw new NotFoundException(`GoalRun ${request.goalRunId} not found`); + + const now = new Date(); + + const existingGoalSpec = await this.prisma.goalSpec.findUnique({ + where: { goalRunId: request.goalRunId }, + select: { + id: true, + status: true, + schemaId: true, + schemaVersion: true, + jsonSchema: true, + uiSchema: true, + values: true, + }, + }); + + const defaults = this.buildDefaultGoalSpecSchema(); + const initialStatus = this.shouldForceGoalIntakeForNewRun(goalRun.tenantId) + ? GoalSpecStatus.INCOMPLETE + : GoalSpecStatus.COMPLETE; + + const goalSpec = + existingGoalSpec ?? + (await this.prisma.goalSpec.create({ + data: { + id: createId(), + goalRunId: request.goalRunId, + tenantId: request.tenantId, + status: initialStatus, + schemaId: defaults.schemaId, + schemaVersion: defaults.schemaVersion, + jsonSchema: defaults.jsonSchema as any, + uiSchema: defaults.uiSchema as any, + values: {}, + }, + select: { + id: true, + status: true, + schemaId: true, + schemaVersion: true, + jsonSchema: true, + uiSchema: true, + values: true, + }, + })); + + if (goalSpec.status === GoalSpecStatus.COMPLETE) { + return { ready: true, goalSpecId: goalSpec.id }; + } + + const prompt = await this.userPromptService.ensureOpenGoalSpecPrompt({ + tenantId: request.tenantId, + goalRunId: request.goalRunId, + goalSpecId: goalSpec.id, + kind: UserPromptKind.GOAL_INTAKE, + schemaId: goalSpec.schemaId ?? defaults.schemaId, + schemaVersion: goalSpec.schemaVersion ?? defaults.schemaVersion, + jsonSchema: (goalSpec.jsonSchema ?? defaults.jsonSchema) as any, + uiSchema: (goalSpec.uiSchema ?? defaults.uiSchema) as any, + validatorVersion: 'ajv@8', + payload: { + kind: 'GOAL_INTAKE', + goalRunId: request.goalRunId, + goal: goalRun.goal, + reason: { + code: 'GOAL_SPEC_INCOMPLETE', + message: 'Goal intake is required before planning can begin', + }, + question: 'Provide the required information to begin planning.', + schemaId: goalSpec.schemaId ?? defaults.schemaId, + schemaVersion: goalSpec.schemaVersion ?? defaults.schemaVersion, + jsonSchema: goalSpec.jsonSchema ?? defaults.jsonSchema, + uiSchema: goalSpec.uiSchema ?? defaults.uiSchema, + existingValues: goalSpec.values ?? {}, + }, + // Default to 24h expiry; policy can be tightened later. + expiresAt: new Date(now.getTime() + 24 * 60 * 60 * 1000), + }); + + const phaseUpdated = await this.prisma.goalRun.updateMany({ + where: { + id: request.goalRunId, + phase: { + in: [GoalRunPhase.INITIALIZING, GoalRunPhase.PLANNING, GoalRunPhase.EXECUTING, GoalRunPhase.REPLANNING], + }, + }, + data: { + phase: GoalRunPhase.WAITING_USER_INPUT, + waitReason: GoalRunWaitReason.USER_INPUT, + waitStartedAt: now, + waitUntil: null, + waitDetail: { + kind: 'GOAL_INTAKE', + promptId: prompt.id, + promptKind: prompt.kind, + goalSpecId: goalSpec.id, + } as any, + }, + }); + + if (phaseUpdated.count > 0) { + this.goalIntakeStartedTotal.labels('gate').inc(); + + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId: request.goalRunId, + previousPhase: goalRun.phase, + newPhase: GoalRunPhase.WAITING_USER_INPUT, + }); + + await this.goalRunService.createActivityEvent(request.goalRunId, { + eventType: 'USER_PROMPT_CREATED', + title: 'Goal intake required (before planning)', + description: 'Goal intake required before planning can begin', + severity: 'warning', + details: { + promptId: prompt.id, + promptKind: prompt.kind, + dedupeKey: prompt.dedupeKey, + goalSpecId: goalSpec.id, + }, + }); + } + + await this.outboxService.enqueueOnce({ + dedupeKey: prompt.dedupeKey, + aggregateId: request.goalRunId, + eventType: 'user_prompt.created', + payload: { + promptId: prompt.id, + goalRunId: request.goalRunId, + tenantId: request.tenantId, + checklistItemId: null, + goalSpecId: goalSpec.id, + kind: prompt.kind, + stepDescription: 'Goal intake required before planning can begin', + }, + }); + + this.logger.warn( + `Goal intake gate triggered for goalRunId=${request.goalRunId} promptId=${prompt.id} goalSpecId=${goalSpec.id}`, + ); + + return { ready: false, goalSpecId: goalSpec.id, promptId: prompt.id }; + } + + async requestGoalIntakeFromPlannerError(request: { + goalRunId: string; + tenantId: string; + error: PlannerFirstStepUserInputError; + }): Promise<{ goalSpecId: string; promptId: string }> { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: request.goalRunId }, + select: { id: true, tenantId: true, goal: true, phase: true }, + }); + if (!goalRun) throw new NotFoundException(`GoalRun ${request.goalRunId} not found`); + + const now = new Date(); + + const { schemaId, schemaVersion, jsonSchema, uiSchema } = this.buildDefaultGoalSpecSchema(); + + const goalSpec = await this.prisma.goalSpec.upsert({ + where: { goalRunId: request.goalRunId }, + create: { + id: createId(), + goalRunId: request.goalRunId, + tenantId: request.tenantId, + status: GoalSpecStatus.INCOMPLETE, + schemaId, + schemaVersion, + jsonSchema: jsonSchema as any, + uiSchema: uiSchema as any, + values: {}, + }, + update: { + status: GoalSpecStatus.INCOMPLETE, + schemaId, + schemaVersion, + jsonSchema: jsonSchema as any, + uiSchema: uiSchema as any, + }, + select: { id: true, values: true }, + }); + + const question = request.error.firstStep.description; + const prompt = await this.userPromptService.ensureOpenGoalSpecPrompt({ + tenantId: request.tenantId, + goalRunId: request.goalRunId, + goalSpecId: goalSpec.id, + kind: UserPromptKind.GOAL_INTAKE, + schemaId, + schemaVersion, + jsonSchema: jsonSchema as any, + uiSchema: uiSchema as any, + validatorVersion: 'ajv@8', + payload: { + kind: 'GOAL_INTAKE', + goalRunId: request.goalRunId, + goal: goalRun.goal, + reason: { + code: request.error.reason, + message: 'Planner required external input before planning could begin', + }, + question, + schemaId, + schemaVersion, + jsonSchema, + uiSchema, + existingValues: goalSpec.values, + }, + // Default to 24h expiry; policy can be tightened later. + expiresAt: new Date(now.getTime() + 24 * 60 * 60 * 1000), + }); + + const phaseUpdated = await this.prisma.goalRun.updateMany({ + where: { + id: request.goalRunId, + phase: { + in: [GoalRunPhase.INITIALIZING, GoalRunPhase.PLANNING, GoalRunPhase.EXECUTING, GoalRunPhase.REPLANNING], + }, + }, + data: { + phase: GoalRunPhase.WAITING_USER_INPUT, + waitReason: GoalRunWaitReason.USER_INPUT, + waitStartedAt: now, + waitUntil: null, + waitDetail: { + kind: 'GOAL_INTAKE', + promptId: prompt.id, + promptKind: prompt.kind, + goalSpecId: goalSpec.id, + source: 'PLANNER_FIRST_STEP_USER_INPUT', + } as any, + }, + }); + + if (phaseUpdated.count > 0) { + this.goalIntakeStartedTotal.labels('planner_error').inc(); + + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId: request.goalRunId, + previousPhase: goalRun.phase, + newPhase: GoalRunPhase.WAITING_USER_INPUT, + }); + + await this.goalRunService.createActivityEvent(request.goalRunId, { + eventType: 'USER_PROMPT_CREATED', + title: 'Goal intake required (before planning)', + description: question, + severity: 'warning', + details: { + promptId: prompt.id, + promptKind: prompt.kind, + dedupeKey: prompt.dedupeKey, + goalSpecId: goalSpec.id, + }, + }); + } + + await this.outboxService.enqueueOnce({ + dedupeKey: prompt.dedupeKey, + aggregateId: request.goalRunId, + eventType: 'user_prompt.created', + payload: { + promptId: prompt.id, + goalRunId: request.goalRunId, + tenantId: request.tenantId, + checklistItemId: null, + goalSpecId: goalSpec.id, + kind: prompt.kind, + stepDescription: question, + }, + }); + + this.logger.warn( + `Goal intake requested for goalRunId=${request.goalRunId} promptId=${prompt.id} goalSpecId=${goalSpec.id}`, + ); + + return { goalSpecId: goalSpec.id, promptId: prompt.id }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-refinement.service.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-refinement.service.ts new file mode 100644 index 000000000..a5f98ce8c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-refinement.service.ts @@ -0,0 +1,638 @@ +/** + * Goal Refinement Service + * Phase 9 (v5.4.0): Advanced AI Features + * + * Responsibilities: + * - Analyze vague/ambiguous goals + * - Generate refined SMART goal suggestions + * - Provide clarifying questions for ambiguity + * - Estimate goal complexity and feasibility + * - Decompose complex goals into sub-goals + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { z } from 'zod'; + +// Zod schemas for LLM output validation +const RefinedGoalSchema = z.object({ + refined: z.string().min(10).max(1000), + reasoning: z.string().optional(), + improvements: z.array(z.string()).optional(), + specificity: z.number().min(0).max(1).optional(), +}); + +const GoalRefinementOutputSchema = z.object({ + originalAnalysis: z.object({ + clarity: z.number().min(0).max(1), + specificity: z.number().min(0).max(1), + actionability: z.number().min(0).max(1), + measurability: z.number().min(0).max(1), + issues: z.array(z.string()), + }), + refinedSuggestions: z.array(RefinedGoalSchema).min(1).max(5), + clarifyingQuestions: z.array(z.string()).max(5).optional(), + decomposition: z.array(z.object({ + subGoal: z.string(), + order: z.number(), + dependency: z.string().optional(), + })).optional(), + estimatedComplexity: z.enum(['simple', 'moderate', 'complex', 'very_complex']).optional(), + suggestedConstraints: z.record(z.any()).optional(), +}); + +type GoalRefinementOutput = z.infer; + +// Public interfaces +export interface RefinementRequest { + tenantId: string; + goal: string; + context?: { + previousGoals?: string[]; + userPreferences?: Record; + domain?: string; + }; + options?: { + maxSuggestions?: number; + includeQuestions?: boolean; + includeDecomposition?: boolean; + style?: 'concise' | 'detailed' | 'technical'; + }; +} + +export interface RefinementResult { + originalGoal: string; + analysis: { + clarity: number; + specificity: number; + actionability: number; + measurability: number; + overallScore: number; + issues: string[]; + }; + suggestions: Array<{ + refinedGoal: string; + reasoning?: string; + improvements?: string[]; + matchScore: number; + }>; + clarifyingQuestions?: string[]; + decomposition?: Array<{ + subGoal: string; + order: number; + dependency?: string; + }>; + complexity: 'simple' | 'moderate' | 'complex' | 'very_complex'; + suggestedConstraints?: Record; + tokensUsed?: number; + cached: boolean; +} + +export interface QuickAnalysisResult { + clarity: number; + specificity: number; + actionability: number; + issues: string[]; + needsRefinement: boolean; +} + +@Injectable() +export class GoalRefinementService { + private readonly logger = new Logger(GoalRefinementService.name); + private readonly llmModel: string; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + private readonly cacheEnabled: boolean; + private readonly cacheTTLMs: number; + + // Simple in-memory cache for goal analysis + private analysisCache = new Map(); + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.llmModel = this.configService.get('LLM_MODEL', 'claude-3-5-sonnet-20241022'); + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + this.cacheEnabled = this.configService.get('GOAL_REFINEMENT_CACHE_ENABLED', 'true') === 'true'; + this.cacheTTLMs = parseInt(this.configService.get('GOAL_REFINEMENT_CACHE_TTL_MS', '3600000'), 10); // 1 hour default + + this.logger.log('GoalRefinementService initialized'); + } + + /** + * Analyze and refine a goal + */ + async refineGoal(request: RefinementRequest): Promise { + const { tenantId, goal, context, options } = request; + + this.logger.log(`Refining goal for tenant ${tenantId}: "${goal.substring(0, 50)}..."`); + + // Check cache + const cacheKey = this.getCacheKey(goal, options); + if (this.cacheEnabled) { + const cached = this.analysisCache.get(cacheKey); + if (cached && Date.now() - cached.timestamp < this.cacheTTLMs) { + this.logger.debug('Returning cached refinement result'); + return { ...cached.result, cached: true }; + } + } + + // Build and execute LLM prompt + const prompt = this.buildRefinementPrompt(goal, context, options); + + try { + const llmResponse = await this.callLLM(prompt); + const parsed = this.parseLLMResponse(llmResponse); + + // Validate response + const validated = GoalRefinementOutputSchema.safeParse(parsed); + + let result: RefinementResult; + + if (validated.success) { + result = this.transformToResult(goal, validated.data); + } else { + this.logger.warn(`LLM response validation failed: ${validated.error.message}`); + result = this.generateFallbackResult(goal); + } + + // Cache result + if (this.cacheEnabled) { + this.analysisCache.set(cacheKey, { result, timestamp: Date.now() }); + } + + // Emit event + this.eventEmitter.emit('goal-refinement.completed', { + tenantId, + originalGoal: goal, + suggestionsCount: result.suggestions.length, + overallScore: result.analysis.overallScore, + }); + + // Store refinement record + await this.storeRefinementRecord(tenantId, goal, result); + + return result; + } catch (error: any) { + this.logger.error(`Goal refinement failed: ${error.message}`); + + // Emit failure event + this.eventEmitter.emit('goal-refinement.failed', { + tenantId, + originalGoal: goal, + error: error.message, + }); + + // Return fallback + return this.generateFallbackResult(goal); + } + } + + /** + * Quick analysis without full refinement (heuristic-based) + */ + async quickAnalyze(goal: string): Promise { + const lowerGoal = goal.toLowerCase().trim(); + const issues: string[] = []; + + // Clarity check - is it understandable? + let clarity = 1.0; + if (goal.length < 10) { + clarity -= 0.4; + issues.push('Goal is too short to be clear'); + } + if (goal.split(' ').length < 3) { + clarity -= 0.2; + issues.push('Goal needs more descriptive words'); + } + if (/\?$/.test(goal)) { + clarity -= 0.1; + issues.push('Goal should be a statement, not a question'); + } + + // Specificity check - does it have concrete details? + let specificity = 1.0; + const vagueWords = ['something', 'stuff', 'thing', 'things', 'somehow', 'whatever', 'etc']; + const vagueCount = vagueWords.filter(w => lowerGoal.includes(w)).length; + specificity -= vagueCount * 0.15; + if (vagueCount > 0) { + issues.push('Goal contains vague words that should be made specific'); + } + + // Check for quantifiable elements + const hasNumbers = /\d+/.test(goal); + const hasTimeframe = /\b(today|tomorrow|week|month|hour|minute|by|before|after|until)\b/i.test(goal); + if (!hasNumbers && !hasTimeframe) { + specificity -= 0.2; + issues.push('Consider adding specific quantities or timeframes'); + } + + // Actionability check - does it start with an action verb? + let actionability = 1.0; + const actionVerbs = [ + 'create', 'build', 'write', 'send', 'download', 'upload', 'login', 'log in', + 'navigate', 'open', 'search', 'find', 'update', 'delete', 'add', 'remove', + 'configure', 'setup', 'set up', 'install', 'deploy', 'test', 'verify', + 'analyze', 'review', 'check', 'monitor', 'generate', 'export', 'import', + ]; + const startsWithAction = actionVerbs.some(v => lowerGoal.startsWith(v)); + if (!startsWithAction) { + actionability -= 0.3; + issues.push('Goal should start with an action verb'); + } + + // Measurability - can success be determined? + let measurability = 0.7; // Default + const measurableIndicators = [ + 'successfully', 'complete', 'finish', 'ensure', 'verify', 'confirm', + 'all', 'each', 'every', 'must', 'should', 'will' + ]; + const hasMeasurable = measurableIndicators.some(i => lowerGoal.includes(i)); + if (hasMeasurable) { + measurability = 0.9; + } + + // Clamp values + clarity = Math.max(0, Math.min(1, clarity)); + specificity = Math.max(0, Math.min(1, specificity)); + actionability = Math.max(0, Math.min(1, actionability)); + measurability = Math.max(0, Math.min(1, measurability)); + + const avgScore = (clarity + specificity + actionability + measurability) / 4; + const needsRefinement = avgScore < 0.7 || issues.length > 2; + + return { + clarity, + specificity, + actionability, + issues, + needsRefinement, + }; + } + + /** + * Get refinement suggestions based on historical patterns + */ + async getSuggestionsFromHistory( + tenantId: string, + partialGoal: string, + limit: number = 5, + ): Promise> { + // Find similar successful goals from history + const recentGoals = await this.prisma.goalRun.findMany({ + where: { + tenantId, + status: 'COMPLETED', + }, + select: { + goal: true, + }, + orderBy: { createdAt: 'desc' }, + take: 100, + }); + + // Simple similarity scoring based on word overlap + const partialWords = new Set(partialGoal.toLowerCase().split(/\s+/)); + + const scored = recentGoals.map(g => { + const goalWords = new Set(g.goal.toLowerCase().split(/\s+/)); + const intersection = [...partialWords].filter(w => goalWords.has(w)); + const similarity = intersection.length / Math.max(partialWords.size, goalWords.size); + return { goal: g.goal, similarity }; + }); + + // Count occurrences and sort by similarity + const goalCounts = new Map(); + scored.forEach(s => { + goalCounts.set(s.goal, (goalCounts.get(s.goal) || 0) + 1); + }); + + const unique = [...new Map(scored.map(s => [s.goal, s])).values()]; + + return unique + .filter(s => s.similarity > 0.2) + .sort((a, b) => b.similarity - a.similarity) + .slice(0, limit) + .map(s => ({ + goal: s.goal, + similarity: Math.round(s.similarity * 100) / 100, + usageCount: goalCounts.get(s.goal) || 1, + })); + } + + /** + * Decompose a complex goal into sub-goals + */ + async decomposeGoal( + tenantId: string, + goal: string, + maxSubGoals: number = 5, + ): Promise> { + const prompt = this.buildDecompositionPrompt(goal, maxSubGoals); + + try { + const response = await this.callLLM(prompt); + const parsed = this.parseLLMResponse(response); + + if (Array.isArray(parsed.subGoals)) { + return parsed.subGoals.map((sg: any, idx: number) => ({ + subGoal: sg.goal || sg.subGoal || sg, + order: sg.order || idx + 1, + estimatedDuration: sg.duration || sg.estimatedDuration, + })); + } + + return this.generateFallbackDecomposition(goal); + } catch (error: any) { + this.logger.error(`Goal decomposition failed: ${error.message}`); + return this.generateFallbackDecomposition(goal); + } + } + + // Private methods + + private buildRefinementPrompt( + goal: string, + context?: RefinementRequest['context'], + options?: RefinementRequest['options'], + ): string { + const style = options?.style || 'detailed'; + const maxSuggestions = options?.maxSuggestions || 3; + const includeQuestions = options?.includeQuestions !== false; + const includeDecomposition = options?.includeDecomposition !== false; + + let prompt = `You are an expert goal analyst. Analyze the following user goal and provide refinement suggestions. + +USER GOAL: "${goal}" + +${context?.domain ? `DOMAIN CONTEXT: ${context.domain}` : ''} +${context?.previousGoals?.length ? `PREVIOUS GOALS BY USER:\n${context.previousGoals.slice(0, 3).join('\n')}` : ''} + +ANALYSIS TASK: +1. Evaluate the goal on these dimensions (0-1 scale): + - Clarity: Is it unambiguous and easy to understand? + - Specificity: Does it have concrete, measurable elements? + - Actionability: Can it be directly executed? + - Measurability: Can success be objectively determined? + +2. Identify any issues with the goal (vagueness, ambiguity, missing context). + +3. Provide ${maxSuggestions} refined versions of the goal that are SMART: + - Specific: Well-defined and clear + - Measurable: Quantifiable outcome + - Achievable: Realistic scope + - Relevant: Maintains original intent + - Time-bound: Has implicit or explicit timeframe + +${includeQuestions ? '4. Generate clarifying questions that would help refine the goal further.' : ''} + +${includeDecomposition ? '5. If the goal is complex, break it into ordered sub-goals.' : ''} + +6. Estimate complexity: simple, moderate, complex, or very_complex + +7. Suggest any constraints that might be helpful (workspace mode, required tools, etc.) + +OUTPUT FORMAT (JSON): +{ + "originalAnalysis": { + "clarity": 0.0-1.0, + "specificity": 0.0-1.0, + "actionability": 0.0-1.0, + "measurability": 0.0-1.0, + "issues": ["issue1", "issue2"] + }, + "refinedSuggestions": [ + { + "refined": "Improved goal text", + "reasoning": "Why this is better", + "improvements": ["improvement1", "improvement2"], + "specificity": 0.0-1.0 + } + ], + ${includeQuestions ? '"clarifyingQuestions": ["question1", "question2"],' : ''} + ${includeDecomposition ? '"decomposition": [{"subGoal": "...", "order": 1, "dependency": null}],' : ''} + "estimatedComplexity": "simple|moderate|complex|very_complex", + "suggestedConstraints": {} +} + +${style === 'concise' ? 'Keep suggestions concise and direct.' : ''} +${style === 'technical' ? 'Use technical language appropriate for automation.' : ''} + +Analyze and refine the goal:`; + + return prompt; + } + + private buildDecompositionPrompt(goal: string, maxSubGoals: number): string { + return `Break down this goal into ${maxSubGoals} or fewer sequential sub-goals: + +GOAL: "${goal}" + +OUTPUT FORMAT (JSON): +{ + "subGoals": [ + {"goal": "Sub-goal 1", "order": 1, "duration": "estimated time"}, + {"goal": "Sub-goal 2", "order": 2, "duration": "estimated time"} + ] +} + +Each sub-goal should be: +- Independently verifiable +- In logical execution order +- Specific and actionable + +Decompose the goal:`; + } + + private async callLLM(prompt: string): Promise { + if (!this.llmApiKey) { + this.logger.warn('No LLM API key configured, using mock response'); + return this.getMockLLMResponse(prompt); + } + + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: this.llmModel, + max_tokens: 2000, + messages: [{ role: 'user', content: prompt }], + }), + }); + + if (!response.ok) { + throw new Error(`LLM API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json(); + return data.content[0].text; + } + + private parseLLMResponse(response: string): any { + // Extract JSON from response (handle markdown code blocks) + const jsonMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/); + const jsonStr = jsonMatch ? jsonMatch[1] : response; + + try { + return JSON.parse(jsonStr.trim()); + } catch { + const objectMatch = response.match(/\{[\s\S]*\}/); + if (objectMatch) { + return JSON.parse(objectMatch[0]); + } + throw new Error('Failed to parse LLM response as JSON'); + } + } + + private transformToResult( + originalGoal: string, + output: GoalRefinementOutput, + ): RefinementResult { + const analysis = output.originalAnalysis; + const overallScore = ( + analysis.clarity + + analysis.specificity + + analysis.actionability + + analysis.measurability + ) / 4; + + return { + originalGoal, + analysis: { + ...analysis, + overallScore: Math.round(overallScore * 100) / 100, + }, + suggestions: output.refinedSuggestions.map((s, idx) => ({ + refinedGoal: s.refined, + reasoning: s.reasoning, + improvements: s.improvements, + matchScore: s.specificity || (1 - idx * 0.1), // First suggestion is best + })), + clarifyingQuestions: output.clarifyingQuestions, + decomposition: output.decomposition, + complexity: output.estimatedComplexity || 'moderate', + suggestedConstraints: output.suggestedConstraints, + cached: false, + }; + } + + private generateFallbackResult(goal: string): RefinementResult { + const quickAnalysis = this.quickAnalyzeSync(goal); + + return { + originalGoal: goal, + analysis: { + clarity: quickAnalysis.clarity, + specificity: quickAnalysis.specificity, + actionability: quickAnalysis.actionability, + measurability: 0.7, + overallScore: (quickAnalysis.clarity + quickAnalysis.specificity + quickAnalysis.actionability + 0.7) / 4, + issues: quickAnalysis.issues, + }, + suggestions: [ + { + refinedGoal: goal, + reasoning: 'Original goal (analysis unavailable)', + matchScore: 1.0, + }, + ], + complexity: 'moderate', + cached: false, + }; + } + + private quickAnalyzeSync(goal: string): { clarity: number; specificity: number; actionability: number; issues: string[] } { + const issues: string[] = []; + let clarity = goal.length >= 10 ? 0.8 : 0.5; + let specificity = goal.length >= 20 ? 0.7 : 0.5; + let actionability = 0.7; + + if (goal.length < 10) issues.push('Goal is too short'); + if (!/^[A-Z]/.test(goal) && !/^[a-z]+\s/.test(goal)) issues.push('Goal should start with action verb'); + + return { clarity, specificity, actionability, issues }; + } + + private generateFallbackDecomposition(goal: string): Array<{ subGoal: string; order: number }> { + return [ + { subGoal: `Understand requirements for: ${goal.substring(0, 50)}`, order: 1 }, + { subGoal: 'Execute the main objective', order: 2 }, + { subGoal: 'Verify successful completion', order: 3 }, + ]; + } + + private getCacheKey(goal: string, options?: RefinementRequest['options']): string { + const normalized = goal.toLowerCase().trim(); + const optionsKey = JSON.stringify(options || {}); + return `${normalized}::${optionsKey}`; + } + + private async storeRefinementRecord( + tenantId: string, + originalGoal: string, + result: RefinementResult, + ): Promise { + try { + await this.prisma.goalRefinementSuggestion.create({ + data: { + tenantId, + originalGoal, + analysisScore: result.analysis.overallScore, + suggestionsCount: result.suggestions.length, + topSuggestion: result.suggestions[0]?.refinedGoal || originalGoal, + complexity: result.complexity, + issues: result.analysis.issues, + }, + }); + } catch (error: any) { + // Log but don't fail if storage fails + this.logger.warn(`Failed to store refinement record: ${error.message}`); + } + } + + private getMockLLMResponse(prompt: string): string { + const goalMatch = prompt.match(/USER GOAL: "(.+?)"/); + const goal = goalMatch ? goalMatch[1] : 'Unknown goal'; + + return JSON.stringify({ + originalAnalysis: { + clarity: 0.7, + specificity: 0.6, + actionability: 0.8, + measurability: 0.5, + issues: ['Could be more specific', 'Consider adding measurable criteria'], + }, + refinedSuggestions: [ + { + refined: `${goal} with specific success criteria and verification`, + reasoning: 'Added measurability', + improvements: ['Added success criteria', 'Made outcome verifiable'], + specificity: 0.9, + }, + { + refined: `Complete the following: ${goal}`, + reasoning: 'Made more actionable', + improvements: ['Added action verb'], + specificity: 0.8, + }, + ], + clarifyingQuestions: [ + 'What specific outcome are you looking for?', + 'Are there any constraints or preferences?', + ], + estimatedComplexity: 'moderate', + suggestedConstraints: { + workspaceMode: 'SHARED', + }, + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-run.execution-engine.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-run.execution-engine.spec.ts new file mode 100644 index 000000000..c2336511c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-run.execution-engine.spec.ts @@ -0,0 +1,334 @@ +import { GoalRunService } from './goal-run.service'; +import { GoalRunExecutionEngine, GoalRunPhase, GoalRunStatus } from '@prisma/client'; + +describe('GoalRunService execution_engine (immutable per-run engine)', () => { + it('sets executionEngine=LEGACY_DB_LOOP when Temporal is disabled', async () => { + const prisma = { + goalRun: { + create: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const workflowService = {} as any; + const eventEmitter = { emit: jest.fn() } as any; + const temporalWorkflowService = { isEnabled: jest.fn().mockReturnValue(false) } as any; + const featureFlagService = { shouldUseTemporalWorkflow: jest.fn() } as any; + + prisma.goalRun.create.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + constraints: {}, + phase: GoalRunPhase.INITIALIZING, + status: GoalRunStatus.PENDING, + executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP, + workflowRunId: null, + currentPlanVersion: 0, + error: null, + createdAt: new Date(), + updatedAt: new Date(), + startedAt: null, + completedAt: null, + }); + + const service = new GoalRunService( + prisma, + workflowService, + eventEmitter, + temporalWorkflowService, + featureFlagService, + ); + + await service.createFromGoal({ tenantId: 't-1', goal: 'Do the thing', autoStart: false }); + + expect(prisma.goalRun.create).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP, + }), + }), + ); + expect(featureFlagService.shouldUseTemporalWorkflow).not.toHaveBeenCalled(); + }); + + it('sets executionEngine=TEMPORAL_WORKFLOW when Temporal is enabled and feature flag routes it', async () => { + const prisma = { + goalRun: { + create: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const workflowService = {} as any; + const eventEmitter = { emit: jest.fn() } as any; + const temporalWorkflowService = { + isEnabled: jest.fn().mockReturnValue(true), + getWorkflowId: jest.fn((goalRunId: string) => `goal-run-${goalRunId}`), + } as any; + const featureFlagService = { + shouldUseTemporalWorkflow: jest.fn().mockReturnValue({ enabled: true, reason: 'test' }), + } as any; + + prisma.goalRun.create.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + constraints: {}, + phase: GoalRunPhase.INITIALIZING, + status: GoalRunStatus.PENDING, + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + workflowRunId: null, + currentPlanVersion: 0, + error: null, + createdAt: new Date(), + updatedAt: new Date(), + startedAt: null, + completedAt: null, + }); + + const service = new GoalRunService( + prisma, + workflowService, + eventEmitter, + temporalWorkflowService, + featureFlagService, + ); + + await service.createFromGoal({ tenantId: 't-1', goal: 'Do the thing', autoStart: false }); + + expect(prisma.goalRun.create).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + temporalWorkflowId: expect.stringMatching(/^goal-run-gr-/), + }), + }), + ); + expect(featureFlagService.shouldUseTemporalWorkflow).toHaveBeenCalledTimes(1); + }); + + it('startGoalRun uses stored engine=LEGACY_DB_LOOP and does not re-route via feature flags', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + update: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const workflowService = {} as any; + const eventEmitter = { emit: jest.fn() } as any; + const temporalWorkflowService = { + isEnabled: jest.fn().mockReturnValue(true), + startGoalRunWorkflow: jest.fn(), + } as any; + const featureFlagService = { shouldUseTemporalWorkflow: jest.fn() } as any; + + prisma.goalRun.findUnique.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP, + }); + prisma.goalRun.update.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + constraints: {}, + phase: GoalRunPhase.INITIALIZING, + status: GoalRunStatus.RUNNING, + executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP, + workflowRunId: null, + currentPlanVersion: 0, + error: null, + createdAt: new Date(), + updatedAt: new Date(), + startedAt: new Date(), + completedAt: null, + }); + + const service = new GoalRunService( + prisma, + workflowService, + eventEmitter, + temporalWorkflowService, + featureFlagService, + ); + + await service.startGoalRun('gr-1'); + + expect(featureFlagService.shouldUseTemporalWorkflow).not.toHaveBeenCalled(); + expect(temporalWorkflowService.startGoalRunWorkflow).not.toHaveBeenCalled(); + expect(eventEmitter.emit).toHaveBeenCalledWith('goal-run.started', { goalRunId: 'gr-1' }); + }); + + it('startGoalRun uses stored engine=TEMPORAL_WORKFLOW and does not emit goal-run.started', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + update: jest.fn(), + updateMany: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const workflowService = {} as any; + const eventEmitter = { emit: jest.fn() } as any; + const temporalWorkflowService = { + isEnabled: jest.fn().mockReturnValue(true), + startGoalRunWorkflow: jest.fn().mockResolvedValue({ workflowId: 'wf-1', runId: 'wr-1' }), + } as any; + const featureFlagService = { shouldUseTemporalWorkflow: jest.fn() } as any; + + prisma.goalRun.findUnique.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + }); + prisma.goalRun.update.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Do the thing', + constraints: {}, + phase: GoalRunPhase.INITIALIZING, + status: GoalRunStatus.RUNNING, + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + workflowRunId: null, + currentPlanVersion: 0, + error: null, + createdAt: new Date(), + updatedAt: new Date(), + startedAt: new Date(), + completedAt: null, + }); + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }); + + const service = new GoalRunService( + prisma, + workflowService, + eventEmitter, + temporalWorkflowService, + featureFlagService, + ); + + await service.startGoalRun('gr-1'); + + expect(featureFlagService.shouldUseTemporalWorkflow).not.toHaveBeenCalled(); + expect(temporalWorkflowService.startGoalRunWorkflow).toHaveBeenCalledTimes(1); + expect(prisma.goalRun.updateMany).toHaveBeenCalledWith( + expect.objectContaining({ + where: expect.objectContaining({ id: 'gr-1' }), + data: expect.objectContaining({ temporalWorkflowId: 'wf-1', temporalRunId: 'wr-1' }), + }), + ); + expect(eventEmitter.emit).not.toHaveBeenCalledWith('goal-run.started', expect.anything()); + }); + + it('startGoalRun provisions a workspace anchor for DESKTOP-required Temporal goals', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + update: jest.fn(), + updateMany: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const workflowService = { + getOrCreateGoalRunWorkspace: jest.fn().mockResolvedValue({ + workflowRunId: 'wf-gr-1', + workspaceId: 'ws-gr-1', + created: true, + }), + } as any; + + const eventEmitter = { emit: jest.fn() } as any; + const temporalWorkflowService = { + isEnabled: jest.fn().mockReturnValue(true), + startGoalRunWorkflow: jest.fn().mockResolvedValue({ workflowId: 'twf-1', runId: 'tr-1' }), + } as any; + const featureFlagService = { shouldUseTemporalWorkflow: jest.fn() } as any; + + prisma.goalRun.findUnique.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Find flights from DTW to DEN next week', + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + workflowRunId: null, + temporalStartedAt: null, + temporalRunId: null, + temporalWorkflowId: null, + }); + + prisma.goalRun.update.mockResolvedValueOnce({ + id: 'gr-1', + tenantId: 't-1', + goal: 'Find flights from DTW to DEN next week', + constraints: {}, + phase: GoalRunPhase.INITIALIZING, + status: GoalRunStatus.RUNNING, + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + workflowRunId: null, + currentPlanVersion: 0, + error: null, + createdAt: new Date(), + updatedAt: new Date(), + startedAt: new Date(), + completedAt: null, + }); + + prisma.goalRun.updateMany + .mockResolvedValueOnce({ count: 1 }) // workflowRunId link + .mockResolvedValueOnce({ count: 1 }); // temporal identifiers + + const service = new GoalRunService( + prisma, + workflowService, + eventEmitter, + temporalWorkflowService, + featureFlagService, + ); + + await service.startGoalRun('gr-1'); + + expect(workflowService.getOrCreateGoalRunWorkspace).toHaveBeenCalledWith( + expect.objectContaining({ goalRunId: 'gr-1', tenantId: 't-1' }), + ); + + expect(prisma.goalRun.updateMany).toHaveBeenNthCalledWith( + 1, + expect.objectContaining({ + where: expect.objectContaining({ id: 'gr-1', workflowRunId: null }), + data: expect.objectContaining({ workflowRunId: 'wf-gr-1' }), + }), + ); + + expect(temporalWorkflowService.startGoalRunWorkflow).toHaveBeenCalledWith( + expect.objectContaining({ + goalRunId: 'gr-1', + tenantId: 't-1', + workspaceId: 'ws-gr-1', + }), + ); + + expect(prisma.goalRun.updateMany).toHaveBeenNthCalledWith( + 2, + expect.objectContaining({ + where: expect.objectContaining({ id: 'gr-1' }), + data: expect.objectContaining({ temporalWorkflowId: 'twf-1', temporalRunId: 'tr-1' }), + }), + ); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-run.prompt-cleanup.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-run.prompt-cleanup.spec.ts new file mode 100644 index 000000000..248057d04 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-run.prompt-cleanup.spec.ts @@ -0,0 +1,109 @@ +import { GoalRunService } from './goal-run.service'; +import { GoalRunStatus, UserPromptCancelReason, UserPromptStatus } from '@prisma/client'; + +describe(GoalRunService.name, () => { + describe('prompt cleanup on terminal goal run states', () => { + it('cancels OPEN prompts when a goal run is cancelled', async () => { + const prisma = { + $transaction: jest.fn(async (ops: any[]) => Promise.all(ops)), + goalRun: { + update: jest.fn(async () => ({ id: 'gr-1', status: GoalRunStatus.CANCELLED, workflowRunId: null })), + }, + userPrompt: { + updateMany: jest.fn(async () => ({ count: 2 })), + }, + } as any; + + const service = new GoalRunService( + prisma, + { cancelWorkflow: jest.fn() } as any, + { emit: jest.fn() } as any, + undefined, + undefined, + undefined, + ); + + (service as any).createActivityEvent = jest.fn(); + + await service.cancelGoalRun('gr-1', 'test cancel'); + + expect(prisma.userPrompt.updateMany).toHaveBeenCalledWith({ + where: { goalRunId: 'gr-1', status: UserPromptStatus.OPEN }, + data: expect.objectContaining({ + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.RUN_ENDED, + cancelledAt: expect.any(Date), + }), + }); + }); + + it('cancels OPEN prompts when a goal run fails', async () => { + const prisma = { + $transaction: jest.fn(async (ops: any[]) => Promise.all(ops)), + goalRun: { + update: jest.fn(async () => ({ id: 'gr-1', status: GoalRunStatus.FAILED, workflowRunId: null })), + }, + userPrompt: { + updateMany: jest.fn(async () => ({ count: 1 })), + }, + } as any; + + const service = new GoalRunService( + prisma, + { cancelWorkflow: jest.fn() } as any, + { emit: jest.fn() } as any, + undefined, + undefined, + undefined, + ); + + (service as any).createActivityEvent = jest.fn(); + + await service.failGoalRun('gr-1', 'boom'); + + expect(prisma.userPrompt.updateMany).toHaveBeenCalledWith({ + where: { goalRunId: 'gr-1', status: UserPromptStatus.OPEN }, + data: expect.objectContaining({ + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.RUN_ENDED, + cancelledAt: expect.any(Date), + }), + }); + }); + + it('cancels OPEN prompts when a goal run completes', async () => { + const prisma = { + $transaction: jest.fn(async (ops: any[]) => Promise.all(ops)), + goalRun: { + update: jest.fn(async () => ({ id: 'gr-1', status: GoalRunStatus.COMPLETED, workflowRunId: null })), + }, + userPrompt: { + updateMany: jest.fn(async () => ({ count: 3 })), + }, + } as any; + + const service = new GoalRunService( + prisma, + { cancelWorkflow: jest.fn() } as any, + { emit: jest.fn() } as any, + undefined, + undefined, + undefined, + ); + + (service as any).createActivityEvent = jest.fn(); + + await service.completeGoalRun('gr-1'); + + expect(prisma.userPrompt.updateMany).toHaveBeenCalledWith({ + where: { goalRunId: 'gr-1', status: UserPromptStatus.OPEN }, + data: expect.objectContaining({ + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.RUN_ENDED, + cancelledAt: expect.any(Date), + }), + }); + }); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-run.service.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-run.service.ts new file mode 100644 index 000000000..7b89d7ca2 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-run.service.ts @@ -0,0 +1,1468 @@ +/** + * Goal Run Service + * v1.0.1: Fixed startGoalRun to not pre-set PLANNING phase (allows orchestrator atomic transition) + * v1.0.0: Manus-style goal-first orchestration + * + * Responsibilities: + * - Create and manage goal runs + * - Track goal run state transitions + * - Coordinate with planner, executor, verifier services + * - Manage the orchestrator loop lifecycle + * + * Phase Transition Fix (v1.0.1): + * - startGoalRun no longer sets phase to PLANNING + * - Leaves phase as INITIALIZING for orchestrator loop to handle atomically + * - Prevents race condition regression where planning never starts + */ + +import { Injectable, Logger, NotFoundException, Optional } from '@nestjs/common'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { WorkflowService } from './workflow.service'; +import { createId } from '@paralleldrive/cuid2'; +import { + GoalRunExecutionEngine, + GoalRunPhase, + GoalRunStatus, + GoalRunWaitReason, + ChecklistItemStatus, + ExecutionSurface, + Prisma, + StepType, + UserPromptCancelReason, + UserPromptStatus, +} from '@prisma/client'; +import { TemporalWorkflowService } from '../temporal/temporal-workflow.service'; +import { FeatureFlagService } from '../temporal/feature-flag.service'; +import { hasUserInteractionTool } from '../contracts/planner-tools'; +import { TemporalCapabilityProbeService } from '../temporal/temporal-capability-probe.service'; +import { inferGoalFeasibility } from '../contracts/goal-feasibility'; + +// Re-export enums for convenience +export { GoalRunPhase, GoalRunStatus, ChecklistItemStatus }; + +// Input types +export interface CreateGoalRunInput { + tenantId: string; + goal: string; + constraints?: GoalConstraints; + autoStart?: boolean; +} + +export interface GoalConstraints { + workspaceMode?: 'EXCLUSIVE' | 'SHARED'; + allowedTools?: string[]; + riskPolicy?: { + requireApproval?: string[]; + blockTools?: string[]; + }; + deadlineMinutes?: number; +} + +export interface SteeringInput { + type: 'PAUSE' | 'RESUME' | 'CANCEL' | 'MODIFY_PLAN' | 'APPROVE' | 'REJECT' | 'INSTRUCTION'; + content?: string; + targetItemId?: string; + userId?: string; + userEmail?: string; +} + +// Response types +export interface GoalRunResponse { + id: string; + tenantId: string; + goal: string; + constraints: GoalConstraints; + phase: GoalRunPhase; + status: GoalRunStatus; + executionEngine: GoalRunExecutionEngine; + waitReason?: GoalRunWaitReason | null; + waitDetail?: any | null; + waitStartedAt?: Date | null; + waitUntil?: Date | null; + workflowRunId?: string | null; + temporalWorkflowId?: string | null; + temporalRunId?: string | null; + temporalStartedAt?: Date | null; + currentPlanVersion: number; + error?: string | null; + createdAt: Date; + updatedAt: Date; + startedAt?: Date | null; + completedAt?: Date | null; +} + +export interface GoalRunWithPlan extends GoalRunResponse { + currentPlan?: { + version: number; + summary?: string | null; + items: ChecklistItemResponse[]; + }; + progress: { + completed: number; + total: number; + }; +} + +export interface ChecklistItemResponse { + id: string; + order: number; + description: string; + status: ChecklistItemStatus; + type?: StepType; + suggestedTools?: string[]; + requiresDesktop?: boolean; + executionSurface?: ExecutionSurface; + // Derived, explicit “external input required” signal for clients/UI (legacy-safe). + isExternalInput?: boolean; + // True when `suggestedTools=["ASK_USER"]` is present but type is not USER_INPUT_REQUIRED. + // This indicates a pre-fix misclassification that should be rendered as a prompt in UI. + isLegacyPromptStep?: boolean; + expectedOutcome?: string | null; + actualOutcome?: string | null; + startedAt?: Date | null; + completedAt?: Date | null; +} + +export interface PlanVersionResponse { + id: string; + version: number; + summary?: string | null; + replanReason?: string | null; + confidence?: number | null; + createdAt: Date; + items: ChecklistItemResponse[]; +} + +export interface PaginatedResponse { + data: T[]; + total: number; + page: number; + pageSize: number; + hasMore: boolean; +} + +export interface GoalRunFilters { + status?: GoalRunStatus; + phase?: GoalRunPhase; + page?: number; + pageSize?: number; +} + +@Injectable() +export class GoalRunService { + private readonly logger = new Logger(GoalRunService.name); + + constructor( + private prisma: PrismaService, + private workflowService: WorkflowService, + private eventEmitter: EventEmitter2, + @Optional() private temporalWorkflowService?: TemporalWorkflowService, + @Optional() private featureFlagService?: FeatureFlagService, + @Optional() private temporalCapabilityProbeService?: TemporalCapabilityProbeService, + ) {} + + /** + * Check if Temporal workflow should be used for this goal run + */ + private shouldUseTemporalWorkflow(goalRunId: string, tenantId: string, goal?: string): boolean { + if (!this.temporalWorkflowService?.isEnabled() || !this.featureFlagService) { + return false; + } + + // Defense-in-depth: if the Temporal capability probe is red, do not route new runs to Temporal. + if (this.temporalCapabilityProbeService && !this.temporalCapabilityProbeService.isHealthyForTraffic()) { + const reason = this.temporalCapabilityProbeService.getLastError() ?? 'unknown'; + this.logger.warn(`Temporal capability probe is red; routing ${goalRunId} to legacy. reason=${reason}`); + return false; + } + + const result = this.featureFlagService.shouldUseTemporalWorkflow({ + goalRunId, + tenantId, + goalDescription: goal, + }); + + if (result.enabled) { + this.logger.log(`Using Temporal workflow for ${goalRunId}: ${result.reason}`); + } + + return result.enabled; + } + + /** + * Create a new goal run from a natural language goal + */ + async createFromGoal(input: CreateGoalRunInput): Promise { + const goalRunId = `gr-${createId()}`; + + this.logger.log(`Creating goal run ${goalRunId} for goal: "${input.goal.substring(0, 50)}..."`); + + const executionEngine = this.shouldUseTemporalWorkflow(goalRunId, input.tenantId, input.goal) + ? GoalRunExecutionEngine.TEMPORAL_WORKFLOW + : GoalRunExecutionEngine.LEGACY_DB_LOOP; + + const temporalWorkflowId = + executionEngine === GoalRunExecutionEngine.TEMPORAL_WORKFLOW && this.temporalWorkflowService + ? this.temporalWorkflowService.getWorkflowId(goalRunId) + : null; + + const goalRun = await this.prisma.goalRun.create({ + data: { + id: goalRunId, + tenantId: input.tenantId, + goal: input.goal, + constraints: (input.constraints || {}) as object, + phase: GoalRunPhase.INITIALIZING, + status: GoalRunStatus.PENDING, + currentPlanVersion: 0, + executionEngine, + ...(temporalWorkflowId ? { temporalWorkflowId } : {}), + }, + }); + + // Emit goal created event + this.eventEmitter.emit('goal-run.created', { + goalRunId, + tenantId: input.tenantId, + goal: input.goal, + }); + + // Create activity event + await this.createActivityEvent(goalRunId, { + eventType: 'GOAL_CREATED', + title: 'Goal run created', + description: `Goal: ${input.goal}`, + }); + + // Auto-start if requested + if (input.autoStart !== false) { + await this.startGoalRun(goalRunId); + } + + return this.toGoalRunResponse(goalRun); + } + + /** + * Get goal run by ID + */ + async findById(goalRunId: string): Promise { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + if (!goalRun) { + throw new NotFoundException(`Goal run ${goalRunId} not found`); + } + + return this.toGoalRunResponse(goalRun); + } + + /** + * Get goal run with current plan + */ + async findByIdWithPlan(goalRunId: string): Promise { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + }); + + if (!goalRun) { + throw new NotFoundException(`Goal run ${goalRunId} not found`); + } + + const currentPlanVersion = goalRun.planVersions[0]; + const items = currentPlanVersion?.checklistItems || []; + + const completedCount = items.filter( + (item) => item.status === ChecklistItemStatus.COMPLETED, + ).length; + + return { + ...this.toGoalRunResponse(goalRun), + currentPlan: currentPlanVersion + ? { + version: currentPlanVersion.version, + summary: currentPlanVersion.summary, + items: items.map(this.toChecklistItemResponse), + } + : undefined, + progress: { + completed: completedCount, + total: items.length, + }, + }; + } + + /** + * List goal runs for a tenant + */ + async findByTenant( + tenantId: string, + filters?: GoalRunFilters, + ): Promise> { + const page = filters?.page || 1; + const pageSize = filters?.pageSize || 20; + const skip = (page - 1) * pageSize; + + const where: Prisma.GoalRunWhereInput = { + tenantId, + ...(filters?.status && { status: filters.status }), + ...(filters?.phase && { phase: filters.phase }), + }; + + const [goalRuns, total] = await Promise.all([ + this.prisma.goalRun.findMany({ + where, + orderBy: { createdAt: 'desc' }, + skip, + take: pageSize, + }), + this.prisma.goalRun.count({ where }), + ]); + + return { + data: goalRuns.map(this.toGoalRunResponse), + total, + page, + pageSize, + hasMore: skip + goalRuns.length < total, + }; + } + + /** + * Start goal run execution + * v1.0.1: No longer sets phase to PLANNING - lets orchestrator handle phase transition atomically + * v1.1.0: Added Temporal workflow support with feature flag + * v5.18.12: Execution engine is pinned at creation (no per-start routing) + */ + async startGoalRun(goalRunId: string): Promise { + this.logger.log(`Starting goal run ${goalRunId}`); + + const existingGoalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + if (!existingGoalRun) { + throw new NotFoundException(`Goal run ${goalRunId} not found`); + } + + const executionEngine = + existingGoalRun.executionEngine ?? GoalRunExecutionEngine.LEGACY_DB_LOOP; + + const goalRun = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + status: GoalRunStatus.RUNNING, + // v1.0.1: Don't set phase here - leave as INITIALIZING + // The orchestrator loop will atomically transition INITIALIZING -> PLANNING + // This prevents race conditions where multiple iterations try to plan + startedAt: new Date(), + }, + }); + + if ( + executionEngine === GoalRunExecutionEngine.TEMPORAL_WORKFLOW && + (!this.temporalWorkflowService || !this.temporalWorkflowService.isEnabled()) + ) { + const error = 'Run execution_engine=TEMPORAL_WORKFLOW but Temporal workflows are disabled'; + + this.logger.error(`${error} (goalRunId=${goalRunId})`); + + await this.createActivityEvent(goalRunId, { + eventType: 'TEMPORAL_ENGINE_DISABLED', + title: 'Temporal workflow engine disabled', + description: error, + severity: 'error', + }); + + const failed = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + status: GoalRunStatus.FAILED, + phase: GoalRunPhase.FAILED, + error, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + + return this.toGoalRunResponse(failed); + } + + // Start Temporal workflow if engine is pinned to TEMPORAL_WORKFLOW + if (executionEngine === GoalRunExecutionEngine.TEMPORAL_WORKFLOW && this.temporalWorkflowService) { + // Temporal runs can still dispatch DESKTOP tasks via the agent stack. + // For DESKTOP-required goals we must anchor a Workspace + WorkflowRun so tasks have a stable workspaceId. + const goalFeasibility = inferGoalFeasibility(existingGoalRun.goal); + const mustUseDesktopWorkspace = goalFeasibility?.requiredSurface === ExecutionSurface.DESKTOP; + + let temporalWorkspaceId: string | undefined; + + if (existingGoalRun.workflowRunId) { + // If a workspace anchor already exists, reuse it. + (goalRun as any).workflowRunId = existingGoalRun.workflowRunId; + try { + const workflowRun = await this.prisma.workflowRun.findUnique({ + where: { id: existingGoalRun.workflowRunId }, + select: { workspaceId: true }, + }); + temporalWorkspaceId = workflowRun?.workspaceId ?? undefined; + } catch { + temporalWorkspaceId = undefined; + } + } else if (mustUseDesktopWorkspace) { + const workspaceAnchor = await this.workflowService.getOrCreateGoalRunWorkspace({ + goalRunId, + tenantId: existingGoalRun.tenantId, + }); + + temporalWorkspaceId = workspaceAnchor.workspaceId; + + const linked = await this.prisma.goalRun.updateMany({ + where: { id: goalRunId, workflowRunId: null }, + data: { workflowRunId: workspaceAnchor.workflowRunId }, + }); + + if (linked.count > 0) { + (goalRun as any).workflowRunId = workspaceAnchor.workflowRunId; + } else if (existingGoalRun.workflowRunId) { + (goalRun as any).workflowRunId = existingGoalRun.workflowRunId; + } + } + + // Idempotency: if we already recorded Temporal workflow identifiers, do not start a second workflow. + if (existingGoalRun.temporalStartedAt || existingGoalRun.temporalRunId) { + this.logger.log( + `Temporal workflow already started for ${goalRunId}: ${existingGoalRun.temporalWorkflowId ?? this.temporalWorkflowService.getWorkflowId(goalRunId)} (run: ${existingGoalRun.temporalRunId ?? 'unknown'})`, + ); + } else { + try { + const constraints = goalRun.constraints as GoalConstraints; + const { workflowId, runId } = await this.temporalWorkflowService.startGoalRunWorkflow({ + goalRunId, + tenantId: goalRun.tenantId, + userId: 'system', // TODO: Pass actual user ID from context + goalDescription: goalRun.goal, + workspaceId: temporalWorkspaceId, + constraints: { + maxSteps: 50, + maxRetries: 3, + maxReplans: 5, + timeoutMs: constraints?.deadlineMinutes ? constraints.deadlineMinutes * 60 * 1000 : 3600000, + requireApprovalForHighRisk: constraints?.riskPolicy?.requireApproval?.length ? true : false, + }, + }); + + this.logger.log(`Temporal workflow started: ${workflowId} (run: ${runId})`); + + // Persist Temporal identifiers for audit/debug (write-once semantics). + const temporalStartedAt = new Date(); + await this.prisma.goalRun.updateMany({ + where: { + id: goalRunId, + temporalRunId: null, + }, + data: { + temporalWorkflowId: workflowId, + temporalRunId: runId, + temporalStartedAt, + }, + }); + + (goalRun as any).temporalWorkflowId = workflowId; + (goalRun as any).temporalRunId = runId; + (goalRun as any).temporalStartedAt = temporalStartedAt; + + // Note: workflowRunId (if present) is an orchestrator-owned workspace anchor. + // Temporal workflows themselves are tracked via temporalWorkflowId/temporalRunId. + + await this.createActivityEvent(goalRunId, { + eventType: 'TEMPORAL_WORKFLOW_STARTED', + title: 'Temporal workflow started', + description: `Workflow ID: ${workflowId}`, + details: { workflowId, runId }, + }); + } catch (error: any) { + this.logger.error(`Failed to start Temporal workflow for ${goalRunId}: ${error.message}`); + // Do not silently change engines; make the failure explicit and fail closed. + await this.createActivityEvent(goalRunId, { + eventType: 'TEMPORAL_WORKFLOW_START_FAILED', + title: 'Temporal workflow failed to start', + description: error.message, + severity: 'error', + }); + + const failed = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + status: GoalRunStatus.FAILED, + phase: GoalRunPhase.FAILED, + error: `Temporal workflow start failed: ${error.message}`, + }, + }); + + return this.toGoalRunResponse(failed); + } + } + } + + // Emit start event to trigger orchestrator loop (legacy path or fallback) + if (executionEngine === GoalRunExecutionEngine.LEGACY_DB_LOOP) { + this.eventEmitter.emit('goal-run.started', { goalRunId }); + } + + await this.createActivityEvent(goalRunId, { + eventType: 'GOAL_STARTED', + title: 'Goal run started', + description: + executionEngine === GoalRunExecutionEngine.TEMPORAL_WORKFLOW + ? 'Temporal workflow initiated' + : 'Orchestrator loop initiated', + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Update goal run phase + */ + async updatePhase( + goalRunId: string, + phase: GoalRunPhase, + opts?: { + waitReason?: GoalRunWaitReason; + waitDetail?: Prisma.InputJsonValue | null; + waitUntil?: Date | null; + }, + ): Promise { + this.logger.log(`Updating goal run ${goalRunId} phase to ${phase}`); + + const previousGoalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + if (!previousGoalRun) { + throw new NotFoundException(`Goal run ${goalRunId} not found`); + } + + const defaultWaitReasonByPhase: Partial> = { + [GoalRunPhase.WAITING_USER_INPUT]: GoalRunWaitReason.USER_INPUT, + [GoalRunPhase.WAITING_APPROVAL]: GoalRunWaitReason.APPROVAL, + [GoalRunPhase.WAITING_PROVIDER]: GoalRunWaitReason.PROVIDER, + [GoalRunPhase.WAITING_CAPACITY]: GoalRunWaitReason.CAPACITY, + [GoalRunPhase.PAUSED]: GoalRunWaitReason.POLICY, + }; + + const derivedWaitReason: GoalRunWaitReason | null = + opts?.waitReason ?? defaultWaitReasonByPhase[phase] ?? null; + + const now = new Date(); + const isSamePhase = previousGoalRun.phase === phase; + const isSameWaitReason = (previousGoalRun as any).waitReason === derivedWaitReason; + + const nextWaitStartedAt = + derivedWaitReason && isSamePhase && isSameWaitReason && (previousGoalRun as any).waitStartedAt + ? (previousGoalRun as any).waitStartedAt + : derivedWaitReason + ? now + : null; + + const nextWaitUntil = + derivedWaitReason && isSamePhase + ? (opts?.waitUntil ?? (previousGoalRun as any).waitUntil ?? null) + : derivedWaitReason + ? (opts?.waitUntil ?? null) + : null; + + const nextWaitDetail = + derivedWaitReason && isSamePhase + ? (opts?.waitDetail ?? (previousGoalRun as any).waitDetail ?? null) + : derivedWaitReason + ? (opts?.waitDetail ?? null) + : null; + + const needsUpdate = + !isSamePhase || + (previousGoalRun as any).waitReason !== derivedWaitReason || + (previousGoalRun as any).waitStartedAt !== nextWaitStartedAt || + (previousGoalRun as any).waitUntil !== nextWaitUntil || + (previousGoalRun as any).waitDetail !== nextWaitDetail; + + if (!needsUpdate) { + return this.toGoalRunResponse(previousGoalRun); + } + + const data: Prisma.GoalRunUpdateInput = { + phase, + waitReason: derivedWaitReason, + waitStartedAt: nextWaitStartedAt, + waitUntil: nextWaitUntil, + waitDetail: nextWaitDetail === null ? Prisma.DbNull : nextWaitDetail, + }; + + const goalRun = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data, + }); + + // Emit phase change event + if (!isSamePhase) { + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId, + previousPhase: previousGoalRun.phase, + newPhase: phase, + }); + + await this.createActivityEvent(goalRunId, { + eventType: 'PHASE_CHANGED', + title: `Phase changed to ${phase}`, + description: `Previous phase: ${previousGoalRun.phase}`, + }); + } + + return this.toGoalRunResponse(goalRun); + } + + /** + * Update goal run status + */ + async updateStatus(goalRunId: string, status: GoalRunStatus): Promise { + this.logger.log(`Updating goal run ${goalRunId} status to ${status}`); + + const data: Prisma.GoalRunUpdateInput = { status }; + + if (status === GoalRunStatus.COMPLETED || status === GoalRunStatus.FAILED) { + data.completedAt = new Date(); + data.phase = + status === GoalRunStatus.COMPLETED ? GoalRunPhase.COMPLETED : GoalRunPhase.FAILED; + // Terminal statuses must not retain stale WAIT fields. + (data as any).waitReason = null; + (data as any).waitDetail = Prisma.DbNull; + (data as any).waitStartedAt = null; + (data as any).waitUntil = null; + } + + const goalRun = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data, + }); + + this.eventEmitter.emit('goal-run.status-changed', { + goalRunId, + status, + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Link a workflow run to this goal run + */ + async linkWorkflowRun(goalRunId: string, workflowRunId: string): Promise { + this.logger.log(`Linking workflow run ${workflowRunId} to goal run ${goalRunId}`); + + const goalRun = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { workflowRunId }, + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Pause goal run + * v1.1.0: Added Temporal workflow signal support + */ + async pauseGoalRun(goalRunId: string): Promise { + this.logger.log(`Pausing goal run ${goalRunId}`); + + const goalRun = await this.updatePhase(goalRunId, GoalRunPhase.PAUSED); + + // Send pause signal to Temporal workflow if active + if (goalRun.workflowRunId && this.temporalWorkflowService?.isEnabled()) { + try { + await this.temporalWorkflowService.pauseWorkflow(goalRunId); + this.logger.log(`Sent pause signal to Temporal workflow for ${goalRunId}`); + } catch (error: any) { + this.logger.warn(`Failed to send pause signal to Temporal: ${error.message}`); + } + } + + this.eventEmitter.emit('goal-run.paused', { goalRunId }); + + await this.createActivityEvent(goalRunId, { + eventType: 'GOAL_PAUSED', + title: 'Goal run paused', + severity: 'warning', + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Resume goal run + * v1.1.0: Added Temporal workflow signal support + */ + async resumeGoalRun(goalRunId: string): Promise { + this.logger.log(`Resuming goal run ${goalRunId}`); + + // Get current state to determine resume phase + const currentGoalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + }, + }, + }); + + if (!currentGoalRun) { + throw new NotFoundException(`Goal run ${goalRunId} not found`); + } + + // v6.0.0: Resume from WAITING_PROVIDER by unblocking provider-waiting steps. + // These steps were blocked after exhausting transient infra retries (capacity/gateway issues). + if (currentGoalRun.phase === GoalRunPhase.WAITING_PROVIDER) { + const unblocked = await this.prisma.checklistItem.updateMany({ + where: { + status: ChecklistItemStatus.BLOCKED, + planVersion: { goalRunId }, + // Machine-authored marker (no NL heuristics): set by OrchestratorLoopService.enterWaitingProvider + actualOutcome: { contains: 'WAITING_PROVIDER' }, + }, + data: { + status: ChecklistItemStatus.PENDING, + startedAt: null, + completedAt: null, + }, + }); + + if (unblocked.count > 0) { + this.logger.log( + `Unblocked ${unblocked.count} WAITING_PROVIDER step(s) for goal run ${goalRunId}`, + ); + } + } + + // Resume to EXECUTING if we have a plan, otherwise PLANNING + const resumePhase = currentGoalRun.planVersions.length > 0 + ? GoalRunPhase.EXECUTING + : GoalRunPhase.PLANNING; + + const goalRun = await this.updatePhase(goalRunId, resumePhase); + + // Send resume signal to Temporal workflow if active + if (goalRun.workflowRunId && this.temporalWorkflowService?.isEnabled()) { + try { + await this.temporalWorkflowService.resumeWorkflow(goalRunId); + this.logger.log(`Sent resume signal to Temporal workflow for ${goalRunId}`); + } catch (error: any) { + this.logger.warn(`Failed to send resume signal to Temporal: ${error.message}`); + } + } + + this.eventEmitter.emit('goal-run.resumed', { goalRunId }); + + await this.createActivityEvent(goalRunId, { + eventType: 'GOAL_RESUMED', + title: 'Goal run resumed', + description: `Resuming in ${resumePhase} phase`, + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Cancel goal run + * v1.1.0: Added Temporal workflow signal support + */ + async cancelGoalRun(goalRunId: string, reason?: string): Promise { + this.logger.log(`Cancelling goal run ${goalRunId}: ${reason}`); + + const now = new Date(); + + const [goalRun, cancelledPrompts] = await this.prisma.$transaction([ + this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + status: GoalRunStatus.CANCELLED, + phase: GoalRunPhase.FAILED, + error: reason || 'Cancelled by user', + completedAt: now, + }, + }), + this.prisma.userPrompt.updateMany({ + where: { + goalRunId, + status: UserPromptStatus.OPEN, + }, + data: { + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.RUN_ENDED, + cancelledAt: now, + }, + }), + ]); + + if (cancelledPrompts.count > 0) { + this.logger.log(`Cancelled ${cancelledPrompts.count} OPEN user prompt(s) for goal run ${goalRunId}`); + } + + const executionEngine = + goalRun.executionEngine ?? GoalRunExecutionEngine.LEGACY_DB_LOOP; + + // Cancel Temporal workflow if active (preferred path for TEMPORAL_WORKFLOW runs). + if (executionEngine === GoalRunExecutionEngine.TEMPORAL_WORKFLOW && this.temporalWorkflowService?.isEnabled()) { + try { + await this.temporalWorkflowService.cancelWorkflow(goalRunId, reason || 'Cancelled by user'); + this.logger.log(`Sent cancel signal to Temporal workflow for ${goalRunId}`); + } catch (error: any) { + this.logger.warn(`Failed to send cancel signal to Temporal: ${error.message}`); + } + } + + // Cancel linked workspace/workflow anchor (applies to legacy and Temporal runs when present). + if (goalRun.workflowRunId) { + try { + await this.workflowService.cancelWorkflow(goalRun.workflowRunId, reason); + } catch (error: any) { + this.logger.warn(`Failed to cancel linked workflow: ${error.message}`); + } + } + + this.eventEmitter.emit('goal-run.cancelled', { goalRunId, reason }); + + await this.createActivityEvent(goalRunId, { + eventType: 'GOAL_CANCELLED', + title: 'Goal run cancelled', + description: reason, + severity: 'warning', + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Complete goal run successfully + */ + async completeGoalRun(goalRunId: string): Promise { + this.logger.log(`Completing goal run ${goalRunId}`); + + const now = new Date(); + + const [goalRun, cancelledPrompts] = await this.prisma.$transaction([ + this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + status: GoalRunStatus.COMPLETED, + phase: GoalRunPhase.COMPLETED, + completedAt: now, + }, + }), + this.prisma.userPrompt.updateMany({ + where: { + goalRunId, + status: UserPromptStatus.OPEN, + }, + data: { + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.RUN_ENDED, + cancelledAt: now, + }, + }), + ]); + + if (cancelledPrompts.count > 0) { + this.logger.log(`Cancelled ${cancelledPrompts.count} OPEN user prompt(s) for goal run ${goalRunId}`); + } + + this.eventEmitter.emit('goal-run.completed', { goalRunId }); + + await this.createActivityEvent(goalRunId, { + eventType: 'GOAL_COMPLETED', + title: 'Goal completed successfully', + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Fail goal run + */ + async failGoalRun(goalRunId: string, error: string): Promise { + this.logger.log(`Failing goal run ${goalRunId}: ${error}`); + + const now = new Date(); + + const [goalRun, cancelledPrompts] = await this.prisma.$transaction([ + this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + status: GoalRunStatus.FAILED, + phase: GoalRunPhase.FAILED, + error, + completedAt: now, + }, + }), + this.prisma.userPrompt.updateMany({ + where: { + goalRunId, + status: UserPromptStatus.OPEN, + }, + data: { + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.RUN_ENDED, + cancelledAt: now, + }, + }), + ]); + + if (cancelledPrompts.count > 0) { + this.logger.log(`Cancelled ${cancelledPrompts.count} OPEN user prompt(s) for goal run ${goalRunId}`); + } + + this.eventEmitter.emit('goal-run.failed', { goalRunId, error }); + + await this.createActivityEvent(goalRunId, { + eventType: 'GOAL_FAILED', + title: 'Goal run failed', + description: error, + severity: 'error', + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Get current plan for a goal run + */ + async getCurrentPlan(goalRunId: string): Promise { + const planVersion = await this.prisma.planVersion.findFirst({ + where: { goalRunId }, + orderBy: { version: 'desc' }, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }); + + if (!planVersion) { + return null; + } + + return { + id: planVersion.id, + version: planVersion.version, + summary: planVersion.summary, + replanReason: planVersion.replanReason, + confidence: planVersion.confidence, + createdAt: planVersion.createdAt, + items: planVersion.checklistItems.map(this.toChecklistItemResponse), + }; + } + + /** + * Get plan history for a goal run + */ + async getPlanHistory(goalRunId: string): Promise { + const planVersions = await this.prisma.planVersion.findMany({ + where: { goalRunId }, + orderBy: { version: 'desc' }, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }); + + return planVersions.map((pv) => ({ + id: pv.id, + version: pv.version, + summary: pv.summary, + replanReason: pv.replanReason, + confidence: pv.confidence, + createdAt: pv.createdAt, + items: pv.checklistItems.map(this.toChecklistItemResponse), + })); + } + + /** + * Submit steering message + * v1.1.0: Added Temporal workflow signal support for steering + */ + async submitSteering(goalRunId: string, input: SteeringInput): Promise { + this.logger.log(`Steering message for goal run ${goalRunId}: ${input.type}`); + + // Get goal run to check if Temporal workflow is active + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + const hasTemporalWorkflow = goalRun?.workflowRunId && this.temporalWorkflowService?.isEnabled(); + + await this.prisma.steeringMessage.create({ + data: { + goalRunId, + type: input.type, + content: input.content, + targetItemId: input.targetItemId, + userId: input.userId, + userEmail: input.userEmail, + }, + }); + + // Handle immediate actions + switch (input.type) { + case 'PAUSE': + await this.pauseGoalRun(goalRunId); + break; + case 'RESUME': + await this.resumeGoalRun(goalRunId); + break; + case 'CANCEL': + await this.cancelGoalRun(goalRunId, input.content); + break; + case 'APPROVE': + // Send approve signal to Temporal if active + if (hasTemporalWorkflow && input.targetItemId) { + try { + await this.temporalWorkflowService!.approveStep( + goalRunId, + input.targetItemId, + input.userId || input.userEmail || 'unknown', + ); + this.logger.log(`Sent approve signal to Temporal for step ${input.targetItemId}`); + } catch (error: any) { + this.logger.warn(`Failed to send approve signal to Temporal: ${error.message}`); + } + } + break; + case 'REJECT': + // Send reject signal to Temporal if active + if (hasTemporalWorkflow && input.targetItemId) { + try { + await this.temporalWorkflowService!.rejectStep( + goalRunId, + input.targetItemId, + input.content || 'No reason provided', + input.userId || input.userEmail, + ); + this.logger.log(`Sent reject signal to Temporal for step ${input.targetItemId}`); + } catch (error: any) { + this.logger.warn(`Failed to send reject signal to Temporal: ${error.message}`); + } + } + break; + case 'INSTRUCTION': + // Send steering instruction to Temporal workflow + if (hasTemporalWorkflow && input.content) { + try { + await this.temporalWorkflowService!.sendSteeringInstruction( + goalRunId, + input.content, + 'NORMAL', + ); + this.logger.log(`Sent steering instruction to Temporal for ${goalRunId}`); + } catch (error: any) { + this.logger.warn(`Failed to send steering instruction to Temporal: ${error.message}`); + } + } + // Fall through to emit event for legacy orchestrator + this.eventEmitter.emit('goal-run.steering-received', { + goalRunId, + type: input.type, + content: input.content, + }); + break; + default: + // Other steering types handled by orchestrator loop + this.eventEmitter.emit('goal-run.steering-received', { + goalRunId, + type: input.type, + content: input.content, + }); + } + + await this.createActivityEvent(goalRunId, { + eventType: 'STEERING_RECEIVED', + title: `Steering: ${input.type}`, + description: input.content, + }); + } + + /** + * Get pending steering messages + */ + async getPendingSteering(goalRunId: string): Promise { + return this.prisma.steeringMessage.findFirst({ + where: { + goalRunId, + acknowledged: false, + }, + orderBy: { createdAt: 'asc' }, + }); + } + + /** + * Acknowledge steering message + */ + async acknowledgeSteering(steeringId: string): Promise { + await this.prisma.steeringMessage.update({ + where: { id: steeringId }, + data: { + acknowledged: true, + acknowledgedAt: new Date(), + }, + }); + } + + /** + * Create activity event + */ + async createActivityEvent( + goalRunId: string, + event: { + eventType: string; + title: string; + description?: string; + severity?: string; + details?: Record; + planVersionId?: string; + checklistItemId?: string; + workflowNodeId?: string; + }, + ): Promise { + await this.prisma.activityEvent.create({ + data: { + goalRunId, + eventType: event.eventType, + title: event.title, + description: event.description, + severity: event.severity || 'info', + details: event.details || {}, + planVersionId: event.planVersionId, + checklistItemId: event.checklistItemId, + workflowNodeId: event.workflowNodeId, + }, + }); + + // Emit for real-time delivery + this.eventEmitter.emit('activity-event.created', { + goalRunId, + ...event, + }); + } + + /** + * Get activity feed for a goal run + */ + async getActivityFeed( + goalRunId: string, + options?: { page?: number; pageSize?: number }, + ): Promise> { + const page = options?.page || 1; + const pageSize = options?.pageSize || 50; + const skip = (page - 1) * pageSize; + + const [events, total] = await Promise.all([ + this.prisma.activityEvent.findMany({ + where: { goalRunId }, + orderBy: { createdAt: 'desc' }, + skip, + take: pageSize, + }), + this.prisma.activityEvent.count({ where: { goalRunId } }), + ]); + + return { + data: events, + total, + page, + pageSize, + hasMore: skip + events.length < total, + }; + } + + /** + * User intervention - take control from agent (Phase 4) + */ + async intervene(goalRunId: string, userId?: string): Promise { + this.logger.log(`User intervention on goal run ${goalRunId}`); + + const goalRun = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + phase: GoalRunPhase.CONTROLLING_DESKTOP, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + + this.eventEmitter.emit('goal-run.intervened', { goalRunId, userId }); + + await this.createActivityEvent(goalRunId, { + eventType: 'USER_INTERVENED', + title: 'User took control', + description: 'Agent paused, user controlling desktop', + severity: 'warning', + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Return control to agent after intervention (Phase 4) + */ + async returnControl(goalRunId: string, userId?: string): Promise { + this.logger.log(`Returning control to agent for goal run ${goalRunId}`); + + const goalRun = await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + phase: GoalRunPhase.EXECUTING, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + + this.eventEmitter.emit('goal-run.control-returned', { goalRunId, userId }); + + await this.createActivityEvent(goalRunId, { + eventType: 'CONTROL_RETURNED', + title: 'Control returned to agent', + description: 'Resuming autonomous execution', + }); + + return this.toGoalRunResponse(goalRun); + } + + /** + * Approve a step (Phase 4) + * v1.1.0: Added Temporal workflow signal support + */ + async approveStep(goalRunId: string, stepId: string, userId?: string): Promise { + this.logger.log(`Approving step ${stepId} for goal run ${goalRunId}`); + + const item = await this.prisma.checklistItem.update({ + where: { id: stepId }, + data: { + status: ChecklistItemStatus.COMPLETED, + actualOutcome: 'Approved by user', + completedAt: new Date(), + }, + }); + + // Update goal run phase if it was waiting for approval + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + if (goalRun?.phase === GoalRunPhase.WAITING_APPROVAL) { + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + phase: GoalRunPhase.EXECUTING, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + } + + // Send approve signal to Temporal workflow if active + if (goalRun?.workflowRunId && this.temporalWorkflowService?.isEnabled()) { + try { + await this.temporalWorkflowService.approveStep(goalRunId, stepId, userId || 'unknown'); + this.logger.log(`Sent approve signal to Temporal for step ${stepId}`); + } catch (error: any) { + this.logger.warn(`Failed to send approve signal to Temporal: ${error.message}`); + } + } + + this.eventEmitter.emit('step.approved', { goalRunId, stepId, userId }); + + await this.createActivityEvent(goalRunId, { + eventType: 'STEP_APPROVED', + title: 'Step approved', + description: item.description, + checklistItemId: stepId, + }); + + return this.toChecklistItemResponse(item); + } + + /** + * Reject a step (Phase 4) + * v1.1.0: Added Temporal workflow signal support + */ + async rejectStep(goalRunId: string, stepId: string, reason: string, userId?: string): Promise { + this.logger.log(`Rejecting step ${stepId} for goal run ${goalRunId}: ${reason}`); + + const item = await this.prisma.checklistItem.update({ + where: { id: stepId }, + data: { + status: ChecklistItemStatus.FAILED, + actualOutcome: `Rejected: ${reason}`, + completedAt: new Date(), + }, + }); + + // Get goal run for Temporal check + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + // Trigger replan + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { phase: GoalRunPhase.REPLANNING }, + }); + + // Send reject signal to Temporal workflow if active + if (goalRun?.workflowRunId && this.temporalWorkflowService?.isEnabled()) { + try { + await this.temporalWorkflowService.rejectStep(goalRunId, stepId, reason, userId); + this.logger.log(`Sent reject signal to Temporal for step ${stepId}`); + } catch (error: any) { + this.logger.warn(`Failed to send reject signal to Temporal: ${error.message}`); + } + } + + this.eventEmitter.emit('step.rejected', { goalRunId, stepId, reason, userId }); + + await this.createActivityEvent(goalRunId, { + eventType: 'STEP_REJECTED', + title: 'Step rejected', + description: `${item.description}: ${reason}`, + severity: 'warning', + checklistItemId: stepId, + }); + + return this.toChecklistItemResponse(item); + } + + /** + * Get goal run metrics + */ + async getMetrics(goalRunId: string): Promise<{ + totalSteps: number; + completedSteps: number; + failedSteps: number; + replanCount: number; + durationMs?: number; + }> { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + include: { + checklistItems: true, + }, + }, + }, + }); + + if (!goalRun) { + throw new NotFoundException(`Goal run ${goalRunId} not found`); + } + + const currentPlan = goalRun.planVersions.sort((a, b) => b.version - a.version)[0]; + const items = currentPlan?.checklistItems || []; + + const completedSteps = items.filter( + (item) => item.status === ChecklistItemStatus.COMPLETED, + ).length; + const failedSteps = items.filter( + (item) => item.status === ChecklistItemStatus.FAILED, + ).length; + + const durationMs = + goalRun.startedAt && goalRun.completedAt + ? goalRun.completedAt.getTime() - goalRun.startedAt.getTime() + : goalRun.startedAt + ? Date.now() - goalRun.startedAt.getTime() + : undefined; + + return { + totalSteps: items.length, + completedSteps, + failedSteps, + replanCount: goalRun.planVersions.length - 1, + durationMs, + }; + } + + // Helper methods + + private toGoalRunResponse(goalRun: any): GoalRunResponse { + return { + id: goalRun.id, + tenantId: goalRun.tenantId, + goal: goalRun.goal, + constraints: goalRun.constraints as GoalConstraints, + phase: goalRun.phase, + status: goalRun.status, + executionEngine: + goalRun.executionEngine ?? GoalRunExecutionEngine.LEGACY_DB_LOOP, + waitReason: goalRun.waitReason ?? null, + waitDetail: goalRun.waitDetail ?? null, + waitStartedAt: goalRun.waitStartedAt ?? null, + waitUntil: goalRun.waitUntil ?? null, + workflowRunId: goalRun.workflowRunId, + temporalWorkflowId: goalRun.temporalWorkflowId ?? null, + temporalRunId: goalRun.temporalRunId ?? null, + temporalStartedAt: goalRun.temporalStartedAt ?? null, + currentPlanVersion: goalRun.currentPlanVersion, + error: goalRun.error, + createdAt: goalRun.createdAt, + updatedAt: goalRun.updatedAt, + startedAt: goalRun.startedAt, + completedAt: goalRun.completedAt, + }; + } + + private toChecklistItemResponse(item: any): ChecklistItemResponse { + const type: StepType = item.type ?? StepType.EXECUTE; + const suggestedTools: string[] = Array.isArray(item.suggestedTools) ? item.suggestedTools : []; + const isExternalInput = type === StepType.USER_INPUT_REQUIRED || hasUserInteractionTool(suggestedTools); + const isLegacyPromptStep = type !== StepType.USER_INPUT_REQUIRED && hasUserInteractionTool(suggestedTools); + + return { + id: item.id, + order: item.order, + description: item.description, + status: item.status, + type, + suggestedTools, + requiresDesktop: item.requiresDesktop ?? false, + executionSurface: + item.executionSurface ?? ((item.requiresDesktop ?? false) ? ExecutionSurface.DESKTOP : ExecutionSurface.TEXT_ONLY), + isExternalInput, + isLegacyPromptStep, + expectedOutcome: item.expectedOutcome, + actualOutcome: item.actualOutcome, + startedAt: item.startedAt, + completedAt: item.completedAt, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/goal-template.service.ts b/packages/bytebot-workflow-orchestrator/src/services/goal-template.service.ts new file mode 100644 index 000000000..6ed40bc2a --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/goal-template.service.ts @@ -0,0 +1,783 @@ +/** + * Goal Template Service + * Phase 7: Enhanced Features + * + * Responsibilities: + * - CRUD operations for goal templates + * - Template instantiation (converting templates to goal runs) + * - Variable substitution and validation + * - Template versioning and publishing + * - Usage tracking and analytics + */ + +import { + Injectable, + Logger, + NotFoundException, + BadRequestException, + ConflictException, +} from '@nestjs/common'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { GoalRunService, GoalConstraints, CreateGoalRunInput } from './goal-run.service'; +import { createId } from '@paralleldrive/cuid2'; +import { Prisma } from '@prisma/client'; + +// Input types +export interface CreateGoalTemplateInput { + tenantId: string; + name: string; + description?: string; + category?: string; + tags?: string[]; + icon?: string; + goalPattern: string; + defaultConstraints?: GoalConstraints; + variables?: TemplateVariable[]; + checklistTemplate?: ChecklistTemplateItem[]; + createdBy?: string; +} + +export interface UpdateGoalTemplateInput { + name?: string; + description?: string; + category?: string; + tags?: string[]; + icon?: string; + goalPattern?: string; + defaultConstraints?: GoalConstraints; + variables?: TemplateVariable[]; + checklistTemplate?: ChecklistTemplateItem[]; +} + +export interface TemplateVariable { + name: string; + type: 'string' | 'number' | 'boolean' | 'select'; + required: boolean; + default?: string | number | boolean; + description?: string; + options?: string[]; // For select type + validation?: { + minLength?: number; + maxLength?: number; + min?: number; + max?: number; + pattern?: string; + }; +} + +export interface ChecklistTemplateItem { + order: number; + descriptionTemplate: string; // Template with {{variables}} + expectedOutcomeTemplate?: string; + suggestedTools?: string[]; + requiresDesktop?: boolean; +} + +export interface CreateFromTemplateInput { + tenantId: string; + templateId: string; + variableValues: Record; + constraintOverrides?: Partial; + autoStart?: boolean; +} + +// Response types +export interface GoalTemplateResponse { + id: string; + tenantId: string; + name: string; + description?: string | null; + category?: string | null; + tags: string[]; + icon?: string | null; + goalPattern: string; + defaultConstraints: GoalConstraints; + variables: TemplateVariable[]; + checklistTemplate: ChecklistTemplateItem[]; + version: string; + isLatest: boolean; + isPublished: boolean; + isBuiltIn: boolean; + usageCount: number; + lastUsedAt?: Date | null; + createdBy?: string | null; + createdAt: Date; + updatedAt: Date; +} + +export interface PaginatedResponse { + data: T[]; + total: number; + page: number; + pageSize: number; + hasMore: boolean; +} + +export interface GoalTemplateFilters { + category?: string; + tags?: string[]; + isPublished?: boolean; + isBuiltIn?: boolean; + search?: string; + page?: number; + pageSize?: number; +} + +@Injectable() +export class GoalTemplateService { + private readonly logger = new Logger(GoalTemplateService.name); + + constructor( + private prisma: PrismaService, + private goalRunService: GoalRunService, + private eventEmitter: EventEmitter2, + ) {} + + /** + * Create a new goal template + */ + async create(input: CreateGoalTemplateInput): Promise { + const templateId = `gt-${createId()}`; + + this.logger.log(`Creating goal template ${templateId}: "${input.name}"`); + + // Validate variables in goal pattern + this.validateGoalPattern(input.goalPattern, input.variables || []); + + // Check for duplicate name + const existing = await this.prisma.goalTemplate.findFirst({ + where: { + tenantId: input.tenantId, + name: input.name, + isLatest: true, + }, + }); + + if (existing) { + throw new ConflictException(`Template with name "${input.name}" already exists`); + } + + const template = await this.prisma.goalTemplate.create({ + data: { + id: templateId, + tenantId: input.tenantId, + name: input.name, + description: input.description, + category: input.category, + tags: input.tags || [], + icon: input.icon, + goalPattern: input.goalPattern, + defaultConstraints: (input.defaultConstraints || {}) as object, + variables: (input.variables || []) as object, + checklistTemplate: (input.checklistTemplate || []) as object, + version: '1.0.0', + isLatest: true, + isPublished: false, + isBuiltIn: false, + createdBy: input.createdBy, + }, + }); + + this.eventEmitter.emit('goal-template.created', { + templateId, + tenantId: input.tenantId, + name: input.name, + }); + + return this.toTemplateResponse(template); + } + + /** + * Get template by ID + */ + async findById(templateId: string): Promise { + const template = await this.prisma.goalTemplate.findUnique({ + where: { id: templateId }, + }); + + if (!template) { + throw new NotFoundException(`Template ${templateId} not found`); + } + + return this.toTemplateResponse(template); + } + + /** + * List templates for a tenant + */ + async findByTenant( + tenantId: string, + filters?: GoalTemplateFilters, + ): Promise> { + const page = filters?.page || 1; + const pageSize = filters?.pageSize || 20; + const skip = (page - 1) * pageSize; + + const where: Prisma.GoalTemplateWhereInput = { + OR: [ + { tenantId }, + { isBuiltIn: true }, // Include built-in templates for all tenants + ], + isLatest: true, + ...(filters?.category && { category: filters.category }), + ...(filters?.isPublished !== undefined && { isPublished: filters.isPublished }), + ...(filters?.isBuiltIn !== undefined && { isBuiltIn: filters.isBuiltIn }), + ...(filters?.tags?.length && { tags: { hasSome: filters.tags } }), + ...(filters?.search && { + OR: [ + { name: { contains: filters.search, mode: Prisma.QueryMode.insensitive } }, + { description: { contains: filters.search, mode: Prisma.QueryMode.insensitive } }, + ], + }), + }; + + const [templates, total] = await Promise.all([ + this.prisma.goalTemplate.findMany({ + where, + orderBy: [{ usageCount: 'desc' }, { createdAt: 'desc' }], + skip, + take: pageSize, + }), + this.prisma.goalTemplate.count({ where }), + ]); + + return { + data: templates.map(this.toTemplateResponse), + total, + page, + pageSize, + hasMore: skip + templates.length < total, + }; + } + + /** + * Get templates by category + */ + async findByCategory(tenantId: string, category: string): Promise { + const templates = await this.prisma.goalTemplate.findMany({ + where: { + OR: [{ tenantId }, { isBuiltIn: true }], + category, + isLatest: true, + isPublished: true, + }, + orderBy: { usageCount: 'desc' }, + }); + + return templates.map(this.toTemplateResponse); + } + + /** + * Get all categories for a tenant + */ + async getCategories(tenantId: string): Promise<{ category: string; count: number }[]> { + const categories = await this.prisma.goalTemplate.groupBy({ + by: ['category'], + where: { + OR: [{ tenantId }, { isBuiltIn: true }], + isLatest: true, + isPublished: true, + category: { not: null }, + }, + _count: { category: true }, + }); + + return categories + .filter((c) => c.category) + .map((c) => ({ + category: c.category!, + count: c._count.category, + })); + } + + /** + * Update a template (creates new version) + */ + async update( + templateId: string, + input: UpdateGoalTemplateInput, + ): Promise { + const existing = await this.prisma.goalTemplate.findUnique({ + where: { id: templateId }, + }); + + if (!existing) { + throw new NotFoundException(`Template ${templateId} not found`); + } + + if (existing.isBuiltIn) { + throw new BadRequestException('Cannot update built-in templates'); + } + + // Validate if goal pattern changed + if (input.goalPattern) { + const variables = input.variables || (existing.variables as unknown as TemplateVariable[]); + this.validateGoalPattern(input.goalPattern, variables); + } + + const template = await this.prisma.goalTemplate.update({ + where: { id: templateId }, + data: { + name: input.name, + description: input.description, + category: input.category, + tags: input.tags, + icon: input.icon, + goalPattern: input.goalPattern, + ...(input.defaultConstraints && { + defaultConstraints: input.defaultConstraints as object, + }), + ...(input.variables && { variables: input.variables as object }), + ...(input.checklistTemplate && { + checklistTemplate: input.checklistTemplate as object, + }), + }, + }); + + this.eventEmitter.emit('goal-template.updated', { + templateId, + tenantId: template.tenantId, + }); + + return this.toTemplateResponse(template); + } + + /** + * Create new version of a template + */ + async createNewVersion( + templateId: string, + input: UpdateGoalTemplateInput, + ): Promise { + const existing = await this.prisma.goalTemplate.findUnique({ + where: { id: templateId }, + }); + + if (!existing) { + throw new NotFoundException(`Template ${templateId} not found`); + } + + // Parse and increment version + const [major, minor, patch] = existing.version.split('.').map(Number); + const newVersion = `${major}.${minor}.${patch + 1}`; + + // Mark old version as not latest + await this.prisma.goalTemplate.update({ + where: { id: templateId }, + data: { isLatest: false }, + }); + + // Create new version + const newTemplateId = `gt-${createId()}`; + const template = await this.prisma.goalTemplate.create({ + data: { + id: newTemplateId, + tenantId: existing.tenantId, + name: input.name || existing.name, + description: input.description ?? existing.description, + category: input.category ?? existing.category, + tags: input.tags || existing.tags, + icon: input.icon ?? existing.icon, + goalPattern: input.goalPattern || existing.goalPattern, + defaultConstraints: (input.defaultConstraints || + existing.defaultConstraints) as object, + variables: (input.variables || existing.variables) as object, + checklistTemplate: (input.checklistTemplate || + existing.checklistTemplate) as object, + version: newVersion, + isLatest: true, + isPublished: existing.isPublished, + isBuiltIn: false, + previousVersionId: templateId, + createdBy: existing.createdBy, + }, + }); + + this.eventEmitter.emit('goal-template.version-created', { + templateId: newTemplateId, + previousId: templateId, + version: newVersion, + }); + + return this.toTemplateResponse(template); + } + + /** + * Publish a template + */ + async publish(templateId: string): Promise { + const template = await this.prisma.goalTemplate.update({ + where: { id: templateId }, + data: { isPublished: true }, + }); + + this.eventEmitter.emit('goal-template.published', { + templateId, + tenantId: template.tenantId, + }); + + return this.toTemplateResponse(template); + } + + /** + * Unpublish a template + */ + async unpublish(templateId: string): Promise { + const template = await this.prisma.goalTemplate.update({ + where: { id: templateId }, + data: { isPublished: false }, + }); + + this.eventEmitter.emit('goal-template.unpublished', { + templateId, + tenantId: template.tenantId, + }); + + return this.toTemplateResponse(template); + } + + /** + * Delete a template + */ + async delete(templateId: string): Promise { + const template = await this.prisma.goalTemplate.findUnique({ + where: { id: templateId }, + }); + + if (!template) { + throw new NotFoundException(`Template ${templateId} not found`); + } + + if (template.isBuiltIn) { + throw new BadRequestException('Cannot delete built-in templates'); + } + + await this.prisma.goalTemplate.delete({ + where: { id: templateId }, + }); + + this.eventEmitter.emit('goal-template.deleted', { + templateId, + tenantId: template.tenantId, + }); + } + + /** + * Create a goal run from a template + */ + async createGoalRunFromTemplate( + input: CreateFromTemplateInput, + ): Promise { + const template = await this.prisma.goalTemplate.findUnique({ + where: { id: input.templateId }, + }); + + if (!template) { + throw new NotFoundException(`Template ${input.templateId} not found`); + } + + // Validate required variables + const variables = template.variables as unknown as TemplateVariable[]; + this.validateVariableValues(variables, input.variableValues); + + // Substitute variables in goal pattern + const goal = this.substituteVariables( + template.goalPattern, + input.variableValues, + ); + + // Merge constraints + const constraints: GoalConstraints = { + ...(template.defaultConstraints as GoalConstraints), + ...input.constraintOverrides, + }; + + // Create the goal run + const goalRunInput: CreateGoalRunInput = { + tenantId: input.tenantId, + goal, + constraints, + autoStart: input.autoStart, + }; + + const goalRun = await this.goalRunService.createFromGoal(goalRunInput); + + // Track template usage + await this.prisma.goalTemplate.update({ + where: { id: input.templateId }, + data: { + usageCount: { increment: 1 }, + lastUsedAt: new Date(), + }, + }); + + // Create junction record + await this.prisma.goalRunFromTemplate.create({ + data: { + goalRunId: goalRun.id, + templateId: input.templateId, + variableValues: input.variableValues as object, + }, + }); + + this.eventEmitter.emit('goal-template.used', { + templateId: input.templateId, + goalRunId: goalRun.id, + tenantId: input.tenantId, + }); + + return { + ...goalRun, + template: { + id: template.id, + name: template.name, + version: template.version, + }, + variableValues: input.variableValues, + }; + } + + /** + * Preview template instantiation + */ + async previewInstantiation( + templateId: string, + variableValues: Record, + ): Promise<{ + goal: string; + checklist: { order: number; description: string; expectedOutcome?: string }[]; + }> { + const template = await this.prisma.goalTemplate.findUnique({ + where: { id: templateId }, + }); + + if (!template) { + throw new NotFoundException(`Template ${templateId} not found`); + } + + const goal = this.substituteVariables(template.goalPattern, variableValues); + + const checklistTemplate = template.checklistTemplate as unknown as ChecklistTemplateItem[]; + const checklist = checklistTemplate.map((item) => ({ + order: item.order, + description: this.substituteVariables(item.descriptionTemplate, variableValues), + expectedOutcome: item.expectedOutcomeTemplate + ? this.substituteVariables(item.expectedOutcomeTemplate, variableValues) + : undefined, + })); + + return { goal, checklist }; + } + + /** + * Get template usage statistics + */ + async getUsageStats( + templateId: string, + ): Promise<{ + totalUsage: number; + last7Days: number; + last30Days: number; + successRate: number; + avgDurationMs: number; + }> { + const sevenDaysAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000); + const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000); + + const [template, recentUsage, monthUsage] = await Promise.all([ + this.prisma.goalTemplate.findUnique({ where: { id: templateId } }), + this.prisma.goalRunFromTemplate.count({ + where: { + templateId, + createdAt: { gte: sevenDaysAgo }, + }, + }), + this.prisma.goalRunFromTemplate.count({ + where: { + templateId, + createdAt: { gte: thirtyDaysAgo }, + }, + }), + ]); + + if (!template) { + throw new NotFoundException(`Template ${templateId} not found`); + } + + // Get completion stats + const goalRunIds = await this.prisma.goalRunFromTemplate.findMany({ + where: { templateId }, + select: { goalRunId: true }, + }); + + const stats = await this.prisma.goalRun.aggregate({ + where: { + id: { in: goalRunIds.map((g) => g.goalRunId) }, + status: 'COMPLETED', + }, + _count: true, + _avg: { + currentPlanVersion: true, + }, + }); + + // Calculate average duration from completed goal runs with both timestamps + const completedRuns = await this.prisma.goalRun.findMany({ + where: { + id: { in: goalRunIds.map((g) => g.goalRunId) }, + status: 'COMPLETED', + startedAt: { not: null }, + completedAt: { not: null }, + }, + select: { + startedAt: true, + completedAt: true, + }, + }); + + let avgDurationMs = 0; + if (completedRuns.length > 0) { + const totalDurationMs = completedRuns.reduce((sum, run) => { + if (run.startedAt && run.completedAt) { + return sum + (run.completedAt.getTime() - run.startedAt.getTime()); + } + return sum; + }, 0); + avgDurationMs = Math.round(totalDurationMs / completedRuns.length); + } + + const completedCount = stats._count || 0; + const totalCount = goalRunIds.length; + + return { + totalUsage: template.usageCount, + last7Days: recentUsage, + last30Days: monthUsage, + successRate: totalCount > 0 ? (completedCount / totalCount) * 100 : 0, + avgDurationMs, + }; + } + + // Helper methods + + private validateGoalPattern(pattern: string, variables: TemplateVariable[]): void { + // Find all variables in pattern + const patternVars = pattern.match(/\{\{(\w+)\}\}/g) || []; + const patternVarNames = patternVars.map((v) => v.replace(/\{\{|\}\}/g, '')); + + // Check all pattern variables are defined + const definedVarNames = variables.map((v) => v.name); + for (const varName of patternVarNames) { + if (!definedVarNames.includes(varName)) { + throw new BadRequestException( + `Variable "{{${varName}}}" used in pattern but not defined`, + ); + } + } + } + + private validateVariableValues( + variables: TemplateVariable[], + values: Record, + ): void { + for (const variable of variables) { + const value = values[variable.name]; + + if (variable.required && (value === undefined || value === null || value === '')) { + throw new BadRequestException(`Required variable "${variable.name}" is missing`); + } + + if (value !== undefined && value !== null) { + // Type validation + const actualType = typeof value; + if (variable.type === 'number' && actualType !== 'number') { + throw new BadRequestException( + `Variable "${variable.name}" must be a number`, + ); + } + if (variable.type === 'boolean' && actualType !== 'boolean') { + throw new BadRequestException( + `Variable "${variable.name}" must be a boolean`, + ); + } + if (variable.type === 'select' && variable.options) { + if (!variable.options.includes(String(value))) { + throw new BadRequestException( + `Variable "${variable.name}" must be one of: ${variable.options.join(', ')}`, + ); + } + } + + // Validation rules + if (variable.validation) { + const v = variable.validation; + if (typeof value === 'string') { + if (v.minLength && value.length < v.minLength) { + throw new BadRequestException( + `Variable "${variable.name}" must be at least ${v.minLength} characters`, + ); + } + if (v.maxLength && value.length > v.maxLength) { + throw new BadRequestException( + `Variable "${variable.name}" must be at most ${v.maxLength} characters`, + ); + } + if (v.pattern && !new RegExp(v.pattern).test(value)) { + throw new BadRequestException( + `Variable "${variable.name}" does not match required pattern`, + ); + } + } + if (typeof value === 'number') { + if (v.min !== undefined && value < v.min) { + throw new BadRequestException( + `Variable "${variable.name}" must be at least ${v.min}`, + ); + } + if (v.max !== undefined && value > v.max) { + throw new BadRequestException( + `Variable "${variable.name}" must be at most ${v.max}`, + ); + } + } + } + } + } + } + + private substituteVariables( + template: string, + values: Record, + ): string { + return template.replace(/\{\{(\w+)\}\}/g, (match, varName) => { + const value = values[varName]; + return value !== undefined ? String(value) : match; + }); + } + + private toTemplateResponse(template: any): GoalTemplateResponse { + return { + id: template.id, + tenantId: template.tenantId, + name: template.name, + description: template.description, + category: template.category, + tags: template.tags, + icon: template.icon, + goalPattern: template.goalPattern, + defaultConstraints: template.defaultConstraints as GoalConstraints, + variables: template.variables as TemplateVariable[], + checklistTemplate: template.checklistTemplate as ChecklistTemplateItem[], + version: template.version, + isLatest: template.isLatest, + isPublished: template.isPublished, + isBuiltIn: template.isBuiltIn, + usageCount: template.usageCount, + lastUsedAt: template.lastUsedAt, + createdBy: template.createdBy, + createdAt: template.createdAt, + updatedAt: template.updatedAt, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/high-risk.service.ts b/packages/bytebot-workflow-orchestrator/src/services/high-risk.service.ts new file mode 100644 index 000000000..594b12117 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/high-risk.service.ts @@ -0,0 +1,450 @@ +/** + * High-Risk Action Service + * v1.0.0 M5: Classifies actions and generates deterministic fingerprints + * + * Best Practices Applied: + * - Risk-based gating (categorize by risk level) + * - Deterministic fingerprinting for idempotency + * - Context preservation for approval decisions + * + * References: + * - https://blog.algomaster.io/p/idempotency-in-distributed-systems + * - https://www.permit.io/blog/human-in-the-loop-for-ai-agents-best-practices + */ + +import { Injectable, Logger } from '@nestjs/common'; +import * as crypto from 'crypto'; + +/** + * Risk levels for actions + */ +export enum RiskLevel { + LOW = 'LOW', + MEDIUM = 'MEDIUM', + HIGH = 'HIGH', + CRITICAL = 'CRITICAL', +} + +/** + * Ordinal mapping for risk level comparison + * Used because string comparison doesn't work correctly + * (e.g., 'HIGH' < 'MEDIUM' in ASCII) + */ +const RISK_LEVEL_ORDER: Record = { + [RiskLevel.LOW]: 0, + [RiskLevel.MEDIUM]: 1, + [RiskLevel.HIGH]: 2, + [RiskLevel.CRITICAL]: 3, +}; + +/** + * Compare risk levels using ordinal values + * @returns true if current >= threshold + */ +export function isRiskLevelAtLeast(current: RiskLevel, threshold: RiskLevel): boolean { + return RISK_LEVEL_ORDER[current] >= RISK_LEVEL_ORDER[threshold]; +} + +/** + * Get the numeric order value for a risk level + * Useful for sorting or custom comparisons + */ +export function getRiskLevelOrder(level: RiskLevel): number { + return RISK_LEVEL_ORDER[level]; +} + +/** + * Action context for risk assessment + */ +export interface ActionContext { + toolName: string; + toolParams: Record; + currentUrl?: string; + nodeRunId: string; + workspaceId: string; + tenantId: string; +} + +/** + * High-risk action classification result + */ +export interface ActionClassification { + riskLevel: RiskLevel; + requiresApproval: boolean; + reason: string; + actionHash: string; + previewData: ActionPreview; +} + +/** + * Preview data for approval UI + */ +export interface ActionPreview { + toolName: string; + category: string; + summary: string; + recipient?: string; + subject?: string; + amount?: string; + bodyPreview?: string; + context: Record; +} + +/** + * High-risk gateway tools requiring approval + */ +const HIGH_RISK_GATEWAY_TOOLS: Record) => Partial; +}> = { + 'communications_send_email': { + riskLevel: RiskLevel.HIGH, + reason: 'Sends email to external recipient - cannot be undone', + extractPreview: (params) => ({ + category: 'Communications', + summary: `Send email to ${params.to || params.recipient || 'unknown'}`, + recipient: params.to || params.recipient, + subject: params.subject, + bodyPreview: truncate(params.body || params.message, 200), + }), + }, + 'communications_send_sms': { + riskLevel: RiskLevel.HIGH, + reason: 'Sends SMS to external number - cannot be undone', + extractPreview: (params) => ({ + category: 'Communications', + summary: `Send SMS to ${params.to || params.phone_number || 'unknown'}`, + recipient: params.to || params.phone_number, + bodyPreview: truncate(params.message || params.body, 160), + }), + }, + 'integration_webhook': { + riskLevel: RiskLevel.MEDIUM, + reason: 'Sends data to external webhook endpoint', + extractPreview: (params) => ({ + category: 'Integration', + summary: `Send webhook to ${new URL(params.url || '').hostname || 'unknown'}`, + context: { url: params.url }, + }), + }, + 'integration_api_call': { + riskLevel: RiskLevel.MEDIUM, + reason: 'Makes external API call with potential side effects', + extractPreview: (params) => ({ + category: 'Integration', + summary: `API call to ${params.endpoint || params.url || 'unknown'}`, + context: { method: params.method, endpoint: params.endpoint }, + }), + }, + 'calendar_delete_event': { + riskLevel: RiskLevel.MEDIUM, + reason: 'Deletes calendar event - may affect scheduled meetings', + extractPreview: (params) => ({ + category: 'Calendar', + summary: `Delete calendar event: ${params.event_id || 'unknown'}`, + }), + }, + 'notes_delete': { + riskLevel: RiskLevel.MEDIUM, + reason: 'Deletes notes - data loss potential', + extractPreview: (params) => ({ + category: 'Notes', + summary: `Delete note: ${params.note_id || 'unknown'}`, + }), + }, + 'rag_ingest': { + riskLevel: RiskLevel.MEDIUM, + reason: 'Ingests documents into knowledge base', + extractPreview: (params) => ({ + category: 'RAG', + summary: `Ingest ${params.documents?.length || 1} document(s)`, + }), + }, +}; + +/** + * High-risk URL patterns for desktop actions + */ +const HIGH_RISK_URL_PATTERNS = [ + { pattern: /checkout\./i, reason: 'Checkout page - payment action' }, + { pattern: /payment\./i, reason: 'Payment page - financial action' }, + { pattern: /pay\./i, reason: 'Payment page - financial action' }, + { pattern: /buy\./i, reason: 'Purchase page - financial action' }, + { pattern: /order\./i, reason: 'Order page - purchase action' }, + { pattern: /cart.*confirm/i, reason: 'Cart confirmation - purchase action' }, + { pattern: /billing\./i, reason: 'Billing page - financial action' }, + { pattern: /subscribe/i, reason: 'Subscription page - recurring payment' }, + { pattern: /bank/i, reason: 'Banking page - financial action' }, + { pattern: /transfer/i, reason: 'Transfer page - financial action' }, +]; + +/** + * High-risk desktop action types + */ +const HIGH_RISK_DESKTOP_ACTIONS = [ + 'submit_form', + 'click_buy', + 'click_purchase', + 'click_confirm', + 'click_pay', + 'click_subscribe', + 'click_delete', + 'click_remove', + 'click_cancel', +]; + +/** + * Helper to truncate text for preview + */ +function truncate(text: string | undefined, maxLength: number): string | undefined { + if (!text) return undefined; + if (text.length <= maxLength) return text; + return text.substring(0, maxLength - 3) + '...'; +} + +/** + * Canonicalize object for deterministic hashing + * Sorts keys and removes undefined values + */ +function canonicalize(obj: Record): string { + const sortedKeys = Object.keys(obj).sort(); + const sortedObj: Record = {}; + + for (const key of sortedKeys) { + const value = obj[key]; + if (value !== undefined && value !== null) { + if (typeof value === 'object' && !Array.isArray(value)) { + sortedObj[key] = JSON.parse(canonicalize(value)); + } else { + sortedObj[key] = value; + } + } + } + + return JSON.stringify(sortedObj); +} + +@Injectable() +export class HighRiskService { + private readonly logger = new Logger(HighRiskService.name); + + /** + * Classify an action and determine if approval is required + */ + classifyAction(context: ActionContext): ActionClassification { + const { toolName, toolParams, currentUrl } = context; + + // Check if it's a known high-risk gateway tool + const gatewayRisk = HIGH_RISK_GATEWAY_TOOLS[toolName]; + if (gatewayRisk) { + const previewExtractor = gatewayRisk.extractPreview; + const previewData: ActionPreview = { + toolName, + category: 'Gateway', + summary: '', + context: {}, + ...previewExtractor(toolParams), + }; + + return { + riskLevel: gatewayRisk.riskLevel, + requiresApproval: isRiskLevelAtLeast(gatewayRisk.riskLevel, RiskLevel.MEDIUM), + reason: gatewayRisk.reason, + actionHash: this.generateActionHash(context), + previewData, + }; + } + + // Check desktop actions with URL context + if (toolName.startsWith('desktop_') || toolName === 'computer') { + const desktopRisk = this.classifyDesktopAction(toolName, toolParams, currentUrl); + if (desktopRisk.requiresApproval) { + return { + ...desktopRisk, + actionHash: this.generateActionHash(context), + }; + } + } + + // Default: low risk, no approval required + return { + riskLevel: RiskLevel.LOW, + requiresApproval: false, + reason: 'Standard operation', + actionHash: this.generateActionHash(context), + previewData: { + toolName, + category: this.getToolCategory(toolName), + summary: `Execute ${toolName}`, + context: toolParams, + }, + }; + } + + /** + * Classify desktop action based on action type and URL context + */ + private classifyDesktopAction( + toolName: string, + params: Record, + currentUrl?: string, + ): Omit { + // Check if action type is high-risk + const actionType = params.action || params.type || ''; + const isHighRiskAction = HIGH_RISK_DESKTOP_ACTIONS.some( + (risky) => actionType.toLowerCase().includes(risky.replace('click_', '').replace('_', '')), + ); + + // Check if URL is high-risk + let urlRiskReason: string | null = null; + if (currentUrl) { + for (const { pattern, reason } of HIGH_RISK_URL_PATTERNS) { + if (pattern.test(currentUrl)) { + urlRiskReason = reason; + break; + } + } + } + + // Determine overall risk + if (isHighRiskAction && urlRiskReason) { + return { + riskLevel: RiskLevel.CRITICAL, + requiresApproval: true, + reason: `${urlRiskReason} with ${actionType} action`, + previewData: { + toolName, + category: 'Desktop', + summary: `${actionType} on ${new URL(currentUrl!).hostname}`, + context: { url: currentUrl, action: actionType }, + }, + }; + } + + if (urlRiskReason) { + return { + riskLevel: RiskLevel.HIGH, + requiresApproval: true, + reason: urlRiskReason, + previewData: { + toolName, + category: 'Desktop', + summary: `Desktop action on ${new URL(currentUrl!).hostname}`, + context: { url: currentUrl, action: actionType }, + }, + }; + } + + if (isHighRiskAction) { + return { + riskLevel: RiskLevel.MEDIUM, + requiresApproval: true, + reason: `High-risk action type: ${actionType}`, + previewData: { + toolName, + category: 'Desktop', + summary: `${actionType} action`, + context: { action: actionType }, + }, + }; + } + + return { + riskLevel: RiskLevel.LOW, + requiresApproval: false, + reason: 'Standard desktop operation', + previewData: { + toolName, + category: 'Desktop', + summary: `Desktop ${actionType || 'action'}`, + context: params, + }, + }; + } + + /** + * Generate deterministic hash for an action + * Used for idempotency and approval tracking + * + * Best practice: Use canonical representation for deterministic hashing + */ + generateActionHash(context: ActionContext): string { + const { toolName, toolParams, currentUrl, nodeRunId, workspaceId } = context; + + // Extract key identifying fields based on tool type + const keyFields: Record = { + tool: toolName, + nodeRunId, + workspaceId, + }; + + // Add tool-specific identifying fields + if (toolName === 'communications_send_email') { + keyFields.recipient = toolParams.to || toolParams.recipient; + keyFields.subject = toolParams.subject; + } else if (toolName === 'communications_send_sms') { + keyFields.recipient = toolParams.to || toolParams.phone_number; + keyFields.message = toolParams.message?.substring(0, 50); // First 50 chars + } else if (currentUrl) { + // For desktop actions, include URL domain + try { + keyFields.domain = new URL(currentUrl).hostname; + } catch { + keyFields.domain = currentUrl; + } + } + + // Add critical params that identify the action + if (toolParams.amount) keyFields.amount = toolParams.amount; + if (toolParams.event_id) keyFields.event_id = toolParams.event_id; + if (toolParams.note_id) keyFields.note_id = toolParams.note_id; + + // Generate deterministic hash + const canonicalString = canonicalize(keyFields); + const hash = crypto + .createHash('sha256') + .update(canonicalString) + .digest('hex') + .substring(0, 16); + + this.logger.debug(`Generated action hash: ${hash} for ${toolName}`); + return hash; + } + + /** + * Get tool category for display + */ + private getToolCategory(toolName: string): string { + const [prefix] = toolName.split('_'); + const categories: Record = { + communications: 'Communications', + search: 'Search', + weather: 'Weather', + calendar: 'Calendar', + notes: 'Notes', + document: 'Document', + data: 'Data', + file: 'File', + integration: 'Integration', + rag: 'RAG', + desktop: 'Desktop', + computer: 'Desktop', + }; + return categories[prefix] || 'Other'; + } + + /** + * Check if a tool requires approval by default (without context) + */ + isHighRiskTool(toolName: string): boolean { + return toolName in HIGH_RISK_GATEWAY_TOOLS; + } + + /** + * Get list of high-risk tool names + */ + getHighRiskToolNames(): string[] { + return Object.keys(HIGH_RISK_GATEWAY_TOOLS); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/idempotency.service.ts b/packages/bytebot-workflow-orchestrator/src/services/idempotency.service.ts new file mode 100644 index 000000000..c2ae1fdad --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/idempotency.service.ts @@ -0,0 +1,370 @@ +/** + * Idempotency Service + * v1.0.0 M5: Ensures high-risk actions execute exactly once + * + * Best Practices Applied: + * - Unique idempotency keys (nodeRunId + actionHash) + * - Database-level uniqueness constraints + * - Status tracking (PROCESSING, COMPLETED, FAILED) + * - TTL-based cleanup for expired records + * - Cached results for duplicate requests + * + * References: + * - https://aws.amazon.com/builders-library/making-retries-safe-with-idempotent-APIs/ + * - https://microservices.io/patterns/communication-style/idempotent-consumer.html + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; + +/** + * Idempotency record status + */ +export enum IdempotencyStatus { + PROCESSING = 'PROCESSING', + COMPLETED = 'COMPLETED', + FAILED = 'FAILED', +} + +/** + * Idempotency check result + */ +export interface IdempotencyCheckResult { + isNew: boolean; + status: IdempotencyStatus; + result?: any; + errorMessage?: string; + recordId?: string; +} + +/** + * Execution result to store + */ +export interface ExecutionResult { + success: boolean; + result?: any; + errorMessage?: string; +} + +/** + * Default TTL for idempotency records in hours + */ +const DEFAULT_IDEMPOTENCY_TTL_HOURS = 24; + +@Injectable() +export class IdempotencyService { + private readonly logger = new Logger(IdempotencyService.name); + private readonly ttlHours: number; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + ) { + this.ttlHours = parseInt( + this.configService.get( + 'IDEMPOTENCY_TTL_HOURS', + String(DEFAULT_IDEMPOTENCY_TTL_HOURS), + ), + 10, + ); + + this.logger.log(`Idempotency TTL: ${this.ttlHours} hours`); + } + + /** + * Generate idempotency key from node run ID and action hash + */ + generateKey(nodeRunId: string, actionHash: string): string { + return `${nodeRunId}:${actionHash}`; + } + + /** + * Check if an action can be executed (acquire idempotency lock) + * + * Returns: + * - isNew: true if this is a new action that can be executed + * - status: current status if already exists + * - result: cached result if COMPLETED + */ + async checkAndAcquire( + nodeRunId: string, + actionHash: string, + ): Promise { + const idempotencyKey = this.generateKey(nodeRunId, actionHash); + const expiresAt = new Date(Date.now() + this.ttlHours * 60 * 60 * 1000); + + try { + // Try to create a new record (will fail if key exists due to unique constraint) + const record = await this.prisma.idempotencyRecord.create({ + data: { + idempotencyKey, + actionHash, + status: IdempotencyStatus.PROCESSING, + expiresAt, + }, + }); + + this.logger.debug(`Created idempotency record: ${record.id} for key ${idempotencyKey}`); + + return { + isNew: true, + status: IdempotencyStatus.PROCESSING, + recordId: record.id, + }; + } catch (error: any) { + // P2002 is Prisma's unique constraint violation + if (error.code === 'P2002') { + // Record already exists - check its status + const existing = await this.prisma.idempotencyRecord.findUnique({ + where: { idempotencyKey }, + }); + + if (!existing) { + // Race condition - try again + this.logger.warn(`Idempotency race condition for key ${idempotencyKey}, retrying`); + return this.checkAndAcquire(nodeRunId, actionHash); + } + + this.logger.debug( + `Idempotency key ${idempotencyKey} exists with status ${existing.status}`, + ); + + return { + isNew: false, + status: existing.status as IdempotencyStatus, + result: existing.result as any, + errorMessage: existing.errorMessage || undefined, + recordId: existing.id, + }; + } + + throw error; + } + } + + /** + * Mark action as completed with result + */ + async markCompleted( + nodeRunId: string, + actionHash: string, + result: any, + ): Promise { + const idempotencyKey = this.generateKey(nodeRunId, actionHash); + + await this.prisma.idempotencyRecord.update({ + where: { idempotencyKey }, + data: { + status: IdempotencyStatus.COMPLETED, + result, + completedAt: new Date(), + }, + }); + + this.logger.debug(`Marked idempotency key ${idempotencyKey} as COMPLETED`); + } + + /** + * Mark action as failed with error + */ + async markFailed( + nodeRunId: string, + actionHash: string, + errorMessage: string, + ): Promise { + const idempotencyKey = this.generateKey(nodeRunId, actionHash); + + await this.prisma.idempotencyRecord.update({ + where: { idempotencyKey }, + data: { + status: IdempotencyStatus.FAILED, + errorMessage, + completedAt: new Date(), + }, + }); + + this.logger.debug(`Marked idempotency key ${idempotencyKey} as FAILED: ${errorMessage}`); + } + + /** + * Get idempotency record by key + */ + async getRecord( + nodeRunId: string, + actionHash: string, + ): Promise<{ + id: string; + status: IdempotencyStatus; + result?: any; + errorMessage?: string; + createdAt: Date; + completedAt?: Date; + } | null> { + const idempotencyKey = this.generateKey(nodeRunId, actionHash); + + const record = await this.prisma.idempotencyRecord.findUnique({ + where: { idempotencyKey }, + }); + + if (!record) { + return null; + } + + return { + id: record.id, + status: record.status as IdempotencyStatus, + result: record.result as any, + errorMessage: record.errorMessage || undefined, + createdAt: record.createdAt, + completedAt: record.completedAt || undefined, + }; + } + + /** + * Execute an action with idempotency guarantee + * + * This is the main entry point for idempotent execution: + * 1. Check if action was already executed + * 2. If COMPLETED, return cached result + * 3. If PROCESSING, wait and check again (another process is handling it) + * 4. If new, execute and store result + */ + async executeWithIdempotency( + nodeRunId: string, + actionHash: string, + executor: () => Promise, + options?: { + waitForProcessingMs?: number; + maxWaitAttempts?: number; + }, + ): Promise<{ result: T; fromCache: boolean }> { + const { waitForProcessingMs = 2000, maxWaitAttempts = 30 } = options || {}; + + let attempts = 0; + + while (attempts < maxWaitAttempts) { + const check = await this.checkAndAcquire(nodeRunId, actionHash); + + if (check.isNew) { + // We have the lock - execute the action + try { + const result = await executor(); + await this.markCompleted(nodeRunId, actionHash, result); + return { result, fromCache: false }; + } catch (error: any) { + await this.markFailed(nodeRunId, actionHash, error.message); + throw error; + } + } + + switch (check.status) { + case IdempotencyStatus.COMPLETED: + // Action already completed - return cached result + this.logger.debug(`Returning cached result for ${nodeRunId}:${actionHash}`); + return { result: check.result as T, fromCache: true }; + + case IdempotencyStatus.FAILED: + // Action failed previously - throw the stored error + throw new Error(`Previous execution failed: ${check.errorMessage}`); + + case IdempotencyStatus.PROCESSING: + // Another process is executing - wait and retry + this.logger.debug( + `Action ${nodeRunId}:${actionHash} is being processed by another instance, waiting...`, + ); + await this.sleep(waitForProcessingMs); + attempts++; + break; + } + } + + throw new Error( + `Timeout waiting for action ${nodeRunId}:${actionHash} to complete (${maxWaitAttempts} attempts)`, + ); + } + + /** + * Clean up expired idempotency records + */ + async cleanupExpired(): Promise { + const result = await this.prisma.idempotencyRecord.deleteMany({ + where: { + expiresAt: { lt: new Date() }, + }, + }); + + if (result.count > 0) { + this.logger.log(`Cleaned up ${result.count} expired idempotency records`); + } + + return result.count; + } + + /** + * Reset a PROCESSING record (for recovery from crashes) + * Only use this if you know the processing instance has crashed + */ + async resetStaleProcessing( + nodeRunId: string, + actionHash: string, + staleDurationMs: number = 5 * 60 * 1000, // 5 minutes + ): Promise { + const idempotencyKey = this.generateKey(nodeRunId, actionHash); + + const staleThreshold = new Date(Date.now() - staleDurationMs); + + const result = await this.prisma.idempotencyRecord.updateMany({ + where: { + idempotencyKey, + status: IdempotencyStatus.PROCESSING, + createdAt: { lt: staleThreshold }, + }, + data: { + status: IdempotencyStatus.FAILED, + errorMessage: 'Reset due to stale processing state', + }, + }); + + if (result.count > 0) { + this.logger.warn(`Reset stale processing record for ${idempotencyKey}`); + return true; + } + + return false; + } + + /** + * Get statistics about idempotency records + */ + async getStats(): Promise<{ + total: number; + byStatus: Record; + expired: number; + }> { + const [total, processing, completed, failed, expired] = await Promise.all([ + this.prisma.idempotencyRecord.count(), + this.prisma.idempotencyRecord.count({ where: { status: IdempotencyStatus.PROCESSING } }), + this.prisma.idempotencyRecord.count({ where: { status: IdempotencyStatus.COMPLETED } }), + this.prisma.idempotencyRecord.count({ where: { status: IdempotencyStatus.FAILED } }), + this.prisma.idempotencyRecord.count({ where: { expiresAt: { lt: new Date() } } }), + ]); + + return { + total, + byStatus: { + [IdempotencyStatus.PROCESSING]: processing, + [IdempotencyStatus.COMPLETED]: completed, + [IdempotencyStatus.FAILED]: failed, + }, + expired, + }; + } + + /** + * Helper to sleep for a given number of milliseconds + */ + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/interaction-sli-metrics.service.ts b/packages/bytebot-workflow-orchestrator/src/services/interaction-sli-metrics.service.ts new file mode 100644 index 000000000..513925bee --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/interaction-sli-metrics.service.ts @@ -0,0 +1,102 @@ +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { InjectMetric } from '@willsoto/nestjs-prometheus'; +import type { Gauge } from 'prom-client'; +import { PrismaService } from './prisma.service'; + +@Injectable() +export class InteractionSliMetricsService implements OnModuleInit { + private readonly logger = new Logger(InteractionSliMetricsService.name); + + constructor( + private readonly prisma: PrismaService, + @InjectMetric('runs_stuck_waiting_user_input_total') + private readonly runsStuckWaitingUserInputTotal: Gauge, + @InjectMetric('runs_stuck_waiting_provider_total') + private readonly runsStuckWaitingProviderTotal: Gauge, + @InjectMetric('runs_stuck_waiting_capacity_total') + private readonly runsStuckWaitingCapacityTotal: Gauge, + @InjectMetric('prompts_resolved_without_resume_ack_total') + private readonly promptsResolvedWithoutResumeAckTotal: Gauge, + ) {} + + onModuleInit(): void { + this.logger.log('Interaction SLI metrics enabled'); + } + + @Cron(CronExpression.EVERY_MINUTE) + async tick(): Promise { + try { + await this.refresh(); + } catch (error: any) { + this.logger.debug(`Interaction SLI refresh failed: ${error?.message || String(error)}`); + } + } + + private async refresh(): Promise { + type WaitingBuckets = { + lt_5m: bigint; + m5_15: bigint; + m15_1h: bigint; + h1_24: bigint; + gt_24h: bigint; + }; + + const waitingRows = await this.prisma.$queryRaw>` + SELECT + gr.phase, + SUM(CASE WHEN COALESCE(gr.wait_started_at, gr.updated_at) >= NOW() - INTERVAL '5 minutes' THEN 1 ELSE 0 END)::bigint AS lt_5m, + SUM(CASE WHEN COALESCE(gr.wait_started_at, gr.updated_at) < NOW() - INTERVAL '5 minutes' AND COALESCE(gr.wait_started_at, gr.updated_at) >= NOW() - INTERVAL '15 minutes' THEN 1 ELSE 0 END)::bigint AS m5_15, + SUM(CASE WHEN COALESCE(gr.wait_started_at, gr.updated_at) < NOW() - INTERVAL '15 minutes' AND COALESCE(gr.wait_started_at, gr.updated_at) >= NOW() - INTERVAL '1 hour' THEN 1 ELSE 0 END)::bigint AS m15_1h, + SUM(CASE WHEN COALESCE(gr.wait_started_at, gr.updated_at) < NOW() - INTERVAL '1 hour' AND COALESCE(gr.wait_started_at, gr.updated_at) >= NOW() - INTERVAL '24 hours' THEN 1 ELSE 0 END)::bigint AS h1_24, + SUM(CASE WHEN COALESCE(gr.wait_started_at, gr.updated_at) < NOW() - INTERVAL '24 hours' THEN 1 ELSE 0 END)::bigint AS gt_24h + FROM "workflow_orchestrator"."goal_runs" gr + WHERE gr.status IN ('PENDING', 'RUNNING') + AND gr.phase IN ('WAITING_USER_INPUT', 'WAITING_PROVIDER', 'WAITING_CAPACITY') + GROUP BY gr.phase; + `; + + const zeros: WaitingBuckets = { lt_5m: 0n, m5_15: 0n, m15_1h: 0n, h1_24: 0n, gt_24h: 0n }; + const byPhase = new Map( + waitingRows.map((row) => [ + row.phase, + { + lt_5m: row.lt_5m ?? 0n, + m5_15: row.m5_15 ?? 0n, + m15_1h: row.m15_1h ?? 0n, + h1_24: row.h1_24 ?? 0n, + gt_24h: row.gt_24h ?? 0n, + }, + ]), + ); + + const setBuckets = (metric: Gauge, buckets: WaitingBuckets) => { + metric.labels('lt_5m').set(Number(buckets.lt_5m ?? 0n)); + metric.labels('5m_15m').set(Number(buckets.m5_15 ?? 0n)); + metric.labels('15m_1h').set(Number(buckets.m15_1h ?? 0n)); + metric.labels('1h_24h').set(Number(buckets.h1_24 ?? 0n)); + metric.labels('gt_24h').set(Number(buckets.gt_24h ?? 0n)); + }; + + setBuckets(this.runsStuckWaitingUserInputTotal, byPhase.get('WAITING_USER_INPUT') ?? zeros); + setBuckets(this.runsStuckWaitingProviderTotal, byPhase.get('WAITING_PROVIDER') ?? zeros); + setBuckets(this.runsStuckWaitingCapacityTotal, byPhase.get('WAITING_CAPACITY') ?? zeros); + + const [{ count }] = await this.prisma.$queryRaw>` + SELECT COUNT(*)::bigint AS count + FROM "workflow_orchestrator"."user_prompt_resolutions" r + JOIN "workflow_orchestrator"."user_prompts" p + ON p.id = r.prompt_id + JOIN "workflow_orchestrator"."goal_runs" gr + ON gr.id = p.goal_run_id + WHERE p.status = 'RESOLVED' + AND p.resolved_at IS NOT NULL + AND p.resolved_at < NOW() - INTERVAL '30 seconds' + AND r.resume_acknowledged_at IS NULL + AND gr.status IN ('PENDING', 'RUNNING') + AND gr.execution_engine = 'TEMPORAL_WORKFLOW'; + `; + + this.promptsResolvedWithoutResumeAckTotal.set(Number(count ?? 0n)); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/json-schema-validator.service.ts b/packages/bytebot-workflow-orchestrator/src/services/json-schema-validator.service.ts new file mode 100644 index 000000000..0f135e6aa --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/json-schema-validator.service.ts @@ -0,0 +1,74 @@ +import { Injectable } from '@nestjs/common'; +import Ajv, { type ErrorObject } from 'ajv'; +import addFormats from 'ajv-formats'; + +export type JsonSchema = Record; + +export interface JsonSchemaViolation { + keyword: string; + instancePath: string; + schemaPath: string; + message: string; + params: Record; +} + +export interface JsonSchemaValidationResult { + valid: boolean; + violations: JsonSchemaViolation[]; + missingFields: string[]; +} + +@Injectable() +export class JsonSchemaValidatorService { + private readonly ajv: Ajv; + + constructor() { + // Strict by default: schema mistakes should fail-closed. + this.ajv = new Ajv({ + allErrors: true, + strict: true, + // Validate schemas we load at runtime; invalid schemas are treated as server errors. + validateSchema: true, + }); + addFormats(this.ajv); + } + + validate(schema: JsonSchema, data: unknown): JsonSchemaValidationResult { + const validate = this.ajv.compile(schema); + const valid = validate(data) as boolean; + + if (valid) { + return { valid: true, violations: [], missingFields: [] }; + } + + const errors = (validate.errors ?? []) as ErrorObject[]; + const violations = errors.map((e) => ({ + keyword: e.keyword, + instancePath: e.instancePath ?? '', + schemaPath: e.schemaPath ?? '', + message: e.message ?? 'invalid', + params: (e.params as any) ?? {}, + })); + + const missingFields = errors + .filter((e) => e.keyword === 'required' && typeof (e.params as any)?.missingProperty === 'string') + .map((e) => (e.params as any).missingProperty as string); + + return { valid: false, violations, missingFields }; + } + + /** + * Patch semantics: + * - validate only the fields present in the payload (no "required" enforcement) + * - apply merged values, then run a full validation pass for completeness + */ + makePatchSchema(schema: JsonSchema): JsonSchema { + // Shallow clone is enough for our current v1 schemas; deep clone later if nested required is introduced. + const copy: JsonSchema = { ...(schema as any) }; + if (Array.isArray(copy.required)) { + copy.required = []; + } + return copy; + } +} + diff --git a/packages/bytebot-workflow-orchestrator/src/services/knowledge-extraction.service.ts b/packages/bytebot-workflow-orchestrator/src/services/knowledge-extraction.service.ts new file mode 100644 index 000000000..cb40b80ff --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/knowledge-extraction.service.ts @@ -0,0 +1,612 @@ +/** + * Knowledge Extraction Service + * v1.0.0: LLM-Driven Knowledge Extraction for Agent Context + * + * Implements industry-standard patterns for extracting actionable knowledge: + * - OpenAI: Structured outputs with JSON schema validation + * - Anthropic: Chain-of-thought extraction with reasoning + * - Google: Entity extraction and relationship mapping + * + * Key Features: + * 1. Multi-dimensional knowledge extraction (facts, entities, decisions, metrics) + * 2. Confidence scoring for extracted knowledge + * 3. Deduplication and conflict resolution + * 4. Temporal ordering and relevance decay + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; + +// Knowledge types following OpenAI structured output patterns +export interface ExtractedFact { + id: string; + type: 'fact' | 'decision' | 'discovery' | 'error' | 'metric'; + content: string; + confidence: number; // 0-1 score + source: { + stepNumber?: number; + timestamp: Date; + context: string; // Brief context about where this came from + }; + entities?: string[]; // Named entities mentioned + tags?: string[]; // Categorization tags +} + +export interface ExtractedEntity { + name: string; + type: 'person' | 'organization' | 'location' | 'product' | 'service' | 'price' | 'date' | 'url' | 'other'; + mentions: number; + firstSeen: Date; + lastSeen: Date; + relatedFacts: string[]; // IDs of facts mentioning this entity +} + +export interface KnowledgeGraph { + goalRunId: string; + extractedAt: Date; + facts: ExtractedFact[]; + entities: ExtractedEntity[]; + summary: string; + keyMetrics: Record; + decisions: Array<{ + decision: string; + reasoning?: string; + stepNumber: number; + }>; +} + +export interface ExtractionResult { + success: boolean; + knowledgeGraph: KnowledgeGraph; + newFactsCount: number; + newEntitiesCount: number; + processingTimeMs: number; +} + +// Regex patterns for common knowledge extraction +const EXTRACTION_PATTERNS = { + prices: /\$[\d,]+(?:\.\d{2})?|\b\d+(?:,\d{3})*(?:\.\d{2})?\s*(?:USD|EUR|GBP|dollars?)\b/gi, + dates: /\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+\d{1,2}(?:,?\s+\d{4})?\b|\b\d{1,2}\/\d{1,2}\/\d{2,4}\b|\b\d{4}-\d{2}-\d{2}\b/gi, + urls: /https?:\/\/[^\s<>"{}|\\^`\[\]]+/gi, + emails: /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/gi, + confirmations: /(?:confirmed|verified|selected|booked|reserved|completed|found|discovered)[:.]?\s*(.{10,100})/gi, + errors: /(?:error|failed|couldn't|unable|problem|issue)[:.]?\s*(.{10,100})/gi, + decisions: /(?:decided|chose|selected|picked|going with|will use)[:.]?\s*(.{10,100})/gi, +}; + +@Injectable() +export class KnowledgeExtractionService { + private readonly logger = new Logger(KnowledgeExtractionService.name); + private readonly enabled: boolean; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + + // Cache for extracted knowledge (goalRunId -> KnowledgeGraph) + private knowledgeCache: Map = new Map(); + + // Configuration + private readonly maxFactsPerGoal: number; + private readonly confidenceThreshold: number; + private readonly useLlmExtraction: boolean; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.enabled = this.configService.get('KNOWLEDGE_EXTRACTION_ENABLED', 'true') === 'true'; + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + this.maxFactsPerGoal = parseInt(this.configService.get('MAX_FACTS_PER_GOAL', '100'), 10); + this.confidenceThreshold = parseFloat(this.configService.get('FACT_CONFIDENCE_THRESHOLD', '0.6')); + this.useLlmExtraction = this.configService.get('USE_LLM_EXTRACTION', 'true') === 'true'; + + this.logger.log( + `Knowledge extraction ${this.enabled ? 'enabled' : 'disabled'} ` + + `(LLM: ${this.useLlmExtraction}, threshold: ${this.confidenceThreshold})` + ); + } + + /** + * Extract knowledge from step outcome text + */ + async extractFromOutcome( + goalRunId: string, + stepNumber: number, + outcomeText: string, + goalDescription: string, + ): Promise { + const startTime = Date.now(); + + if (!this.enabled || !outcomeText) { + return this.emptyResult(goalRunId); + } + + this.logger.debug(`Extracting knowledge from step ${stepNumber} for goal ${goalRunId}`); + + // Get or create knowledge graph for this goal + let graph = this.knowledgeCache.get(goalRunId) || this.createEmptyGraph(goalRunId); + + // Extract using pattern matching (fast, always available) + const patternFacts = this.extractWithPatterns(outcomeText, stepNumber); + + // Extract using LLM if enabled and API key available + let llmFacts: ExtractedFact[] = []; + if (this.useLlmExtraction && this.llmApiKey) { + try { + llmFacts = await this.extractWithLLM(outcomeText, stepNumber, goalDescription); + } catch (error) { + this.logger.warn(`LLM extraction failed: ${(error as Error).message}`); + } + } + + // Merge and deduplicate facts + const allFacts = [...patternFacts, ...llmFacts]; + const newFacts = this.deduplicateAndMerge(graph.facts, allFacts); + + // Extract entities from all facts + const newEntities = this.extractEntities(newFacts, graph.entities); + + // Update graph + graph.facts = [...graph.facts, ...newFacts].slice(-this.maxFactsPerGoal); + graph.entities = newEntities; + graph.extractedAt = new Date(); + + // Update decisions list + const newDecisions = newFacts + .filter(f => f.type === 'decision') + .map(f => ({ + decision: f.content, + reasoning: f.source.context, + stepNumber: f.source.stepNumber || stepNumber, + })); + graph.decisions = [...graph.decisions, ...newDecisions]; + + // Update key metrics + this.updateKeyMetrics(graph, newFacts); + + // Generate summary if we have enough facts + if (graph.facts.length >= 5 && graph.facts.length % 5 === 0) { + graph.summary = this.generateSummary(graph); + } + + // Cache the updated graph + this.knowledgeCache.set(goalRunId, graph); + + const processingTimeMs = Date.now() - startTime; + + // Emit event for monitoring + this.eventEmitter.emit('knowledge.extracted', { + goalRunId, + stepNumber, + newFactsCount: newFacts.length, + newEntitiesCount: newEntities.length - (graph.entities.length - newEntities.length), + processingTimeMs, + }); + + this.logger.log( + `Extracted ${newFacts.length} facts, ${newEntities.length} entities ` + + `from step ${stepNumber} (${processingTimeMs}ms)` + ); + + return { + success: true, + knowledgeGraph: graph, + newFactsCount: newFacts.length, + newEntitiesCount: newEntities.length, + processingTimeMs, + }; + } + + /** + * Get accumulated knowledge for a goal + */ + getKnowledge(goalRunId: string): KnowledgeGraph | null { + return this.knowledgeCache.get(goalRunId) || null; + } + + /** + * Format knowledge for LLM context + */ + formatForContext(goalRunId: string, maxTokens: number = 1000): string { + const graph = this.knowledgeCache.get(goalRunId); + if (!graph || graph.facts.length === 0) { + return ''; + } + + const lines: string[] = ['=== ACCUMULATED KNOWLEDGE ===']; + + // Add key metrics + if (Object.keys(graph.keyMetrics).length > 0) { + lines.push('\nKey Metrics:'); + for (const [key, value] of Object.entries(graph.keyMetrics)) { + lines.push(` - ${key}: ${value}`); + } + } + + // Add high-confidence facts (sorted by confidence) + const highConfidenceFacts = graph.facts + .filter(f => f.confidence >= this.confidenceThreshold) + .sort((a, b) => b.confidence - a.confidence) + .slice(0, 15); + + if (highConfidenceFacts.length > 0) { + lines.push('\nKey Facts:'); + for (const fact of highConfidenceFacts) { + const icon = fact.type === 'error' ? '⚠️' : fact.type === 'decision' ? '→' : '•'; + lines.push(` ${icon} ${fact.content}`); + } + } + + // Add decisions + if (graph.decisions.length > 0) { + lines.push('\nDecisions Made:'); + for (const decision of graph.decisions.slice(-5)) { + lines.push(` Step ${decision.stepNumber}: ${decision.decision}`); + } + } + + // Add important entities + const importantEntities = graph.entities + .filter(e => e.mentions >= 2 || e.type === 'price' || e.type === 'date') + .slice(0, 10); + + if (importantEntities.length > 0) { + lines.push('\nKey Entities:'); + for (const entity of importantEntities) { + lines.push(` - ${entity.name} (${entity.type})`); + } + } + + lines.push('\n=== END KNOWLEDGE ==='); + + // Truncate if too long (rough token estimate: 4 chars per token) + let result = lines.join('\n'); + const maxChars = maxTokens * 4; + if (result.length > maxChars) { + result = result.substring(0, maxChars - 50) + '\n... (truncated)'; + } + + return result; + } + + /** + * Clear knowledge for a goal (on completion or reset) + */ + clearKnowledge(goalRunId: string): void { + this.knowledgeCache.delete(goalRunId); + this.logger.debug(`Cleared knowledge cache for goal ${goalRunId}`); + } + + /** + * Extract facts using regex patterns (fast, no API calls) + */ + private extractWithPatterns(text: string, stepNumber: number): ExtractedFact[] { + const facts: ExtractedFact[] = []; + const timestamp = new Date(); + + // Extract prices + const prices = text.match(EXTRACTION_PATTERNS.prices) || []; + for (const price of prices.slice(0, 5)) { + facts.push(this.createFact('metric', `Price found: ${price}`, 0.9, stepNumber, timestamp, text)); + } + + // Extract dates + const dates = text.match(EXTRACTION_PATTERNS.dates) || []; + for (const date of dates.slice(0, 3)) { + facts.push(this.createFact('fact', `Date mentioned: ${date}`, 0.85, stepNumber, timestamp, text)); + } + + // Extract URLs + const urls = text.match(EXTRACTION_PATTERNS.urls) || []; + for (const url of urls.slice(0, 3)) { + facts.push(this.createFact('discovery', `URL found: ${url}`, 0.95, stepNumber, timestamp, text)); + } + + // Extract confirmations + let match; + while ((match = EXTRACTION_PATTERNS.confirmations.exec(text)) !== null) { + if (facts.length < 10) { + facts.push(this.createFact('fact', match[0].trim(), 0.8, stepNumber, timestamp, text)); + } + } + + // Extract errors + while ((match = EXTRACTION_PATTERNS.errors.exec(text)) !== null) { + if (facts.length < 15) { + facts.push(this.createFact('error', match[0].trim(), 0.85, stepNumber, timestamp, text)); + } + } + + // Extract decisions + while ((match = EXTRACTION_PATTERNS.decisions.exec(text)) !== null) { + if (facts.length < 15) { + facts.push(this.createFact('decision', match[0].trim(), 0.75, stepNumber, timestamp, text)); + } + } + + return facts; + } + + /** + * Extract facts using LLM (more accurate, requires API) + */ + private async extractWithLLM( + text: string, + stepNumber: number, + goalDescription: string, + ): Promise { + const prompt = `You are extracting knowledge from an AI agent's step outcome. + +GOAL: ${goalDescription} +STEP ${stepNumber} OUTCOME: +${text.substring(0, 2000)} + +Extract the most important facts, decisions, and discoveries. Return a JSON array with objects containing: +- type: "fact" | "decision" | "discovery" | "error" | "metric" +- content: concise description (max 100 chars) +- confidence: 0-1 how certain this fact is +- entities: array of named entities mentioned + +Return ONLY the JSON array, no other text. Extract at most 5 items, focusing on the most important. + +Example output: +[ + {"type": "discovery", "content": "Found Southwest flight for $289", "confidence": 0.95, "entities": ["Southwest", "$289"]}, + {"type": "decision", "content": "Selected cheapest option", "confidence": 0.8, "entities": []} +]`; + + try { + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: 'claude-3-haiku-20240307', + max_tokens: 500, + messages: [{ role: 'user', content: prompt }], + }), + }); + + if (!response.ok) { + throw new Error(`LLM API error: ${response.status}`); + } + + const data = await response.json(); + const responseText = data.content?.[0]?.text || '[]'; + + // Parse JSON from response + const jsonMatch = responseText.match(/\[[\s\S]*\]/); + if (!jsonMatch) { + return []; + } + + const extracted = JSON.parse(jsonMatch[0]); + const timestamp = new Date(); + + return extracted.map((item: any) => this.createFact( + item.type || 'fact', + item.content || '', + item.confidence || 0.7, + stepNumber, + timestamp, + text, + item.entities, + )); + } catch (error) { + this.logger.warn(`LLM extraction parse error: ${(error as Error).message}`); + return []; + } + } + + /** + * Create a fact object with unique ID + */ + private createFact( + type: ExtractedFact['type'], + content: string, + confidence: number, + stepNumber: number, + timestamp: Date, + context: string, + entities?: string[], + ): ExtractedFact { + return { + id: `fact-${Date.now()}-${Math.random().toString(36).substring(2, 8)}`, + type, + content: content.substring(0, 200), + confidence, + source: { + stepNumber, + timestamp, + context: context.substring(0, 100), + }, + entities: entities || [], + tags: [], + }; + } + + /** + * Deduplicate and merge facts + */ + private deduplicateAndMerge( + existing: ExtractedFact[], + newFacts: ExtractedFact[], + ): ExtractedFact[] { + const existingContents = new Set(existing.map(f => f.content.toLowerCase())); + const result: ExtractedFact[] = []; + + for (const fact of newFacts) { + const lowerContent = fact.content.toLowerCase(); + // Check for exact or near-duplicate + if (!existingContents.has(lowerContent)) { + // Check for similar content (simple Jaccard-ish similarity) + const isDuplicate = existing.some(e => { + const similarity = this.calculateSimilarity(e.content, fact.content); + return similarity > 0.8; + }); + + if (!isDuplicate) { + result.push(fact); + existingContents.add(lowerContent); + } + } + } + + return result; + } + + /** + * Calculate simple word overlap similarity + */ + private calculateSimilarity(a: string, b: string): number { + const wordsA = new Set(a.toLowerCase().split(/\s+/)); + const wordsB = new Set(b.toLowerCase().split(/\s+/)); + const intersection = new Set([...wordsA].filter(x => wordsB.has(x))); + const union = new Set([...wordsA, ...wordsB]); + return intersection.size / union.size; + } + + /** + * Extract entities from facts + */ + private extractEntities( + facts: ExtractedFact[], + existing: ExtractedEntity[], + ): ExtractedEntity[] { + const entityMap = new Map(); + + // Add existing entities + for (const entity of existing) { + entityMap.set(entity.name.toLowerCase(), entity); + } + + // Extract from facts + for (const fact of facts) { + // From explicit entities + for (const entityName of fact.entities || []) { + const key = entityName.toLowerCase(); + const entityType = this.inferEntityType(entityName); + + if (entityMap.has(key)) { + const e = entityMap.get(key)!; + e.mentions++; + e.lastSeen = fact.source.timestamp; + e.relatedFacts.push(fact.id); + } else { + entityMap.set(key, { + name: entityName, + type: entityType, + mentions: 1, + firstSeen: fact.source.timestamp, + lastSeen: fact.source.timestamp, + relatedFacts: [fact.id], + }); + } + } + + // Extract prices as entities + const prices = fact.content.match(EXTRACTION_PATTERNS.prices) || []; + for (const price of prices) { + const key = price.toLowerCase(); + if (!entityMap.has(key)) { + entityMap.set(key, { + name: price, + type: 'price', + mentions: 1, + firstSeen: fact.source.timestamp, + lastSeen: fact.source.timestamp, + relatedFacts: [fact.id], + }); + } + } + } + + return Array.from(entityMap.values()); + } + + /** + * Infer entity type from name + */ + private inferEntityType(name: string): ExtractedEntity['type'] { + if (/^\$[\d,]+/.test(name) || /\d+\s*(USD|EUR|GBP)/i.test(name)) return 'price'; + if (/^https?:\/\//.test(name)) return 'url'; + if (/@/.test(name)) return 'other'; // Email-like + if (/^\d{1,2}\/\d{1,2}\/\d{2,4}$/.test(name)) return 'date'; + if (/^(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)/i.test(name)) return 'date'; + // Default to other - could be enhanced with NER + return 'other'; + } + + /** + * Update key metrics from facts + */ + private updateKeyMetrics(graph: KnowledgeGraph, newFacts: ExtractedFact[]): void { + for (const fact of newFacts) { + if (fact.type === 'metric') { + // Extract metric name and value + const match = fact.content.match(/(.+?):\s*(.+)/); + if (match) { + graph.keyMetrics[match[1].trim()] = match[2].trim(); + } + } + + // Also track prices as metrics + const prices = fact.content.match(EXTRACTION_PATTERNS.prices) || []; + if (prices.length > 0) { + graph.keyMetrics['lastPrice'] = prices[prices.length - 1]; + } + } + } + + /** + * Generate summary from knowledge graph + */ + private generateSummary(graph: KnowledgeGraph): string { + const factCount = graph.facts.length; + const decisionCount = graph.decisions.length; + const errorCount = graph.facts.filter(f => f.type === 'error').length; + const discoveryCount = graph.facts.filter(f => f.type === 'discovery').length; + + const parts: string[] = []; + parts.push(`Accumulated ${factCount} facts`); + if (discoveryCount > 0) parts.push(`${discoveryCount} discoveries`); + if (decisionCount > 0) parts.push(`${decisionCount} decisions`); + if (errorCount > 0) parts.push(`${errorCount} errors encountered`); + + return parts.join(', ') + '.'; + } + + /** + * Create empty knowledge graph + */ + private createEmptyGraph(goalRunId: string): KnowledgeGraph { + return { + goalRunId, + extractedAt: new Date(), + facts: [], + entities: [], + summary: '', + keyMetrics: {}, + decisions: [], + }; + } + + /** + * Return empty result + */ + private emptyResult(goalRunId: string): ExtractionResult { + return { + success: true, + knowledgeGraph: this.createEmptyGraph(goalRunId), + newFactsCount: 0, + newEntitiesCount: 0, + processingTimeMs: 0, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/leader-election.service.ts b/packages/bytebot-workflow-orchestrator/src/services/leader-election.service.ts new file mode 100644 index 000000000..9a09333cb --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/leader-election.service.ts @@ -0,0 +1,410 @@ +/** + * Leader Election Service + * v1.0.3: Kubernetes Lease-based leader election for scheduler single-runner guarantee + * + * Uses the coordination.k8s.io/v1 Lease API to ensure only one replica + * runs the scheduler at a time. + * + * Configuration (environment variables): + * - LEADER_ELECTION_ENABLED: Enable/disable leader election (default: true in K8s, false locally) + * - LEADER_ELECTION_LEASE_NAME: Name of the lease object (default: workflow-orchestrator-scheduler) + * - LEADER_ELECTION_NAMESPACE: Kubernetes namespace (default: from cluster or 'default') + * - LEADER_ELECTION_LEASE_DURATION: Lease duration in seconds (default: 15) + * - LEADER_ELECTION_RENEW_DEADLINE: Renew deadline in seconds (default: 10) + * - LEADER_ELECTION_RETRY_PERIOD: Retry period in seconds (default: 2) + * - POD_NAME: Pod identity (injected from Kubernetes downward API) + */ + +import { Injectable, Logger, OnModuleInit, OnModuleDestroy } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import * as k8s from '@kubernetes/client-node'; +import * as os from 'os'; + +export const LEADER_ELECTED_EVENT = 'leader.elected'; +export const LEADER_LOST_EVENT = 'leader.lost'; + +export interface LeaderElectionConfig { + enabled: boolean; + leaseName: string; + leaseNamespace: string; + leaseDurationSeconds: number; + renewDeadlineSeconds: number; + retryPeriodSeconds: number; +} + +/** + * Format a Date as RFC 3339 with microsecond precision for Kubernetes MicroTime fields. + * Kubernetes expects format: 2006-01-02T15:04:05.000000Z + */ +function formatMicroTime(date: Date): string { + const iso = date.toISOString(); // e.g., "2025-12-13T19:04:41.512Z" + // Replace milliseconds with microseconds (pad to 6 digits) + return iso.replace(/\.\d{3}Z$/, '.' + date.getMilliseconds().toString().padStart(3, '0') + '000Z'); +} + +@Injectable() +export class LeaderElectionService implements OnModuleInit, OnModuleDestroy { + private readonly logger = new Logger(LeaderElectionService.name); + private kc: k8s.KubeConfig | null = null; + private coordinationApi: k8s.CoordinationV1Api | null = null; + + private _isLeader = false; + private leaderCheckInterval: NodeJS.Timeout | null = null; + private shuttingDown = false; + + private readonly config: LeaderElectionConfig; + private readonly identity: string; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + // Determine if we're running in Kubernetes + const inKubernetes = !!process.env.KUBERNETES_SERVICE_HOST; + + // Configuration with sensible defaults + this.config = { + enabled: + this.configService.get('LEADER_ELECTION_ENABLED', inKubernetes ? 'true' : 'false') === + 'true', + leaseName: this.configService.get( + 'LEADER_ELECTION_LEASE_NAME', + 'workflow-orchestrator-scheduler', + ), + leaseNamespace: this.configService.get( + 'LEADER_ELECTION_NAMESPACE', + process.env.POD_NAMESPACE || 'bytebot', + ), + leaseDurationSeconds: parseInt( + this.configService.get('LEADER_ELECTION_LEASE_DURATION', '15'), + 10, + ), + renewDeadlineSeconds: parseInt( + this.configService.get('LEADER_ELECTION_RENEW_DEADLINE', '10'), + 10, + ), + retryPeriodSeconds: parseInt( + this.configService.get('LEADER_ELECTION_RETRY_PERIOD', '2'), + 10, + ), + }; + + // Unique identity for this pod + this.identity = + process.env.POD_NAME || `${os.hostname()}-${process.pid}-${Date.now()}`; + + if (this.config.enabled) { + try { + this.kc = new k8s.KubeConfig(); + + if (inKubernetes) { + this.kc.loadFromCluster(); + this.logger.log('Loaded Kubernetes config from cluster'); + } else { + this.kc.loadFromDefault(); + this.logger.log('Loaded Kubernetes config from default location'); + } + + this.coordinationApi = this.kc.makeApiClient(k8s.CoordinationV1Api); + } catch (error: any) { + this.logger.error(`Failed to initialize Kubernetes client: ${error.message}`); + // Disable leader election if we can't connect + this.config.enabled = false; + } + } + + this.logger.log( + `Leader election ${this.config.enabled ? 'ENABLED' : 'DISABLED'} with identity: ${this.identity}`, + ); + } + + get isLeader(): boolean { + // If leader election is disabled, always return true (single instance mode) + if (!this.config.enabled) { + return true; + } + return this._isLeader; + } + + async onModuleInit(): Promise { + if (!this.config.enabled) { + this.logger.log('Leader election disabled - this instance will act as leader'); + this._isLeader = true; + this.eventEmitter.emit(LEADER_ELECTED_EVENT, { identity: this.identity }); + return; + } + + this.logger.log('Starting leader election...'); + // Start leader election in background (non-blocking) + // This allows the application to start while leader election runs + this.startLeaderElection().catch((error) => { + this.logger.error(`Failed to start leader election: ${error.message}`); + }); + } + + async onModuleDestroy(): Promise { + this.logger.log('Shutting down leader election...'); + this.shuttingDown = true; + + if (this.leaderCheckInterval) { + clearInterval(this.leaderCheckInterval); + this.leaderCheckInterval = null; + } + + // Release leadership gracefully + if (this._isLeader && this.config.enabled) { + await this.releaseLease(); + } + } + + private async startLeaderElection(): Promise { + // Set up periodic lease operations FIRST so even if first attempt hangs, we still retry + this.leaderCheckInterval = setInterval( + () => this.tryAcquireOrRenewLease().catch((e) => { + this.logger.error(`Leader election retry failed: ${e.message}`); + }), + this.config.retryPeriodSeconds * 1000, + ); + this.logger.log('Leader election interval started'); + + // Initial attempt to acquire or check lease (non-blocking) + try { + await this.tryAcquireOrRenewLease(); + } catch (error: any) { + this.logger.error(`Initial leader election attempt failed: ${error.message}`); + } + } + + private async tryAcquireOrRenewLease(): Promise { + if (this.shuttingDown || !this.coordinationApi) return; + + try { + const lease = await this.getOrCreateLease(); + + if (this.isLeaseHeldByUs(lease)) { + // We hold the lease, renew it + await this.renewLease(lease); + } else if (this.isLeaseExpired(lease)) { + // Lease is expired, try to acquire it + await this.acquireLease(lease); + } else { + // Someone else holds a valid lease + if (this._isLeader) { + this.loseLeadership(); + } + } + } catch (error: any) { + this.logger.error(`Leader election error: ${error.message}`); + if (this._isLeader) { + this.loseLeadership(); + } + } + } + + private async getOrCreateLease(): Promise { + if (!this.coordinationApi) { + throw new Error('Kubernetes API not initialized'); + } + + try { + const { body } = await this.coordinationApi.readNamespacedLease( + this.config.leaseName, + this.config.leaseNamespace, + ); + return body; + } catch (error: any) { + if (error.statusCode === 404) { + // Lease doesn't exist, create it + return this.createLease(); + } + throw error; + } + } + + private async createLease(): Promise { + if (!this.coordinationApi) { + throw new Error('Kubernetes API not initialized'); + } + + const now = new Date(); + const microTimeNow = formatMicroTime(now); + const lease: k8s.V1Lease = { + apiVersion: 'coordination.k8s.io/v1', + kind: 'Lease', + metadata: { + name: this.config.leaseName, + namespace: this.config.leaseNamespace, + labels: { + 'app.kubernetes.io/name': 'workflow-orchestrator', + 'app.kubernetes.io/component': 'scheduler', + }, + }, + spec: { + holderIdentity: this.identity, + leaseDurationSeconds: this.config.leaseDurationSeconds, + acquireTime: microTimeNow as unknown as Date, + renewTime: microTimeNow as unknown as Date, + leaseTransitions: 0, + }, + }; + + const { body } = await this.coordinationApi.createNamespacedLease( + this.config.leaseNamespace, + lease, + ); + + this.becomeLeader(); + this.logger.log('Created new lease and became leader'); + return body; + } + + private async acquireLease(existingLease: k8s.V1Lease): Promise { + if (!this.coordinationApi) return; + + const now = new Date(); + const microTimeNow = formatMicroTime(now); + const updatedLease: k8s.V1Lease = { + ...existingLease, + spec: { + ...existingLease.spec, + holderIdentity: this.identity, + acquireTime: microTimeNow as unknown as Date, + renewTime: microTimeNow as unknown as Date, + leaseTransitions: (existingLease.spec?.leaseTransitions || 0) + 1, + }, + }; + + try { + await this.coordinationApi.replaceNamespacedLease( + this.config.leaseName, + this.config.leaseNamespace, + updatedLease, + ); + this.becomeLeader(); + this.logger.log('Acquired lease and became leader'); + } catch (error: any) { + if (error.statusCode === 409) { + // Conflict - someone else got the lease first + this.logger.debug('Failed to acquire lease - conflict'); + } else { + throw error; + } + } + } + + private async renewLease(existingLease: k8s.V1Lease): Promise { + if (!this.coordinationApi) return; + + const now = new Date(); + const microTimeNow = formatMicroTime(now); + const updatedLease: k8s.V1Lease = { + ...existingLease, + spec: { + ...existingLease.spec, + renewTime: microTimeNow as unknown as Date, + }, + }; + + try { + await this.coordinationApi.replaceNamespacedLease( + this.config.leaseName, + this.config.leaseNamespace, + updatedLease, + ); + + if (!this._isLeader) { + this.becomeLeader(); + } + } catch (error: any) { + if (error.statusCode === 409) { + this.logger.warn('Lost lease during renewal - conflict'); + this.loseLeadership(); + } else { + throw error; + } + } + } + + private async releaseLease(): Promise { + if (!this.coordinationApi) return; + + try { + const { body: lease } = await this.coordinationApi.readNamespacedLease( + this.config.leaseName, + this.config.leaseNamespace, + ); + + if (this.isLeaseHeldByUs(lease)) { + // Set holder to empty to release immediately + const microTimeNow = formatMicroTime(new Date()); + const updatedLease: k8s.V1Lease = { + ...lease, + spec: { + ...lease.spec, + holderIdentity: '', + renewTime: microTimeNow as unknown as Date, + }, + }; + + await this.coordinationApi.replaceNamespacedLease( + this.config.leaseName, + this.config.leaseNamespace, + updatedLease, + ); + this.logger.log('Released lease successfully'); + } + } catch (error: any) { + this.logger.error(`Failed to release lease: ${error.message}`); + } + } + + private isLeaseHeldByUs(lease: k8s.V1Lease): boolean { + return lease.spec?.holderIdentity === this.identity; + } + + private isLeaseExpired(lease: k8s.V1Lease): boolean { + if (!lease.spec?.renewTime || !lease.spec?.leaseDurationSeconds) { + return true; + } + + const renewTime = new Date(lease.spec.renewTime as unknown as string).getTime(); + const expirationTime = renewTime + lease.spec.leaseDurationSeconds * 1000; + return Date.now() > expirationTime; + } + + private becomeLeader(): void { + if (!this._isLeader) { + this._isLeader = true; + this.logger.log('🏆 This instance is now the LEADER'); + this.eventEmitter.emit(LEADER_ELECTED_EVENT, { identity: this.identity }); + } + } + + private loseLeadership(): void { + if (this._isLeader) { + this._isLeader = false; + this.logger.warn('⚠️ This instance LOST leadership'); + this.eventEmitter.emit(LEADER_LOST_EVENT, { identity: this.identity }); + } + } + + /** + * Get leadership status for health checks and monitoring + */ + getLeadershipStatus(): { + enabled: boolean; + isLeader: boolean; + identity: string; + leaseName: string; + leaseNamespace: string; + } { + return { + enabled: this.config.enabled, + isLeader: this.isLeader, + identity: this.identity, + leaseName: this.config.leaseName, + leaseNamespace: this.config.leaseNamespace, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/llm-provider.service.ts b/packages/bytebot-workflow-orchestrator/src/services/llm-provider.service.ts new file mode 100644 index 000000000..d846e8340 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/llm-provider.service.ts @@ -0,0 +1,666 @@ +/** + * LLM Provider Service + * Phase 10 (v5.5.0): Enterprise Features - Custom LLM Provider Support + * + * Provides multi-provider LLM capabilities: + * - Support for Anthropic, OpenAI, Azure OpenAI, Google Vertex AI, AWS Bedrock + * - Per-tenant provider configuration + * - Automatic fallback between providers + * - Usage tracking and cost estimation + * - Rate limiting per provider + */ + +import { Injectable, Logger, NotFoundException, BadRequestException } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; + +// ============================================================================ +// Types and Interfaces +// ============================================================================ + +export enum LLMProvider { + ANTHROPIC = 'anthropic', + OPENAI = 'openai', + AZURE_OPENAI = 'azure_openai', + GOOGLE_VERTEX = 'google_vertex', + AWS_BEDROCK = 'aws_bedrock', +} + +export interface LLMRequest { + prompt: string; + systemPrompt?: string; + maxTokens?: number; + temperature?: number; + stopSequences?: string[]; +} + +export interface LLMResponse { + content: string; + tokensUsed: { + input: number; + output: number; + total: number; + }; + model: string; + provider: LLMProvider; + latencyMs: number; + cached?: boolean; +} + +export interface ProviderConfig { + provider: LLMProvider; + name: string; + apiKey?: string; + apiEndpoint?: string; + model?: string; + region?: string; + config?: Record; + isDefault?: boolean; + isFallback?: boolean; + priority?: number; + maxRequestsPerMinute?: number; + maxTokensPerRequest?: number; +} + +// Provider-specific model defaults +const PROVIDER_DEFAULTS: Record = { + [LLMProvider.ANTHROPIC]: { + model: 'claude-3-5-sonnet-20241022', + endpoint: 'https://api.anthropic.com/v1/messages', + }, + [LLMProvider.OPENAI]: { + model: 'gpt-4-turbo-preview', + endpoint: 'https://api.openai.com/v1/chat/completions', + }, + [LLMProvider.AZURE_OPENAI]: { + model: 'gpt-4', + endpoint: '', // Requires custom endpoint + }, + [LLMProvider.GOOGLE_VERTEX]: { + model: 'gemini-1.5-pro', + endpoint: 'https://us-central1-aiplatform.googleapis.com/v1', + }, + [LLMProvider.AWS_BEDROCK]: { + model: 'anthropic.claude-3-sonnet-20240229-v1:0', + endpoint: '', // Uses AWS SDK + }, +}; + +// Approximate cost per 1M tokens (input/output) +const PROVIDER_COSTS: Record = { + [LLMProvider.ANTHROPIC]: { input: 3.0, output: 15.0 }, + [LLMProvider.OPENAI]: { input: 10.0, output: 30.0 }, + [LLMProvider.AZURE_OPENAI]: { input: 10.0, output: 30.0 }, + [LLMProvider.GOOGLE_VERTEX]: { input: 1.25, output: 5.0 }, + [LLMProvider.AWS_BEDROCK]: { input: 3.0, output: 15.0 }, +}; + +@Injectable() +export class LLMProviderService { + private readonly logger = new Logger(LLMProviderService.name); + private readonly defaultProvider: LLMProvider; + private readonly defaultApiKey: string; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.defaultProvider = LLMProvider.ANTHROPIC; + this.defaultApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.logger.log('LLMProviderService initialized'); + } + + // ========================================================================== + // Provider Configuration + // ========================================================================== + + /** + * Add or update an LLM provider configuration for a tenant + */ + async configureProvider(tenantId: string, input: ProviderConfig): Promise { + // Validate provider + if (!Object.values(LLMProvider).includes(input.provider)) { + throw new BadRequestException(`Invalid provider: ${input.provider}`); + } + + // If setting as default, unset other defaults + if (input.isDefault) { + await this.prisma.lLMProviderConfig.updateMany({ + where: { tenantId, isDefault: true }, + data: { isDefault: false }, + }); + } + + const config = await this.prisma.lLMProviderConfig.upsert({ + where: { + tenantId_provider_name: { + tenantId, + provider: input.provider, + name: input.name, + }, + }, + create: { + tenantId, + provider: input.provider, + name: input.name, + apiKey: input.apiKey, + apiEndpoint: input.apiEndpoint || PROVIDER_DEFAULTS[input.provider].endpoint, + model: input.model || PROVIDER_DEFAULTS[input.provider].model, + region: input.region, + config: input.config || {}, + isDefault: input.isDefault ?? false, + isFallback: input.isFallback ?? false, + priority: input.priority ?? 0, + maxRequestsPerMinute: input.maxRequestsPerMinute, + maxTokensPerRequest: input.maxTokensPerRequest, + isEnabled: true, + }, + update: { + apiKey: input.apiKey, + apiEndpoint: input.apiEndpoint, + model: input.model, + region: input.region, + config: input.config, + isDefault: input.isDefault, + isFallback: input.isFallback, + priority: input.priority, + maxRequestsPerMinute: input.maxRequestsPerMinute, + maxTokensPerRequest: input.maxTokensPerRequest, + }, + }); + + this.logger.log(`Configured LLM provider ${input.provider} for tenant ${tenantId}`); + return this.maskSensitiveData(config); + } + + /** + * Get all provider configurations for a tenant + */ + async getProviders(tenantId: string): Promise { + const configs = await this.prisma.lLMProviderConfig.findMany({ + where: { tenantId }, + orderBy: [{ isDefault: 'desc' }, { priority: 'asc' }], + }); + + return configs.map(c => this.maskSensitiveData(c)); + } + + /** + * Get the default provider for a tenant + */ + async getDefaultProvider(tenantId: string): Promise { + const config = await this.prisma.lLMProviderConfig.findFirst({ + where: { tenantId, isDefault: true, isEnabled: true }, + }); + + return config ? this.maskSensitiveData(config) : null; + } + + /** + * Delete a provider configuration + */ + async deleteProvider(tenantId: string, providerId: string): Promise { + const existing = await this.prisma.lLMProviderConfig.findFirst({ + where: { id: providerId, tenantId }, + }); + + if (!existing) { + throw new NotFoundException(`Provider configuration ${providerId} not found`); + } + + await this.prisma.lLMProviderConfig.delete({ + where: { id: providerId }, + }); + + this.logger.log(`Deleted LLM provider ${providerId} for tenant ${tenantId}`); + } + + /** + * Enable/disable a provider + */ + async setProviderEnabled(tenantId: string, providerId: string, enabled: boolean): Promise { + const config = await this.prisma.lLMProviderConfig.updateMany({ + where: { id: providerId, tenantId }, + data: { isEnabled: enabled }, + }); + + if (config.count === 0) { + throw new NotFoundException(`Provider configuration ${providerId} not found`); + } + + return { success: true, enabled }; + } + + // ========================================================================== + // LLM Invocation + // ========================================================================== + + /** + * Call LLM with automatic provider selection and fallback + */ + async call(tenantId: string, request: LLMRequest): Promise { + const startTime = Date.now(); + + // Get provider chain (default + fallbacks) + const providers = await this.getProviderChain(tenantId); + + if (providers.length === 0) { + // Use system default + return this.callProvider(this.defaultProvider, { + apiKey: this.defaultApiKey, + model: PROVIDER_DEFAULTS[this.defaultProvider].model, + endpoint: PROVIDER_DEFAULTS[this.defaultProvider].endpoint, + }, request, startTime); + } + + // Try each provider in order + let lastError: Error | null = null; + + for (const provider of providers) { + try { + const response = await this.callProvider( + provider.provider as LLMProvider, + { + apiKey: provider.apiKey!, + model: provider.model || PROVIDER_DEFAULTS[provider.provider as LLMProvider].model, + endpoint: provider.apiEndpoint || PROVIDER_DEFAULTS[provider.provider as LLMProvider].endpoint, + config: provider.config as Record, + }, + request, + startTime, + ); + + // Update usage stats + await this.updateUsageStats(provider.id, response.tokensUsed.total); + + return response; + } catch (error: any) { + this.logger.warn(`Provider ${provider.provider} failed: ${error.message}`); + lastError = error; + + // Emit event for monitoring + this.eventEmitter.emit('llm.provider.failed', { + tenantId, + provider: provider.provider, + error: error.message, + }); + + // Continue to next provider + } + } + + // All providers failed + throw lastError || new Error('All LLM providers failed'); + } + + /** + * Call a specific provider + */ + private async callProvider( + provider: LLMProvider, + config: { apiKey: string; model: string; endpoint: string; config?: Record }, + request: LLMRequest, + startTime: number, + ): Promise { + switch (provider) { + case LLMProvider.ANTHROPIC: + return this.callAnthropic(config, request, startTime); + case LLMProvider.OPENAI: + return this.callOpenAI(config, request, startTime); + case LLMProvider.AZURE_OPENAI: + return this.callAzureOpenAI(config, request, startTime); + case LLMProvider.GOOGLE_VERTEX: + return this.callGoogleVertex(config, request, startTime); + case LLMProvider.AWS_BEDROCK: + return this.callAWSBedrock(config, request, startTime); + default: + throw new Error(`Unsupported provider: ${provider}`); + } + } + + /** + * Call Anthropic API + */ + private async callAnthropic( + config: { apiKey: string; model: string; endpoint: string }, + request: LLMRequest, + startTime: number, + ): Promise { + const response = await fetch(config.endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': config.apiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: config.model, + max_tokens: request.maxTokens || 4096, + temperature: request.temperature ?? 0.7, + system: request.systemPrompt, + messages: [{ role: 'user', content: request.prompt }], + stop_sequences: request.stopSequences, + }), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(`Anthropic API error: ${error}`); + } + + const data = await response.json(); + + return { + content: data.content[0]?.text || '', + tokensUsed: { + input: data.usage?.input_tokens || 0, + output: data.usage?.output_tokens || 0, + total: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0), + }, + model: config.model, + provider: LLMProvider.ANTHROPIC, + latencyMs: Date.now() - startTime, + }; + } + + /** + * Call OpenAI API + */ + private async callOpenAI( + config: { apiKey: string; model: string; endpoint: string }, + request: LLMRequest, + startTime: number, + ): Promise { + const messages: any[] = []; + + if (request.systemPrompt) { + messages.push({ role: 'system', content: request.systemPrompt }); + } + messages.push({ role: 'user', content: request.prompt }); + + const response = await fetch(config.endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Authorization': `Bearer ${config.apiKey}`, + }, + body: JSON.stringify({ + model: config.model, + messages, + max_tokens: request.maxTokens || 4096, + temperature: request.temperature ?? 0.7, + stop: request.stopSequences, + }), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(`OpenAI API error: ${error}`); + } + + const data = await response.json(); + + return { + content: data.choices[0]?.message?.content || '', + tokensUsed: { + input: data.usage?.prompt_tokens || 0, + output: data.usage?.completion_tokens || 0, + total: data.usage?.total_tokens || 0, + }, + model: config.model, + provider: LLMProvider.OPENAI, + latencyMs: Date.now() - startTime, + }; + } + + /** + * Call Azure OpenAI API + */ + private async callAzureOpenAI( + config: { apiKey: string; model: string; endpoint: string; config?: Record }, + request: LLMRequest, + startTime: number, + ): Promise { + const deploymentName = config.config?.deploymentName || config.model; + const apiVersion = config.config?.apiVersion || '2024-02-01'; + + const url = `${config.endpoint}/openai/deployments/${deploymentName}/chat/completions?api-version=${apiVersion}`; + + const messages: any[] = []; + if (request.systemPrompt) { + messages.push({ role: 'system', content: request.systemPrompt }); + } + messages.push({ role: 'user', content: request.prompt }); + + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'api-key': config.apiKey, + }, + body: JSON.stringify({ + messages, + max_tokens: request.maxTokens || 4096, + temperature: request.temperature ?? 0.7, + stop: request.stopSequences, + }), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(`Azure OpenAI API error: ${error}`); + } + + const data = await response.json(); + + return { + content: data.choices[0]?.message?.content || '', + tokensUsed: { + input: data.usage?.prompt_tokens || 0, + output: data.usage?.completion_tokens || 0, + total: data.usage?.total_tokens || 0, + }, + model: deploymentName, + provider: LLMProvider.AZURE_OPENAI, + latencyMs: Date.now() - startTime, + }; + } + + /** + * Call Google Vertex AI (stub - requires Google Cloud SDK) + */ + private async callGoogleVertex( + config: { apiKey: string; model: string; endpoint: string; config?: Record }, + request: LLMRequest, + startTime: number, + ): Promise { + // Note: Full implementation would use @google-cloud/aiplatform SDK + throw new Error('Google Vertex AI requires Google Cloud SDK integration'); + } + + /** + * Call AWS Bedrock (stub - requires AWS SDK) + */ + private async callAWSBedrock( + config: { apiKey: string; model: string; endpoint: string; config?: Record }, + request: LLMRequest, + startTime: number, + ): Promise { + // Note: Full implementation would use @aws-sdk/client-bedrock-runtime + throw new Error('AWS Bedrock requires AWS SDK integration'); + } + + // ========================================================================== + // Usage and Cost Tracking + // ========================================================================== + + /** + * Get usage statistics for a tenant + */ + async getUsageStats( + tenantId: string, + options: { startDate?: Date; endDate?: Date } = {}, + ): Promise<{ + byProvider: Record; + total: { tokens: number; requests: number; estimatedCost: number }; + }> { + const providers = await this.prisma.lLMProviderConfig.findMany({ + where: { tenantId }, + }); + + const byProvider: Record = {}; + let totalTokens = 0; + let totalRequests = 0; + let totalCost = 0; + + for (const provider of providers) { + const tokens = Number(provider.totalTokensUsed); + const requests = provider.totalRequestsCount; + const costs = PROVIDER_COSTS[provider.provider as LLMProvider] || { input: 0, output: 0 }; + const estimatedCost = (tokens / 1000000) * ((costs.input + costs.output) / 2); + + byProvider[provider.provider] = { + tokens, + requests, + estimatedCost: Math.round(estimatedCost * 100) / 100, + }; + + totalTokens += tokens; + totalRequests += requests; + totalCost += estimatedCost; + } + + return { + byProvider, + total: { + tokens: totalTokens, + requests: totalRequests, + estimatedCost: Math.round(totalCost * 100) / 100, + }, + }; + } + + /** + * Estimate cost for a request + */ + estimateCost(provider: LLMProvider, inputTokens: number, outputTokens: number): number { + const costs = PROVIDER_COSTS[provider]; + const inputCost = (inputTokens / 1000000) * costs.input; + const outputCost = (outputTokens / 1000000) * costs.output; + return Math.round((inputCost + outputCost) * 10000) / 10000; + } + + // ========================================================================== + // Helper Methods + // ========================================================================== + + /** + * Get provider chain for fallback + */ + private async getProviderChain(tenantId: string): Promise { + return this.prisma.lLMProviderConfig.findMany({ + where: { + tenantId, + isEnabled: true, + OR: [{ isDefault: true }, { isFallback: true }], + }, + orderBy: [{ isDefault: 'desc' }, { priority: 'asc' }], + }); + } + + /** + * Update usage statistics + */ + private async updateUsageStats(providerId: string, tokensUsed: number): Promise { + await this.prisma.lLMProviderConfig.update({ + where: { id: providerId }, + data: { + totalTokensUsed: { increment: BigInt(tokensUsed) }, + totalRequestsCount: { increment: 1 }, + lastUsedAt: new Date(), + }, + }); + } + + /** + * Mask sensitive data in config + */ + private maskSensitiveData(config: any): any { + return { + ...config, + apiKey: config.apiKey ? '***' + config.apiKey.slice(-4) : null, + }; + } + + /** + * Get available providers + */ + getAvailableProviders(): Array<{ + provider: LLMProvider; + name: string; + defaultModel: string; + costPer1MTokens: { input: number; output: number }; + }> { + return Object.entries(PROVIDER_DEFAULTS).map(([provider, defaults]) => ({ + provider: provider as LLMProvider, + name: this.getProviderDisplayName(provider as LLMProvider), + defaultModel: defaults.model, + costPer1MTokens: PROVIDER_COSTS[provider as LLMProvider], + })); + } + + /** + * Get display name for provider + */ + private getProviderDisplayName(provider: LLMProvider): string { + const names: Record = { + [LLMProvider.ANTHROPIC]: 'Anthropic (Claude)', + [LLMProvider.OPENAI]: 'OpenAI (GPT)', + [LLMProvider.AZURE_OPENAI]: 'Azure OpenAI', + [LLMProvider.GOOGLE_VERTEX]: 'Google Vertex AI (Gemini)', + [LLMProvider.AWS_BEDROCK]: 'AWS Bedrock', + }; + return names[provider] || provider; + } + + /** + * Test a provider configuration + */ + async testProvider(tenantId: string, providerId: string): Promise<{ success: boolean; latencyMs: number; error?: string }> { + const config = await this.prisma.lLMProviderConfig.findFirst({ + where: { id: providerId, tenantId }, + }); + + if (!config) { + throw new NotFoundException(`Provider ${providerId} not found`); + } + + try { + const startTime = Date.now(); + const response = await this.callProvider( + config.provider as LLMProvider, + { + apiKey: config.apiKey!, + model: config.model || PROVIDER_DEFAULTS[config.provider as LLMProvider].model, + endpoint: config.apiEndpoint || PROVIDER_DEFAULTS[config.provider as LLMProvider].endpoint, + config: config.config as Record, + }, + { prompt: 'Hello, respond with "OK" only.' }, + startTime, + ); + + return { + success: true, + latencyMs: response.latencyMs, + }; + } catch (error: any) { + return { + success: false, + latencyMs: 0, + error: error.message, + }; + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/maintenance-mode.service.ts b/packages/bytebot-workflow-orchestrator/src/services/maintenance-mode.service.ts new file mode 100644 index 000000000..88e5f105a --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/maintenance-mode.service.ts @@ -0,0 +1,477 @@ +/** + * Maintenance Mode Service + * v1.0.0: Phase E - Maintenance mode handling + * + * Provides graceful maintenance mode handling for the orchestrator: + * - State machine: RUNNING -> ENTERING_MAINTENANCE -> MAINTENANCE -> EXITING_MAINTENANCE -> RUNNING + * - Graceful drain: Stops new work, allows in-progress work to complete + * - Automatic pause: Pauses all running goal runs during maintenance + * - Notification: Emits activity events for visibility + * + * Key behaviors: + * - In ENTERING_MAINTENANCE: Reject new goal runs, allow in-progress to continue + * - In MAINTENANCE: All goal runs paused, no new work accepted + * - In EXITING_MAINTENANCE: Resume paused goal runs, accept new work + * + * @see Phase E documentation + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import { GoalRunPhase, GoalRunStatus } from './goal-run.service'; +import { GoalRunWaitReason, Prisma } from '@prisma/client'; + +// Maintenance mode states +export enum MaintenanceState { + RUNNING = 'RUNNING', + ENTERING_MAINTENANCE = 'ENTERING_MAINTENANCE', + MAINTENANCE = 'MAINTENANCE', + EXITING_MAINTENANCE = 'EXITING_MAINTENANCE', +} + +// Configuration defaults +const DEFAULT_DRAIN_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes to drain +const DEFAULT_DRAIN_CHECK_INTERVAL_MS = 5 * 1000; // Check every 5 seconds + +export interface MaintenanceStatus { + state: MaintenanceState; + inMaintenance: boolean; + acceptingNewWork: boolean; + reason?: string; + initiatedBy?: string; + startedAt?: Date; + expectedEndAt?: Date; + drainProgress?: { + activeGoalRuns: number; + pausedGoalRuns: number; + drainTimeoutMs: number; + drainElapsedMs: number; + }; +} + +export interface EnterMaintenanceOptions { + reason?: string; + initiatedBy?: string; + expectedDurationMs?: number; + drainTimeoutMs?: number; + force?: boolean; // If true, immediately pause all without waiting for drain +} + +@Injectable() +export class MaintenanceModeService implements OnModuleInit { + private readonly logger = new Logger(MaintenanceModeService.name); + + // Current state + private state: MaintenanceState = MaintenanceState.RUNNING; + private reason?: string; + private initiatedBy?: string; + private startedAt?: Date; + private expectedEndAt?: Date; + private drainTimeoutMs: number = DEFAULT_DRAIN_TIMEOUT_MS; + private drainStartedAt?: Date; + + // Drain timer + private drainCheckInterval?: NodeJS.Timeout; + + // Track goal runs paused by maintenance (to resume them on exit) + private pausedByMaintenance: Set = new Set(); + + constructor( + private prisma: PrismaService, + private eventEmitter: EventEmitter2, + private configService: ConfigService, + ) {} + + async onModuleInit() { + // Check if there's a persisted maintenance state on startup + await this.loadPersistedState(); + } + + /** + * Load persisted maintenance state from database (if any) + */ + private async loadPersistedState(): Promise { + try { + const config = await this.prisma.systemConfig.findUnique({ + where: { key: 'MAINTENANCE_MODE' }, + }); + + if (config?.value) { + const persisted = JSON.parse(config.value); + if (persisted.state && persisted.state !== MaintenanceState.RUNNING) { + this.logger.warn( + `Found persisted maintenance state: ${persisted.state}. ` + + `Restoring maintenance mode.` + ); + this.state = persisted.state; + this.reason = persisted.reason; + this.initiatedBy = persisted.initiatedBy; + this.startedAt = persisted.startedAt ? new Date(persisted.startedAt) : undefined; + this.expectedEndAt = persisted.expectedEndAt ? new Date(persisted.expectedEndAt) : undefined; + } + } + } catch (error: any) { + // SystemConfig table might not exist yet + this.logger.debug(`Could not load maintenance state: ${error.message}`); + } + } + + /** + * Persist current maintenance state to database + */ + private async persistState(): Promise { + try { + const value = JSON.stringify({ + state: this.state, + reason: this.reason, + initiatedBy: this.initiatedBy, + startedAt: this.startedAt?.toISOString(), + expectedEndAt: this.expectedEndAt?.toISOString(), + }); + + await this.prisma.systemConfig.upsert({ + where: { key: 'MAINTENANCE_MODE' }, + create: { key: 'MAINTENANCE_MODE', value }, + update: { value }, + }); + } catch (error: any) { + // Log but don't fail - in-memory state is authoritative + this.logger.warn(`Could not persist maintenance state: ${error.message}`); + } + } + + /** + * Get current maintenance status + */ + getStatus(): MaintenanceStatus { + const drainElapsedMs = this.drainStartedAt + ? Date.now() - this.drainStartedAt.getTime() + : 0; + + return { + state: this.state, + inMaintenance: this.state === MaintenanceState.MAINTENANCE, + acceptingNewWork: this.state === MaintenanceState.RUNNING || + this.state === MaintenanceState.EXITING_MAINTENANCE, + reason: this.reason, + initiatedBy: this.initiatedBy, + startedAt: this.startedAt, + expectedEndAt: this.expectedEndAt, + drainProgress: this.state === MaintenanceState.ENTERING_MAINTENANCE ? { + activeGoalRuns: 0, // Will be populated when queried + pausedGoalRuns: this.pausedByMaintenance.size, + drainTimeoutMs: this.drainTimeoutMs, + drainElapsedMs, + } : undefined, + }; + } + + /** + * Check if new work should be accepted + */ + shouldAcceptNewWork(): boolean { + return this.state === MaintenanceState.RUNNING || + this.state === MaintenanceState.EXITING_MAINTENANCE; + } + + /** + * Check if orchestrator should skip execution (maintenance mode) + */ + shouldSkipExecution(): boolean { + return this.state === MaintenanceState.MAINTENANCE; + } + + /** + * Check if in draining state + */ + isDraining(): boolean { + return this.state === MaintenanceState.ENTERING_MAINTENANCE; + } + + /** + * Enter maintenance mode + * Begins graceful drain process + */ + async enterMaintenance(options: EnterMaintenanceOptions = {}): Promise { + if (this.state !== MaintenanceState.RUNNING) { + this.logger.warn(`Cannot enter maintenance: already in state ${this.state}`); + return this.getStatus(); + } + + this.logger.log( + `Entering maintenance mode: ${options.reason || 'No reason provided'} ` + + `(initiated by: ${options.initiatedBy || 'system'})` + ); + + // Update state + this.state = MaintenanceState.ENTERING_MAINTENANCE; + this.reason = options.reason; + this.initiatedBy = options.initiatedBy; + this.startedAt = new Date(); + this.drainTimeoutMs = options.drainTimeoutMs || DEFAULT_DRAIN_TIMEOUT_MS; + this.drainStartedAt = new Date(); + + if (options.expectedDurationMs) { + this.expectedEndAt = new Date(Date.now() + options.expectedDurationMs); + } + + // Persist state + await this.persistState(); + + // Emit event for visibility + this.eventEmitter.emit('maintenance.entering', { + reason: this.reason, + initiatedBy: this.initiatedBy, + expectedEndAt: this.expectedEndAt, + }); + + // If force mode, immediately pause everything + if (options.force) { + this.logger.warn('Force maintenance mode: immediately pausing all goal runs'); + await this.pauseAllRunningGoalRuns(); + await this.transitionToMaintenance(); + return this.getStatus(); + } + + // Start drain check interval + this.startDrainCheck(); + + return this.getStatus(); + } + + /** + * Start periodic check for drain completion + */ + private startDrainCheck(): void { + if (this.drainCheckInterval) { + clearInterval(this.drainCheckInterval); + } + + this.drainCheckInterval = setInterval(async () => { + await this.checkDrainProgress(); + }, DEFAULT_DRAIN_CHECK_INTERVAL_MS); + } + + /** + * Check drain progress and transition when complete + */ + private async checkDrainProgress(): Promise { + if (this.state !== MaintenanceState.ENTERING_MAINTENANCE) { + this.stopDrainCheck(); + return; + } + + const drainElapsedMs = this.drainStartedAt + ? Date.now() - this.drainStartedAt.getTime() + : 0; + + // Get count of active goal runs + const activeGoalRuns = await this.prisma.goalRun.count({ + where: { + status: GoalRunStatus.RUNNING, + phase: { + notIn: [GoalRunPhase.COMPLETED, GoalRunPhase.FAILED, GoalRunPhase.PAUSED], + }, + }, + }); + + this.logger.debug( + `Drain progress: ${activeGoalRuns} active goal runs, ` + + `elapsed ${Math.round(drainElapsedMs / 1000)}s / ${Math.round(this.drainTimeoutMs / 1000)}s` + ); + + // If all drained or timeout, transition to maintenance + if (activeGoalRuns === 0) { + this.logger.log('All goal runs drained, transitioning to maintenance'); + await this.transitionToMaintenance(); + } else if (drainElapsedMs >= this.drainTimeoutMs) { + this.logger.warn( + `Drain timeout reached with ${activeGoalRuns} active goal runs. ` + + `Forcing pause and transitioning to maintenance.` + ); + await this.pauseAllRunningGoalRuns(); + await this.transitionToMaintenance(); + } + } + + /** + * Stop drain check interval + */ + private stopDrainCheck(): void { + if (this.drainCheckInterval) { + clearInterval(this.drainCheckInterval); + this.drainCheckInterval = undefined; + } + } + + /** + * Transition to maintenance state + */ + private async transitionToMaintenance(): Promise { + this.stopDrainCheck(); + this.state = MaintenanceState.MAINTENANCE; + this.drainStartedAt = undefined; + + await this.persistState(); + + this.logger.log('Entered maintenance mode'); + + // Emit event + this.eventEmitter.emit('maintenance.entered', { + reason: this.reason, + initiatedBy: this.initiatedBy, + pausedGoalRuns: this.pausedByMaintenance.size, + }); + } + + /** + * Pause all running goal runs for maintenance + */ + private async pauseAllRunningGoalRuns(): Promise { + const runningGoalRuns = await this.prisma.goalRun.findMany({ + where: { + status: GoalRunStatus.RUNNING, + phase: { + notIn: [GoalRunPhase.COMPLETED, GoalRunPhase.FAILED, GoalRunPhase.PAUSED], + }, + }, + select: { id: true }, + }); + + let pausedCount = 0; + for (const goalRun of runningGoalRuns) { + try { + await this.prisma.goalRun.update({ + where: { id: goalRun.id }, + data: { + phase: GoalRunPhase.PAUSED, + waitReason: GoalRunWaitReason.POLICY, + waitStartedAt: new Date(), + waitUntil: null, + waitDetail: { + kind: 'MAINTENANCE', + reason: this.reason || null, + initiatedBy: this.initiatedBy || null, + } as any, + }, + }); + this.pausedByMaintenance.add(goalRun.id); + pausedCount++; + + // Create activity event for this goal run + await this.prisma.activityEvent.create({ + data: { + goalRunId: goalRun.id, + eventType: 'MAINTENANCE_PAUSED', + title: 'Paused for system maintenance', + description: this.reason || 'System entering maintenance mode', + severity: 'warning', + }, + }); + } catch (error: any) { + this.logger.error(`Failed to pause goal run ${goalRun.id}: ${error.message}`); + } + } + + this.logger.log(`Paused ${pausedCount} goal runs for maintenance`); + return pausedCount; + } + + /** + * Exit maintenance mode + * Resumes goal runs that were paused by maintenance + */ + async exitMaintenance(): Promise { + if (this.state !== MaintenanceState.MAINTENANCE && + this.state !== MaintenanceState.ENTERING_MAINTENANCE) { + this.logger.warn(`Cannot exit maintenance: in state ${this.state}`); + return this.getStatus(); + } + + this.logger.log('Exiting maintenance mode'); + + this.stopDrainCheck(); + this.state = MaintenanceState.EXITING_MAINTENANCE; + + await this.persistState(); + + // Emit event + this.eventEmitter.emit('maintenance.exiting', { + pausedGoalRunsToResume: this.pausedByMaintenance.size, + }); + + // Resume goal runs that were paused by maintenance + let resumedCount = 0; + for (const goalRunId of this.pausedByMaintenance) { + try { + // Check if still exists and is paused + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { planVersions: true }, + }); + + if (goalRun && goalRun.phase === GoalRunPhase.PAUSED) { + // Determine resume phase (executing if plan exists, planning if not) + const resumePhase = goalRun.planVersions.length > 0 + ? GoalRunPhase.EXECUTING + : GoalRunPhase.PLANNING; + + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { + phase: resumePhase, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + + // Create activity event + await this.prisma.activityEvent.create({ + data: { + goalRunId, + eventType: 'MAINTENANCE_RESUMED', + title: 'Resumed after maintenance', + description: 'System maintenance completed, resuming execution', + }, + }); + + resumedCount++; + } + } catch (error: any) { + this.logger.error(`Failed to resume goal run ${goalRunId}: ${error.message}`); + } + } + + this.pausedByMaintenance.clear(); + this.logger.log(`Resumed ${resumedCount} goal runs after maintenance`); + + // Transition to running + this.state = MaintenanceState.RUNNING; + this.reason = undefined; + this.initiatedBy = undefined; + this.startedAt = undefined; + this.expectedEndAt = undefined; + + await this.persistState(); + + // Emit event + this.eventEmitter.emit('maintenance.exited', { + resumedGoalRuns: resumedCount, + }); + + this.logger.log('Exited maintenance mode'); + + return this.getStatus(); + } + + /** + * Handle module destroy - clean up intervals + */ + onModuleDestroy() { + this.stopDrainCheck(); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/metrics-aggregation.service.ts b/packages/bytebot-workflow-orchestrator/src/services/metrics-aggregation.service.ts new file mode 100644 index 000000000..ebed8af43 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/metrics-aggregation.service.ts @@ -0,0 +1,538 @@ +/** + * Metrics Aggregation Service + * v1.0.0: Phase 8 Advanced Analytics Dashboard + * + * Pre-aggregates raw metrics into time-bucketed snapshots for efficient + * dashboard queries. Runs on configurable intervals to create summaries + * at different granularities (1m, 5m, 1h, 1d). + * + * Key responsibilities: + * - Aggregate workflow execution metrics by time bucket + * - Calculate percentiles (p50, p95, p99) + * - Maintain pre-computed aggregates for fast dashboard queries + * - Clean up old raw metrics (configurable retention) + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { PrismaService } from './prisma.service'; + +// Aggregation period +export type AggregationPeriod = '1m' | '5m' | '15m' | '1h' | '1d'; + +// Aggregation result +export interface AggregationResult { + period: AggregationPeriod; + bucketStart: Date; + bucketEnd: Date; + metricsCreated: number; +} + +@Injectable() +export class MetricsAggregationService implements OnModuleInit { + private readonly logger = new Logger(MetricsAggregationService.name); + private isProcessing = false; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + ) {} + + onModuleInit() { + this.logger.log('Metrics Aggregation Service initialized'); + } + + /** + * Aggregate metrics for a specific time period + */ + async aggregateMetrics( + period: AggregationPeriod, + bucketStart: Date, + bucketEnd: Date, + ): Promise { + const result: AggregationResult = { + period, + bucketStart, + bucketEnd, + metricsCreated: 0, + }; + + try { + // Get distinct tenants with metrics in this period + const tenants = await this.prisma.workflowExecutionMetric.groupBy({ + by: ['tenantId'], + where: { + timestamp: { + gte: bucketStart, + lt: bucketEnd, + }, + }, + }); + + for (const tenant of tenants) { + // Aggregate workflow execution metrics + const workflowMetrics = await this.aggregateWorkflowMetrics( + tenant.tenantId, + period, + bucketStart, + bucketEnd, + ); + result.metricsCreated += workflowMetrics; + + // Aggregate step metrics + const stepMetrics = await this.aggregateStepMetrics( + tenant.tenantId, + period, + bucketStart, + bucketEnd, + ); + result.metricsCreated += stepMetrics; + } + + return result; + } catch (error) { + this.logger.error( + `Failed to aggregate metrics for ${period}: ${error.message}`, + error.stack, + ); + throw error; + } + } + + /** + * Aggregate workflow execution metrics for a tenant + */ + private async aggregateWorkflowMetrics( + tenantId: string, + period: AggregationPeriod, + bucketStart: Date, + bucketEnd: Date, + ): Promise { + // Get all workflow metrics for this period + const metrics = await this.prisma.workflowExecutionMetric.findMany({ + where: { + tenantId, + timestamp: { gte: bucketStart, lt: bucketEnd }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + select: { + durationMs: true, + status: true, + }, + }); + + if (metrics.length === 0) return 0; + + // Calculate aggregates + const durations = metrics + .filter((m) => m.durationMs !== null) + .map((m) => m.durationMs!); + + const successCount = metrics.filter((m) => m.status === 'COMPLETED').length; + const failureCount = metrics.filter((m) => m.status === 'FAILED').length; + + // Calculate percentiles + const sortedDurations = [...durations].sort((a, b) => a - b); + const p50 = this.getPercentile(sortedDurations, 50); + const p95 = this.getPercentile(sortedDurations, 95); + const p99 = this.getPercentile(sortedDurations, 99); + + const sum = durations.reduce((a, b) => a + b, 0); + const avg = durations.length > 0 ? sum / durations.length : 0; + const min = durations.length > 0 ? Math.min(...durations) : 0; + const max = durations.length > 0 ? Math.max(...durations) : 0; + + // Upsert metrics snapshot + await this.prisma.metricsSnapshot.upsert({ + where: { + tenantId_metricName_period_bucketStart_workflowId_agentId: { + tenantId, + metricName: 'workflow_execution_duration', + period, + bucketStart, + workflowId: '_all', + agentId: '_all', + }, + }, + create: { + tenantId, + metricName: 'workflow_execution_duration', + period, + bucketStart, + bucketEnd, + workflowId: '_all', + agentId: '_all', + count: metrics.length, + sum, + min, + max, + avg, + percentile50: p50, + percentile95: p95, + percentile99: p99, + successCount, + failureCount, + }, + update: { + count: metrics.length, + sum, + min, + max, + avg, + percentile50: p50, + percentile95: p95, + percentile99: p99, + successCount, + failureCount, + }, + }); + + // Also create success rate metric + const successRate = + metrics.length > 0 ? (successCount / metrics.length) * 100 : 0; + + await this.prisma.metricsSnapshot.upsert({ + where: { + tenantId_metricName_period_bucketStart_workflowId_agentId: { + tenantId, + metricName: 'workflow_success_rate', + period, + bucketStart, + workflowId: '_all', + agentId: '_all', + }, + }, + create: { + tenantId, + metricName: 'workflow_success_rate', + period, + bucketStart, + bucketEnd, + workflowId: '_all', + agentId: '_all', + count: metrics.length, + sum: successRate, + min: successRate, + max: successRate, + avg: successRate, + percentile50: successRate, + percentile95: successRate, + percentile99: successRate, + successCount, + failureCount, + }, + update: { + count: metrics.length, + avg: successRate, + successCount, + failureCount, + }, + }); + + return 2; // Created/updated 2 metrics + } + + /** + * Aggregate step metrics for a tenant + */ + private async aggregateStepMetrics( + tenantId: string, + period: AggregationPeriod, + bucketStart: Date, + bucketEnd: Date, + ): Promise { + // Get all step metrics for this period + const metrics = await this.prisma.workflowStepMetric.findMany({ + where: { + tenantId, + timestamp: { gte: bucketStart, lt: bucketEnd }, + status: { in: ['COMPLETED', 'FAILED'] }, + }, + select: { + durationMs: true, + status: true, + }, + }); + + if (metrics.length === 0) return 0; + + // Calculate aggregates + const durations = metrics + .filter((m) => m.durationMs !== null) + .map((m) => m.durationMs!); + + const successCount = metrics.filter((m) => m.status === 'COMPLETED').length; + const failureCount = metrics.filter((m) => m.status === 'FAILED').length; + + const sortedDurations = [...durations].sort((a, b) => a - b); + const p50 = this.getPercentile(sortedDurations, 50); + const p95 = this.getPercentile(sortedDurations, 95); + const p99 = this.getPercentile(sortedDurations, 99); + + const sum = durations.reduce((a, b) => a + b, 0); + const avg = durations.length > 0 ? sum / durations.length : 0; + const min = durations.length > 0 ? Math.min(...durations) : 0; + const max = durations.length > 0 ? Math.max(...durations) : 0; + + await this.prisma.metricsSnapshot.upsert({ + where: { + tenantId_metricName_period_bucketStart_workflowId_agentId: { + tenantId, + metricName: 'step_execution_duration', + period, + bucketStart, + workflowId: '_all', + agentId: '_all', + }, + }, + create: { + tenantId, + metricName: 'step_execution_duration', + period, + bucketStart, + bucketEnd, + workflowId: '_all', + agentId: '_all', + count: metrics.length, + sum, + min, + max, + avg, + percentile50: p50, + percentile95: p95, + percentile99: p99, + successCount, + failureCount, + }, + update: { + count: metrics.length, + sum, + min, + max, + avg, + percentile50: p50, + percentile95: p95, + percentile99: p99, + successCount, + failureCount, + }, + }); + + return 1; + } + + /** + * Calculate percentile from sorted array + */ + private getPercentile(sortedArray: number[], percentile: number): number { + if (sortedArray.length === 0) return 0; + const index = Math.ceil((percentile / 100) * sortedArray.length) - 1; + return sortedArray[Math.max(0, Math.min(index, sortedArray.length - 1))]; + } + + /** + * Get bucket start time for a given timestamp and period + */ + private getBucketStart(timestamp: Date, period: AggregationPeriod): Date { + const date = new Date(timestamp); + + switch (period) { + case '1m': + date.setSeconds(0, 0); + break; + case '5m': + date.setMinutes(Math.floor(date.getMinutes() / 5) * 5, 0, 0); + break; + case '15m': + date.setMinutes(Math.floor(date.getMinutes() / 15) * 15, 0, 0); + break; + case '1h': + date.setMinutes(0, 0, 0); + break; + case '1d': + date.setHours(0, 0, 0, 0); + break; + } + + return date; + } + + /** + * Get bucket end time for a given bucket start and period + */ + private getBucketEnd(bucketStart: Date, period: AggregationPeriod): Date { + const end = new Date(bucketStart); + + switch (period) { + case '1m': + end.setMinutes(end.getMinutes() + 1); + break; + case '5m': + end.setMinutes(end.getMinutes() + 5); + break; + case '15m': + end.setMinutes(end.getMinutes() + 15); + break; + case '1h': + end.setHours(end.getHours() + 1); + break; + case '1d': + end.setDate(end.getDate() + 1); + break; + } + + return end; + } + + // ========================================================================= + // Scheduled Aggregation Jobs + // ========================================================================= + + /** + * Aggregate 1-minute metrics (runs every minute) + */ + @Cron(CronExpression.EVERY_MINUTE) + async aggregate1mMetrics(): Promise { + if (this.isProcessing) return; + this.isProcessing = true; + + try { + const now = new Date(); + const bucketStart = this.getBucketStart( + new Date(now.getTime() - 60000), + '1m', + ); + const bucketEnd = this.getBucketEnd(bucketStart, '1m'); + + const result = await this.aggregateMetrics('1m', bucketStart, bucketEnd); + + if (result.metricsCreated > 0) { + this.logger.debug( + `Aggregated ${result.metricsCreated} 1m metrics for ${bucketStart.toISOString()}`, + ); + } + } catch (error) { + this.logger.error(`1m aggregation failed: ${error.message}`); + } finally { + this.isProcessing = false; + } + } + + /** + * Aggregate 5-minute metrics (runs every 5 minutes) + */ + @Cron(CronExpression.EVERY_5_MINUTES) + async aggregate5mMetrics(): Promise { + try { + const now = new Date(); + const bucketStart = this.getBucketStart( + new Date(now.getTime() - 5 * 60000), + '5m', + ); + const bucketEnd = this.getBucketEnd(bucketStart, '5m'); + + const result = await this.aggregateMetrics('5m', bucketStart, bucketEnd); + + if (result.metricsCreated > 0) { + this.logger.debug( + `Aggregated ${result.metricsCreated} 5m metrics for ${bucketStart.toISOString()}`, + ); + } + } catch (error) { + this.logger.error(`5m aggregation failed: ${error.message}`); + } + } + + /** + * Aggregate hourly metrics (runs every hour) + */ + @Cron(CronExpression.EVERY_HOUR) + async aggregate1hMetrics(): Promise { + try { + const now = new Date(); + const bucketStart = this.getBucketStart( + new Date(now.getTime() - 60 * 60000), + '1h', + ); + const bucketEnd = this.getBucketEnd(bucketStart, '1h'); + + const result = await this.aggregateMetrics('1h', bucketStart, bucketEnd); + + if (result.metricsCreated > 0) { + this.logger.log( + `Aggregated ${result.metricsCreated} 1h metrics for ${bucketStart.toISOString()}`, + ); + } + } catch (error) { + this.logger.error(`1h aggregation failed: ${error.message}`); + } + } + + /** + * Aggregate daily metrics (runs at midnight) + */ + @Cron(CronExpression.EVERY_DAY_AT_MIDNIGHT) + async aggregate1dMetrics(): Promise { + try { + const now = new Date(); + const bucketStart = this.getBucketStart( + new Date(now.getTime() - 24 * 60 * 60000), + '1d', + ); + const bucketEnd = this.getBucketEnd(bucketStart, '1d'); + + const result = await this.aggregateMetrics('1d', bucketStart, bucketEnd); + + if (result.metricsCreated > 0) { + this.logger.log( + `Aggregated ${result.metricsCreated} 1d metrics for ${bucketStart.toISOString()}`, + ); + } + } catch (error) { + this.logger.error(`1d aggregation failed: ${error.message}`); + } + } + + /** + * Clean up old raw metrics (runs daily at 2 AM) + */ + @Cron('0 2 * * *') + async cleanupOldMetrics(): Promise { + const retentionDays = this.configService.get( + 'METRICS_RETENTION_DAYS', + 30, + ); + + const cutoffDate = new Date(); + cutoffDate.setDate(cutoffDate.getDate() - retentionDays); + + try { + const [deletedWorkflow, deletedStep, deletedSnapshots] = + await Promise.all([ + this.prisma.workflowExecutionMetric.deleteMany({ + where: { timestamp: { lt: cutoffDate } }, + }), + this.prisma.workflowStepMetric.deleteMany({ + where: { timestamp: { lt: cutoffDate } }, + }), + // Keep aggregated snapshots longer (90 days) + this.prisma.metricsSnapshot.deleteMany({ + where: { + bucketStart: { + lt: new Date( + cutoffDate.getTime() - 60 * 24 * 60 * 60 * 1000, + ), + }, + }, + }), + ]); + + this.logger.log( + `Cleaned up old metrics: ${deletedWorkflow.count} workflow, ${deletedStep.count} step, ${deletedSnapshots.count} snapshots`, + ); + } catch (error) { + this.logger.error(`Metrics cleanup failed: ${error.message}`); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/metrics-collector.service.ts b/packages/bytebot-workflow-orchestrator/src/services/metrics-collector.service.ts new file mode 100644 index 000000000..c67c7b4bb --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/metrics-collector.service.ts @@ -0,0 +1,619 @@ +/** + * Metrics Collector Service + * v1.0.0: Phase 8 Advanced Analytics Dashboard + * v1.0.1: Fixed PrismaClientValidationError - defensive event handling + * + * Captures workflow and step execution metrics for analytics and dashboards. + * Uses event-driven collection to capture metrics in real-time as workflows execute. + * + * Key responsibilities: + * - Record workflow execution start/complete/fail events + * - Record step execution metrics + * - Integrate with Phase 7 agent metrics + * - Provide real-time metric emission for streaming dashboards + * + * v1.0.1 Changes: + * - Handle both workflowRunId and workflowId property names in events + * - Guard against undefined IDs before Prisma queries + * - Fetch missing data from DB when not provided in events + * - Handle both 'success' (boolean) and 'status' (enum) formats + * - Graceful skip when required data is unavailable + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; + +// Metric types +export enum MetricType { + WORKFLOW_EXECUTION = 'workflow_execution', + WORKFLOW_STEP = 'workflow_step', + AGENT_TASK = 'agent_task', + ERROR_RATE = 'error_rate', + THROUGHPUT = 'throughput', +} + +// Workflow execution metric input +export interface WorkflowExecutionMetricInput { + workflowRunId: string; + tenantId: string; + workflowName: string; + templateId?: string; + status: 'STARTED' | 'COMPLETED' | 'FAILED' | 'CANCELLED'; + startedAt: Date; + completedAt?: Date; + durationMs?: number; + nodeCount?: number; + completedNodeCount?: number; + failedNodeCount?: number; + retriedNodeCount?: number; + agentId?: string; + agentName?: string; + reassignments?: number; + errorType?: string; + errorMessage?: string; + peakMemoryMb?: number; + avgCpuPercent?: number; + tags?: Record; +} + +// Step execution metric input +export interface StepMetricInput { + nodeId: string; + nodeRunId: string; + workflowRunId: string; + tenantId: string; + stepName: string; + stepType: string; + status: 'STARTED' | 'COMPLETED' | 'FAILED' | 'SKIPPED'; + startedAt: Date; + completedAt?: Date; + durationMs?: number; + queueTimeMs?: number; + attempt?: number; + retryCount?: number; + toolsUsed?: string[]; + highRiskTools?: string[]; + agentId?: string; + agentName?: string; + memoryMb?: number; + cpuPercent?: number; + errorType?: string; + errorMessage?: string; +} + +// Event payloads +export interface WorkflowStartedEvent { + workflowRunId: string; + tenantId: string; + workflowName: string; + templateId?: string; + nodeCount: number; +} + +export interface WorkflowCompletedEvent { + workflowRunId: string; + tenantId: string; + status: 'COMPLETED' | 'FAILED' | 'CANCELLED'; + completedNodeCount: number; + failedNodeCount: number; + retriedNodeCount: number; + error?: { type: string; message: string }; +} + +export interface NodeStartedEvent { + nodeId: string; + nodeRunId: string; + workflowRunId: string; + tenantId: string; + stepName: string; + stepType: string; + attempt: number; + agentId?: string; + agentName?: string; +} + +export interface NodeCompletedEvent { + nodeId: string; + nodeRunId: string; + workflowRunId: string; + tenantId: string; + status: 'SUCCEEDED' | 'FAILED' | 'SKIPPED'; + durationMs: number; + toolsUsed?: string[]; + highRiskTools?: string[]; + error?: { type: string; message: string }; +} + +@Injectable() +export class MetricsCollectorService implements OnModuleInit { + private readonly logger = new Logger(MetricsCollectorService.name); + private workflowStartTimes = new Map(); + + constructor( + private readonly prisma: PrismaService, + private readonly eventEmitter: EventEmitter2, + ) {} + + onModuleInit() { + this.logger.log('Metrics Collector Service initialized'); + } + + /** + * Record a workflow execution metric + */ + async recordWorkflowExecution( + input: WorkflowExecutionMetricInput, + ): Promise { + try { + await this.prisma.workflowExecutionMetric.create({ + data: { + workflowRunId: input.workflowRunId, + tenantId: input.tenantId, + workflowName: input.workflowName, + templateId: input.templateId, + status: input.status, + startedAt: input.startedAt, + completedAt: input.completedAt, + durationMs: input.durationMs, + nodeCount: input.nodeCount ?? 0, + completedNodeCount: input.completedNodeCount ?? 0, + failedNodeCount: input.failedNodeCount ?? 0, + retriedNodeCount: input.retriedNodeCount ?? 0, + agentId: input.agentId, + agentName: input.agentName, + reassignments: input.reassignments ?? 0, + errorType: input.errorType, + errorMessage: input.errorMessage, + peakMemoryMb: input.peakMemoryMb, + avgCpuPercent: input.avgCpuPercent, + tags: input.tags ?? {}, + }, + }); + + // Emit metric event for real-time streaming + this.eventEmitter.emit('metrics.workflow.recorded', { + type: MetricType.WORKFLOW_EXECUTION, + ...input, + timestamp: new Date(), + }); + + this.logger.debug( + `Recorded workflow execution metric: ${input.workflowRunId} (${input.status})`, + ); + } catch (error) { + this.logger.error( + `Failed to record workflow execution metric: ${error.message}`, + error.stack, + ); + } + } + + /** + * Record a step execution metric + */ + async recordStepExecution(input: StepMetricInput): Promise { + try { + await this.prisma.workflowStepMetric.create({ + data: { + nodeId: input.nodeId, + nodeRunId: input.nodeRunId, + workflowRunId: input.workflowRunId, + tenantId: input.tenantId, + stepName: input.stepName, + stepType: input.stepType, + status: input.status, + startedAt: input.startedAt, + completedAt: input.completedAt, + durationMs: input.durationMs, + queueTimeMs: input.queueTimeMs, + attempt: input.attempt ?? 1, + retryCount: input.retryCount ?? 0, + toolsUsed: input.toolsUsed ?? [], + highRiskTools: input.highRiskTools ?? [], + agentId: input.agentId, + agentName: input.agentName, + memoryMb: input.memoryMb, + cpuPercent: input.cpuPercent, + errorType: input.errorType, + errorMessage: input.errorMessage, + }, + }); + + // Emit metric event for real-time streaming + this.eventEmitter.emit('metrics.step.recorded', { + type: MetricType.WORKFLOW_STEP, + ...input, + timestamp: new Date(), + }); + + this.logger.debug( + `Recorded step metric: ${input.nodeRunId} (${input.status})`, + ); + } catch (error) { + this.logger.error( + `Failed to record step metric: ${error.message}`, + error.stack, + ); + } + } + + // ========================================================================= + // Event Handlers + // ========================================================================= + + @OnEvent('workflow.started') + async handleWorkflowStarted(event: WorkflowStartedEvent & { workflowId?: string }): Promise { + // v1.0.1: Handle both workflowRunId and workflowId property names + const workflowRunId = event.workflowRunId || event.workflowId; + + if (!workflowRunId) { + this.logger.warn('workflow.started event missing workflowRunId/workflowId, skipping metrics'); + return; + } + + const startTime = new Date(); + this.workflowStartTimes.set(workflowRunId, startTime); + + // Fetch additional data from DB if not provided in event + let tenantId = event.tenantId; + let workflowName = event.workflowName; + let templateId = event.templateId; + let nodeCount = event.nodeCount; + + if (!tenantId || !workflowName) { + try { + const workflowRun = await this.prisma.workflowRun.findUnique({ + where: { id: workflowRunId }, + include: { nodes: true }, + }); + if (workflowRun) { + tenantId = tenantId || workflowRun.tenantId; + workflowName = workflowName || workflowRun.name; + templateId = templateId || workflowRun.templateId || undefined; + nodeCount = nodeCount ?? workflowRun.nodes?.length ?? 0; + } + } catch (error) { + this.logger.debug(`Could not fetch workflow details for ${workflowRunId}: ${error.message}`); + } + } + + if (!tenantId) { + this.logger.warn(`workflow.started: Missing tenantId for ${workflowRunId}, skipping metrics`); + return; + } + + await this.recordWorkflowExecution({ + workflowRunId, + tenantId, + workflowName: workflowName || 'Unknown', + templateId, + status: 'STARTED', + startedAt: startTime, + nodeCount: nodeCount ?? 0, + }); + } + + @OnEvent('workflow.completed') + async handleWorkflowCompleted(event: WorkflowCompletedEvent & { workflowId?: string; error?: any }): Promise { + // v1.0.1: Handle both workflowRunId and workflowId property names + const workflowRunId = event.workflowRunId || event.workflowId; + + if (!workflowRunId) { + this.logger.warn('workflow.completed event missing workflowRunId/workflowId, skipping metrics'); + return; + } + + const startTime = this.workflowStartTimes.get(workflowRunId); + const completedAt = new Date(); + const durationMs = startTime + ? completedAt.getTime() - startTime.getTime() + : undefined; + + // Get workflow info from DB - safely handle if workflow doesn't exist + let workflowRun: any = null; + try { + workflowRun = await this.prisma.workflowRun.findUnique({ + where: { id: workflowRunId }, + include: { nodes: true }, + }); + } catch (error) { + this.logger.debug(`Could not fetch workflow for ${workflowRunId}: ${error.message}`); + } + + // Get tenantId from event or DB + const tenantId = event.tenantId || workflowRun?.tenantId; + if (!tenantId) { + this.logger.warn(`workflow.completed: Missing tenantId for ${workflowRunId}, skipping metrics`); + this.workflowStartTimes.delete(workflowRunId); + return; + } + + // Calculate node counts from DB if not provided in event + let completedNodeCount = event.completedNodeCount; + let failedNodeCount = event.failedNodeCount; + let retriedNodeCount = event.retriedNodeCount; + + if (workflowRun?.nodes && (completedNodeCount === undefined || failedNodeCount === undefined)) { + const nodes = workflowRun.nodes; + completedNodeCount = completedNodeCount ?? nodes.filter((n: any) => n.status === 'SUCCEEDED').length; + failedNodeCount = failedNodeCount ?? nodes.filter((n: any) => n.status === 'FAILED').length; + retriedNodeCount = retriedNodeCount ?? nodes.filter((n: any) => (n.retryCount || 0) > 0).length; + } + + // v1.0.1: Handle error as string or object + const errorType = typeof event.error === 'object' ? event.error?.type : undefined; + const errorMessage = typeof event.error === 'string' ? event.error : event.error?.message; + + await this.recordWorkflowExecution({ + workflowRunId, + tenantId, + workflowName: workflowRun?.name ?? 'Unknown', + templateId: workflowRun?.templateId ?? undefined, + status: event.status, + startedAt: startTime ?? completedAt, + completedAt, + durationMs, + completedNodeCount: completedNodeCount ?? 0, + failedNodeCount: failedNodeCount ?? 0, + retriedNodeCount: retriedNodeCount ?? 0, + errorType, + errorMessage, + }); + + // Cleanup + this.workflowStartTimes.delete(workflowRunId); + } + + @OnEvent('node.started') + async handleNodeStarted(event: NodeStartedEvent & { workflowId?: string }): Promise { + // v1.0.1: Handle both workflowRunId and workflowId property names + const workflowRunId = event.workflowRunId || event.workflowId; + + if (!event.nodeId) { + this.logger.warn('node.started event missing nodeId, skipping metrics'); + return; + } + + // Fetch tenant and node info from DB if not provided + let tenantId = event.tenantId; + let stepName = event.stepName; + let stepType = event.stepType; + let nodeRunId = event.nodeRunId; + + if (!tenantId || !stepName) { + try { + // Try to get info from the node + const node = await this.prisma.workflowNode.findUnique({ + where: { id: event.nodeId }, + include: { workflowRun: true }, + }); + if (node) { + tenantId = tenantId || node.workflowRun?.tenantId; + stepName = stepName || node.name; + stepType = stepType || node.type; + } + } catch (error) { + this.logger.debug(`Could not fetch node details for ${event.nodeId}: ${error.message}`); + } + } + + if (!tenantId) { + this.logger.debug(`node.started: Missing tenantId for node ${event.nodeId}, skipping metrics`); + return; + } + + await this.recordStepExecution({ + nodeId: event.nodeId, + nodeRunId: nodeRunId || event.nodeId, // Use nodeId as fallback + workflowRunId: workflowRunId || '', + tenantId, + stepName: stepName || 'Unknown', + stepType: stepType || 'TASK', + status: 'STARTED', + startedAt: new Date(), + attempt: event.attempt ?? 1, + agentId: event.agentId, + agentName: event.agentName, + }); + } + + @OnEvent('node.completed') + async handleNodeCompleted(event: NodeCompletedEvent & { workflowId?: string; success?: boolean }): Promise { + // v1.0.1: Handle both workflowRunId and workflowId property names + const workflowRunId = event.workflowRunId || event.workflowId; + + if (!event.nodeId) { + this.logger.warn('node.completed event missing nodeId, skipping metrics'); + return; + } + + // v1.0.1: Handle both 'status' (enum) and 'success' (boolean) formats + const statusMap: Record = { + SUCCEEDED: 'COMPLETED', + FAILED: 'FAILED', + SKIPPED: 'SKIPPED', + }; + + let status: 'COMPLETED' | 'FAILED' | 'SKIPPED'; + if (event.status) { + status = statusMap[event.status] ?? 'COMPLETED'; + } else if (event.success !== undefined) { + status = event.success ? 'COMPLETED' : 'FAILED'; + } else { + status = 'COMPLETED'; + } + + // Get node info from DB - handle undefined nodeRunId safely + let nodeRun: any = null; + let tenantId = event.tenantId; + let stepName: string | undefined; + let stepType: string | undefined; + + // Try to fetch from nodeRunId first, then fall back to nodeId + if (event.nodeRunId) { + try { + nodeRun = await this.prisma.workflowNodeRun.findUnique({ + where: { id: event.nodeRunId }, + include: { node: true }, + }); + } catch (error) { + this.logger.debug(`Could not fetch nodeRun ${event.nodeRunId}: ${error.message}`); + } + } + + // Fall back to fetching node directly + if (!nodeRun && event.nodeId) { + try { + const node = await this.prisma.workflowNode.findUnique({ + where: { id: event.nodeId }, + include: { workflowRun: true }, + }); + if (node) { + tenantId = tenantId || node.workflowRun?.tenantId; + stepName = node.name; + stepType = node.type; + } + } catch (error) { + this.logger.debug(`Could not fetch node ${event.nodeId}: ${error.message}`); + } + } else if (nodeRun) { + stepName = nodeRun.node?.name; + stepType = nodeRun.node?.type; + } + + if (!tenantId) { + this.logger.debug(`node.completed: Missing tenantId for node ${event.nodeId}, skipping metrics`); + return; + } + + const completedAt = new Date(); + const startedAt = nodeRun?.startedAt ?? completedAt; + + await this.recordStepExecution({ + nodeId: event.nodeId, + nodeRunId: event.nodeRunId || event.nodeId, // Use nodeId as fallback + workflowRunId: workflowRunId || '', + tenantId, + stepName: stepName ?? 'Unknown', + stepType: stepType ?? 'TASK', + status, + startedAt, + completedAt, + durationMs: event.durationMs, + toolsUsed: event.toolsUsed, + highRiskTools: event.highRiskTools, + errorType: event.error?.type, + errorMessage: event.error?.message, + }); + } + + // ========================================================================= + // Utility Methods + // ========================================================================= + + /** + * Get real-time metrics summary for the last N minutes + */ + async getRealtimeSummary( + tenantId: string, + minutesBack: number = 5, + ): Promise<{ + workflowsStarted: number; + workflowsCompleted: number; + workflowsFailed: number; + avgDurationMs: number; + stepsCompleted: number; + stepsFailed: number; + }> { + const since = new Date(Date.now() - minutesBack * 60 * 1000); + + const [workflowMetrics, stepMetrics] = await Promise.all([ + this.prisma.workflowExecutionMetric.groupBy({ + by: ['status'], + where: { + tenantId, + timestamp: { gte: since }, + }, + _count: true, + _avg: { durationMs: true }, + }), + this.prisma.workflowStepMetric.groupBy({ + by: ['status'], + where: { + tenantId, + timestamp: { gte: since }, + }, + _count: true, + }), + ]); + + const workflowsByStatus = Object.fromEntries( + workflowMetrics.map((m) => [m.status, m._count]), + ); + + const stepsByStatus = Object.fromEntries( + stepMetrics.map((m) => [m.status, m._count]), + ); + + const avgDuration = + workflowMetrics.find((m) => m.status === 'COMPLETED')?._avg?.durationMs ?? + 0; + + return { + workflowsStarted: workflowsByStatus['STARTED'] ?? 0, + workflowsCompleted: workflowsByStatus['COMPLETED'] ?? 0, + workflowsFailed: workflowsByStatus['FAILED'] ?? 0, + avgDurationMs: avgDuration, + stepsCompleted: stepsByStatus['COMPLETED'] ?? 0, + stepsFailed: stepsByStatus['FAILED'] ?? 0, + }; + } + + /** + * Get workflow execution history for a tenant + */ + async getWorkflowHistory( + tenantId: string, + options: { + limit?: number; + offset?: number; + status?: string; + workflowName?: string; + since?: Date; + until?: Date; + } = {}, + ): Promise<{ + executions: any[]; + total: number; + }> { + const { limit = 50, offset = 0, status, workflowName, since, until } = options; + + const where: any = { tenantId }; + + if (status) { + where.status = status; + } + if (workflowName) { + where.workflowName = { contains: workflowName, mode: 'insensitive' }; + } + if (since) { + where.timestamp = { ...where.timestamp, gte: since }; + } + if (until) { + where.timestamp = { ...where.timestamp, lte: until }; + } + + const [executions, total] = await Promise.all([ + this.prisma.workflowExecutionMetric.findMany({ + where, + orderBy: { timestamp: 'desc' }, + take: limit, + skip: offset, + }), + this.prisma.workflowExecutionMetric.count({ where }), + ]); + + return { executions, total }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/node-executor.service.ts b/packages/bytebot-workflow-orchestrator/src/services/node-executor.service.ts new file mode 100644 index 000000000..028025f8e --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/node-executor.service.ts @@ -0,0 +1,571 @@ +/** + * Node Executor Service + * v2.0.0: Phase 7 Multi-Agent Orchestration + * + * Executes workflow nodes by dispatching tasks to agents. + * Now supports multi-agent routing via AgentRouterService. + * + * Responsibilities: + * - Execute different node types (TASK, DECISION, PARALLEL, WAIT) + * - Route tasks to optimal agents using AgentRouterService + * - Track task assignments for monitoring and debugging + * - Collect results and update node status + * - Release workspace locks after execution + * - Handle agent failover for resilience + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import axios, { AxiosInstance } from 'axios'; +import { PrismaService } from './prisma.service'; +import { WorkspaceService } from './workspace.service'; +import { NodeStatus } from './workflow.service'; +import { AgentRouterService, RoutingRequest } from './agent-router.service'; +import { AgentRegistryService, AgentInfo } from './agent-registry.service'; + +export interface NodeExecutionResult { + success: boolean; + output?: any; + error?: string; + artifacts?: Array<{ + type: string; + path: string; + name: string; + }>; + agentId?: string; + routingReason?: string; +} + +@Injectable() +export class NodeExecutorService { + private readonly logger = new Logger(NodeExecutorService.name); + private readonly defaultAgentClient: AxiosInstance; + private readonly taskTimeout: number; + private readonly enableMultiAgent: boolean; + + constructor( + private configService: ConfigService, + private prisma: PrismaService, + private workspaceService: WorkspaceService, + private eventEmitter: EventEmitter2, + private agentRouter: AgentRouterService, + private agentRegistry: AgentRegistryService, + ) { + // Default agent URL (backward compatibility) + const agentUrl = this.configService.get( + 'AGENT_SERVICE_URL', + 'http://bytebot-agent:8080', + ); + + // Task execution timeout (default: 10 minutes) + this.taskTimeout = parseInt( + this.configService.get('TASK_EXECUTION_TIMEOUT_MS', '600000'), + 10, + ); + + // Multi-agent mode enable flag + this.enableMultiAgent = + this.configService.get('MULTI_AGENT_ENABLED', 'true') === 'true'; + + this.defaultAgentClient = axios.create({ + baseURL: agentUrl, + timeout: this.taskTimeout, + headers: { + 'Content-Type': 'application/json', + }, + }); + + this.logger.log( + `NodeExecutor initialized (multiAgent: ${this.enableMultiAgent}, timeout: ${this.taskTimeout}ms)`, + ); + } + + /** + * Create an axios client for a specific agent + */ + private createAgentClient(agent: AgentInfo): AxiosInstance { + return axios.create({ + baseURL: agent.endpoint, + timeout: this.taskTimeout, + headers: { + 'Content-Type': 'application/json', + 'X-Agent-Id': agent.id, + 'X-Agent-Name': agent.name, + }, + }); + } + + /** + * Execute a workflow node + * + * This is the main entry point called by the scheduler. + * It routes to the appropriate execution handler based on node type. + */ + async executeNode(node: any, workflow: any): Promise { + this.logger.log(`Executing node ${node.id} (${node.type}): ${node.name}`); + + const startTime = Date.now(); + + try { + let result: NodeExecutionResult; + + switch (node.type) { + case 'TASK': + result = await this.executeTaskNode(node, workflow); + break; + case 'DECISION': + result = await this.executeDecisionNode(node, workflow); + break; + case 'PARALLEL': + result = await this.executeParallelNode(node, workflow); + break; + case 'WAIT': + result = await this.executeWaitNode(node, workflow); + break; + default: + throw new Error(`Unknown node type: ${node.type}`); + } + + // Update node status + await this.prisma.workflowNode.update({ + where: { id: node.id }, + data: { + status: result.success ? NodeStatus.COMPLETED : NodeStatus.FAILED, + output: result.output, + error: result.error, + completedAt: new Date(), + durationMs: Date.now() - startTime, + }, + }); + + // Release workspace lock + await this.workspaceService.releaseLock(workflow.workspaceId, node.id); + + this.eventEmitter.emit('node.completed', { + nodeId: node.id, + workflowId: workflow.id, + success: result.success, + durationMs: Date.now() - startTime, + }); + + this.logger.log( + `Node ${node.id} completed in ${Date.now() - startTime}ms: ${ + result.success ? 'success' : 'failed' + }`, + ); + } catch (error: any) { + this.logger.error(`Node ${node.id} execution failed: ${error.message}`); + + // Update node status to failed + await this.prisma.workflowNode.update({ + where: { id: node.id }, + data: { + status: NodeStatus.FAILED, + error: error.message, + completedAt: new Date(), + durationMs: Date.now() - startTime, + }, + }); + + // Release workspace lock + await this.workspaceService.releaseLock(workflow.workspaceId, node.id); + + this.eventEmitter.emit('node.failed', { + nodeId: node.id, + workflowId: workflow.id, + error: error.message, + }); + } + } + + /** + * Execute a TASK node by dispatching to agent + * Phase 7: Now supports multi-agent routing + */ + private async executeTaskNode( + node: any, + workflow: any, + ): Promise { + const config = node.config as any; + + // Get workspace desktop endpoint + const workspaceStatus = await this.workspaceService.getWorkspaceDesktopStatus( + workflow.workspaceId, + ); + + if (workspaceStatus.status !== 'READY') { + throw new Error( + `Workspace not ready: ${workspaceStatus.status} - ${workspaceStatus.message}`, + ); + } + + // Build task request for agent + const taskRequest = { + taskId: node.id, + workflowId: workflow.id, + workspaceId: workflow.workspaceId, + desktopEndpoint: workspaceStatus.desktopEndpoint, + vncEndpoint: workspaceStatus.vncEndpoint, + prompt: config.prompt, + tools: config.tools || [], + maxIterations: config.maxIterations || 50, + timeout: config.timeout || 300000, // 5 minutes default + context: { + workflowName: workflow.name, + nodeName: node.name, + previousOutputs: await this.getPreviousOutputs(node), + }, + }; + + // Select agent using multi-agent routing + let selectedAgent: AgentInfo | null = null; + let routingReason = 'default'; + let agentClient: AxiosInstance = this.defaultAgentClient; + + if (this.enableMultiAgent) { + // Build routing request + const routingRequest: RoutingRequest = { + nodeId: node.id, + workflowId: workflow.id, + workspaceId: workflow.workspaceId, + requiredTools: config.tools || [], + preferredAgentId: config.preferredAgentId, + affinityNodeIds: node.dependencies as string[], + requiresExclusiveWorkspace: !config.gatewayToolsOnly, + }; + + // Route to optimal agent + const routingResult = await this.agentRouter.routeTask(routingRequest); + + if (routingResult) { + selectedAgent = routingResult.agent; + routingReason = routingResult.reason; + agentClient = this.createAgentClient(selectedAgent); + + this.logger.log( + `Task ${node.id} routed to agent ${selectedAgent.name} ` + + `(${selectedAgent.id}) via ${routingReason}`, + ); + + // Record the assignment + await this.agentRouter.recordAssignment( + node.id, + selectedAgent.id, + routingReason, + ); + } else { + this.logger.warn( + `No agent available for task ${node.id}, falling back to default`, + ); + } + } + + // Dispatch to agent + try { + const response = await agentClient.post('/api/v1/tasks/execute', taskRequest); + + // Mark assignment as completed + if (selectedAgent) { + await this.agentRouter.completeAssignment( + node.id, + selectedAgent.id, + response.data.success, + response.data.output, + response.data.error, + ); + } + + return { + success: response.data.success, + output: response.data.output, + error: response.data.error, + artifacts: response.data.artifacts, + agentId: selectedAgent?.id, + routingReason, + }; + } catch (error: any) { + // Mark assignment as failed + if (selectedAgent) { + await this.agentRouter.completeAssignment( + node.id, + selectedAgent.id, + false, + undefined, + error.response?.data?.message || error.message, + ); + } + + // Try failover to alternative agent if available + if (this.enableMultiAgent && selectedAgent) { + const failoverResult = await this.tryFailover( + node, + workflow, + taskRequest, + selectedAgent.id, + error.message, + ); + if (failoverResult) { + return failoverResult; + } + } + + return { + success: false, + error: error.response?.data?.message || error.message, + agentId: selectedAgent?.id, + routingReason, + }; + } + } + + /** + * Try to failover to an alternative agent + */ + private async tryFailover( + node: any, + workflow: any, + taskRequest: any, + failedAgentId: string, + failureReason: string, + ): Promise { + const config = node.config as any; + + // Build routing request excluding failed agent + const routingRequest: RoutingRequest = { + nodeId: node.id, + workflowId: workflow.id, + workspaceId: workflow.workspaceId, + requiredTools: config.tools || [], + requiresExclusiveWorkspace: !config.gatewayToolsOnly, + }; + + // Get alternative agent + const routingResult = await this.agentRouter.routeTask(routingRequest); + + if ( + !routingResult || + routingResult.agent.id === failedAgentId || + routingResult.alternativeAgents.length === 0 + ) { + return null; + } + + // Use the first alternative + const alternativeAgent = routingResult.alternativeAgents[0].agent; + + this.logger.log( + `Failing over task ${node.id} from agent ${failedAgentId} ` + + `to ${alternativeAgent.name} (${alternativeAgent.id}): ${failureReason}`, + ); + + // Reassign the task + await this.agentRouter.reassignTask( + node.id, + failedAgentId, + alternativeAgent.id, + `failover: ${failureReason}`, + ); + + // Create client and dispatch + const agentClient = this.createAgentClient(alternativeAgent); + + try { + const response = await agentClient.post('/api/v1/tasks/execute', taskRequest); + + await this.agentRouter.completeAssignment( + node.id, + alternativeAgent.id, + response.data.success, + response.data.output, + response.data.error, + ); + + return { + success: response.data.success, + output: response.data.output, + error: response.data.error, + artifacts: response.data.artifacts, + agentId: alternativeAgent.id, + routingReason: 'failover', + }; + } catch (error: any) { + await this.agentRouter.completeAssignment( + node.id, + alternativeAgent.id, + false, + undefined, + error.response?.data?.message || error.message, + ); + + return null; + } + } + + /** + * Execute a DECISION node (conditional branching) + */ + private async executeDecisionNode( + node: any, + workflow: any, + ): Promise { + const config = node.config as any; + + // Get previous outputs for condition evaluation + const previousOutputs = await this.getPreviousOutputs(node); + + // Simple condition evaluation + // In a real implementation, this would use a proper expression evaluator + let selectedBranch: string | null = null; + + for (const condition of config.conditions || []) { + if (this.evaluateCondition(condition.expression, previousOutputs)) { + selectedBranch = condition.targetNodeId; + break; + } + } + + // Use default branch if no condition matched + if (!selectedBranch && config.defaultBranch) { + selectedBranch = config.defaultBranch; + } + + if (!selectedBranch) { + return { + success: false, + error: 'No matching condition and no default branch', + }; + } + + // Mark the selected branch as ready + await this.prisma.workflowNode.update({ + where: { id: selectedBranch }, + data: { status: NodeStatus.READY }, + }); + + // Skip non-selected branches + const allBranches = [ + ...(config.conditions?.map((c: any) => c.targetNodeId) || []), + config.defaultBranch, + ].filter(Boolean); + + const skippedBranches = allBranches.filter((b: string) => b !== selectedBranch); + if (skippedBranches.length > 0) { + await this.prisma.workflowNode.updateMany({ + where: { id: { in: skippedBranches } }, + data: { status: NodeStatus.SKIPPED }, + }); + } + + return { + success: true, + output: { + selectedBranch, + skippedBranches, + }, + }; + } + + /** + * Execute a PARALLEL node (fan-out to multiple tasks) + */ + private async executeParallelNode( + node: any, + workflow: any, + ): Promise { + const config = node.config as any; + + // Mark all parallel branches as ready + const parallelNodeIds = config.parallelNodeIds || []; + + await this.prisma.workflowNode.updateMany({ + where: { id: { in: parallelNodeIds } }, + data: { status: NodeStatus.READY }, + }); + + return { + success: true, + output: { + dispatchedNodes: parallelNodeIds, + }, + }; + } + + /** + * Execute a WAIT node (delay or wait for condition) + */ + private async executeWaitNode( + node: any, + workflow: any, + ): Promise { + const config = node.config as any; + + // Simple delay wait + if (config.delayMs) { + await new Promise((resolve) => setTimeout(resolve, config.delayMs)); + } + + // In a real implementation, this could wait for: + // - External webhook + // - Time-based condition + // - Manual approval + // - Resource availability + + return { + success: true, + output: { + waitedMs: config.delayMs || 0, + }, + }; + } + + /** + * Get outputs from previous nodes (dependencies) + */ + private async getPreviousOutputs(node: any): Promise> { + const dependencies = node.dependencies as string[]; + if (!dependencies || dependencies.length === 0) { + return {}; + } + + const previousNodes = await this.prisma.workflowNode.findMany({ + where: { id: { in: dependencies } }, + }); + + const outputs: Record = {}; + for (const prevNode of previousNodes) { + outputs[prevNode.id] = prevNode.output; + outputs[prevNode.name] = prevNode.output; + } + + return outputs; + } + + /** + * Simple condition evaluation + * In production, use a proper expression evaluator + */ + private evaluateCondition( + expression: string, + context: Record, + ): boolean { + try { + // Very simple evaluation - just check for truthy values + // Production should use a sandboxed expression evaluator + const parts = expression.split('.'); + let value: any = context; + + for (const part of parts) { + if (value && typeof value === 'object') { + value = value[part]; + } else { + return false; + } + } + + return Boolean(value); + } catch { + return false; + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.goal-intake-gate-before-planning.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.goal-intake-gate-before-planning.spec.ts new file mode 100644 index 000000000..f203008d9 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.goal-intake-gate-before-planning.spec.ts @@ -0,0 +1,63 @@ +import { OrchestratorLoopService } from './orchestrator-loop.service'; +import { GoalRunPhase } from '@prisma/client'; + +describe('OrchestratorLoopService Goal Intake gate before planning', () => { + it('does not enter PLANNING when GoalSpec is INCOMPLETE', async () => { + const prisma = { + goalRun: { + updateMany: jest.fn(), + }, + } as any; + + const goalRunService = { + updatePhase: jest.fn(), + failGoalRun: jest.fn(), + } as any; + + const plannerService = { + generateInitialPlan: jest.fn(), + } as any; + + const goalIntakeService = { + ensureGoalSpecReadyForPlanning: jest.fn(), + requestGoalIntakeFromPlannerError: jest.fn(), + } as any; + + const service = new OrchestratorLoopService( + prisma, + goalRunService, + plannerService, + goalIntakeService, + {} as any, + {} as any, + { isInBackoff: () => false } as any, + { shouldSkipExecution: () => false } as any, + {} as any, + {} as any, + { emit: jest.fn() } as any, + { get: jest.fn((_key: string, fallback: any) => fallback) } as any, + {} as any, + {} as any, + ); + + goalIntakeService.ensureGoalSpecReadyForPlanning.mockResolvedValueOnce({ + ready: false, + goalSpecId: 'gs-1', + promptId: 'p-1', + }); + + await (service as any).executePlanningPhase({ id: 'gr-1', tenantId: 't-1', phase: GoalRunPhase.INITIALIZING }); + + expect(goalIntakeService.ensureGoalSpecReadyForPlanning).toHaveBeenCalledWith({ + goalRunId: 'gr-1', + tenantId: 't-1', + }); + + // No planning lock acquired, no plan generation started + expect(prisma.goalRun.updateMany).not.toHaveBeenCalled(); + expect(plannerService.generateInitialPlan).not.toHaveBeenCalled(); + expect(goalRunService.updatePhase).not.toHaveBeenCalled(); + expect(goalRunService.failGoalRun).not.toHaveBeenCalled(); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.goal-intake-on-replan.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.goal-intake-on-replan.spec.ts new file mode 100644 index 000000000..a7f003f8d --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.goal-intake-on-replan.spec.ts @@ -0,0 +1,65 @@ +import { OrchestratorLoopService } from './orchestrator-loop.service'; +import { PlannerFirstStepUserInputError } from './planner.errors'; +import { StepType } from '@prisma/client'; + +describe('OrchestratorLoopService GoalIntake on replanning', () => { + it('converts prompt-first replans into goal intake prompt', async () => { + const prisma = {} as any; + + const goalRunService = { + createActivityEvent: jest.fn(), + updatePhase: jest.fn(), + failGoalRun: jest.fn(), + } as any; + + const plannerService = { + generateReplan: jest.fn(), + } as any; + + const goalIntakeService = { + requestGoalIntakeFromPlannerError: jest.fn(), + } as any; + + const service = new OrchestratorLoopService( + prisma, + goalRunService, + plannerService, + goalIntakeService, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + { emit: jest.fn() } as any, + { get: jest.fn((_key: string, fallback: any) => fallback) } as any, + {} as any, + {} as any, + ); + + const error = new PlannerFirstStepUserInputError({ + mode: 'replan', + reason: 'USER_INPUT_REQUIRED_TYPE', + firstStep: { + description: 'Ask the user for missing details', + type: StepType.USER_INPUT_REQUIRED, + suggestedTools: ['ASK_USER'], + requiresDesktop: false, + }, + }); + + plannerService.generateReplan.mockRejectedValueOnce(error); + + const goalRun = { id: 'gr-1', tenantId: 't-1', workflowRunId: null } as any; + await (service as any).executeReplanningPhase(goalRun, 'Need more details', 'ci-failed'); + + expect(goalIntakeService.requestGoalIntakeFromPlannerError).toHaveBeenCalledWith({ + goalRunId: 'gr-1', + tenantId: 't-1', + error, + }); + expect(goalRunService.updatePhase).not.toHaveBeenCalled(); + expect(goalRunService.failGoalRun).not.toHaveBeenCalled(); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.heartbeat-waiting-provider.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.heartbeat-waiting-provider.spec.ts new file mode 100644 index 000000000..00a2d2cc0 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.heartbeat-waiting-provider.spec.ts @@ -0,0 +1,88 @@ +import { OrchestratorLoopService } from './orchestrator-loop.service'; +import { ChecklistItemStatus, GoalRunPhase } from '@prisma/client'; + +describe('OrchestratorLoopService heartbeat exhaustion → WAITING_PROVIDER', () => { + it('pauses safely (WAIT_PROVIDER) after heartbeat retry budget is exhausted', async () => { + const configService = { + get: jest.fn((key: string, fallback: any) => { + if (key === 'ORCHESTRATOR_RESTART_GRACE_MS') return '0'; + if (key === 'HEARTBEAT_MAX_RETRIES') return '2'; + if (key === 'HEARTBEAT_BASE_DELAY_MS') return '1'; + if (key === 'HEARTBEAT_MAX_DELAY_MS') return '1'; + return fallback; + }), + } as any; + + const eventEmitter = { emit: jest.fn() } as any; + + const taskDispatchService = { + getHeartbeatHealth: jest.fn(() => ({ + hasActiveDispatch: true, + isHealthy: false, + consecutiveUnhealthy: 99, + lastHeartbeat: undefined, + shouldTimeout: true, + })), + getStatusCheckHealth: jest.fn(() => ({ + hasActiveDispatch: true, + isHealthy: true, + consecutiveFailures: 0, + lastSuccessfulCheck: new Date(), + })), + getLastProgressTime: jest.fn(() => null), + } as any; + + const service = new OrchestratorLoopService( + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + taskDispatchService, + { isInBackoff: () => false } as any, + { shouldSkipExecution: () => false } as any, + {} as any, + {} as any, + eventEmitter, + configService, + {} as any, + {} as any, + ); + + const item = { + id: 'ci-1', + status: ChecklistItemStatus.IN_PROGRESS, + description: 'Desktop step', + startedAt: new Date(0), + heartbeatRetryCount: 0, + }; + + const goalRun: any = { + id: 'gr-1', + phase: GoalRunPhase.EXECUTING, + currentPlanVersion: 1, + planVersions: [ + { + checklistItems: [ + item, + ], + }, + ], + }; + + const d1 = await (service as any).makeDecision(goalRun); + item.heartbeatRetryCount = 1; + const d2 = await (service as any).makeDecision(goalRun); + item.heartbeatRetryCount = 2; + const d3 = await (service as any).makeDecision(goalRun); + + expect(d1.action).toBe('RETRY'); + expect(d2.action).toBe('RETRY'); + expect(d3).toEqual( + expect.objectContaining({ + action: 'WAIT_PROVIDER', + itemId: 'ci-1', + }), + ); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.service.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.service.ts new file mode 100644 index 000000000..640dcfc0c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.service.ts @@ -0,0 +1,2035 @@ +/** + * Orchestrator Loop Service + * v2.3.0: Non-blocking startup for Kubernetes probe success (CRITICAL FIX) + * - Defers resumeActiveGoalRuns() until after HTTP server binds + * - Uses setImmediate() pattern for event loop deferral + * - Fixes pod crash loop when initializing with running goal runs + * v2.2.0: Context-Preserving Replanning (Industry Standard Fix) + * - Manus-style checkpoint service integration + * - Updates checkpoint after each step completion + * - Enables recovery from any point without re-running successful steps + * v2.1.0: Option C Industry Standard Fix - Heartbeat timeout uses RETRY not REPLAN + * - Google-style failure classification (transient vs semantic) + * - Separate retry budgets for heartbeat (5) vs replan (3) + * - Manus-style error preservation for diagnostics + * - Fixes: Heartbeat gaps no longer exhaust replan budget + * v2.0.0: Phase E maintenance mode handling - graceful drain and recovery + * v1.9.0: Phase D UX improvements - delay notifications for user awareness + * v1.8.0: Phase C heartbeat-based timeout (replaces static TTL) + * v1.7.0: DB transient error resilience - graceful handling of DB restarts + * v1.6.0: Configurable loop interval (LOOP_INTERVAL_MS env var, default 5000ms) + * v1.5.0: DB-driven retry gating to prevent tight loop bug (CPU/DB thrashing) + * v1.1.1: Infrastructure failure retry (don't consume replan budget on infra issues) + * v1.1.0: Fixed runaway workspace creation bug with link-first pattern + * v1.0.1: Fixed race condition in planning phase with atomic phase transitions + * v1.0.0: Core PEVR (Plan-Execute-Verify-Replan) loop for Manus-style orchestration + * + * Responsibilities: + * - Run the main orchestration loop for goal runs + * - Coordinate between planner, executor, and verifier + * - Handle steering messages and user interventions + * - Manage phase transitions + * + * DB Transient Resilience (v1.7.0): + * - Wraps loop tick in DbTransientService.withTransientGuard() + * - Transient DB errors trigger backoff (5s→60s), not crash + * - Throttled logging (once per backoff window) + * - Activity events throttled (once per minute max) + * - Timeout/replan evaluation gated by DB availability + * + * Runaway Loop Fix (v1.1.0): + * - createWorkflowForGoalRun: LINK FIRST, provision second + * - Creates workflow record and links to goalRun BEFORE attempting desktop provisioning + * - Prevents infinite workspace creation when provisioning fails + * - Handles capacity issues with exponential backoff, not replanning + * + * @see Phase 2 fix: https://book.kubebuilder.io/reference/good-practices + * @see Backoff pattern: https://docs.aws.amazon.com/prescriptive-guidance/latest/cloud-design-patterns/retry-backoff.html + * + * Race Condition Fix (v1.0.1): + * - makeDecision: Only triggers PLAN for INITIALIZING phase, not PLANNING + * - executePlanningPhase: Uses atomic updateMany with conditional WHERE to + * ensure only one iteration can start planning (optimistic locking pattern) + */ + +import { Injectable, Logger, OnModuleInit, OnModuleDestroy } from '@nestjs/common'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import { GoalRunService, GoalRunPhase, GoalRunStatus } from './goal-run.service'; +import { PlannerService } from './planner.service'; +import { PlannerFirstStepUserInputError } from './planner.errors'; +import { GoalIntakeService } from './goal-intake.service'; +import { hasUserInteractionTool } from '../contracts/planner-tools'; +import { WorkflowService, WorkflowStatus, NodeStatus, WorkspaceProvisioningStatus } from './workflow.service'; +import { TaskDispatchService } from './task-dispatch.service'; +import { DbTransientService } from './db-transient.service'; +import { MaintenanceModeService, MaintenanceState } from './maintenance-mode.service'; +import { FailureClassificationService } from './failure-classification.service'; +import { GoalCheckpointService } from './goal-checkpoint.service'; +import { + ChecklistItemStatus, + ExecutionSurface, + GoalRunWaitReason, + GoalSpecStatus, + Prisma, + StepType, + UserPromptCancelReason, + UserPromptKind, + UserPromptStatus, +} from '@prisma/client'; +import { createId } from '@paralleldrive/cuid2'; +import { UserPromptService } from './user-prompt.service'; +import { OutboxService } from './outbox.service'; + +// Loop configuration +// v1.6.0: Changed default from 1000ms to 5000ms - reduces DB load ~80% with no UX regression +// TaskDispatchService already polls every 5s, so faster orchestrator polling provides no benefit +const DEFAULT_LOOP_INTERVAL_MS = 5000; +const MAX_REPLAN_ATTEMPTS = 3; + +// v1.8.0 Phase C: Heartbeat-based timeout replaces static TTL +// - Primary: Heartbeat health from TaskDispatchService (dynamic, agent-driven) +// - Fallback: MAX_STEP_TTL_MS as absolute safeguard (static, time-based) +// The old STEP_TIMEOUT_MS (5 min) is replaced by heartbeat-based detection +const MAX_STEP_TTL_MS = 30 * 60 * 1000; // 30 minutes absolute maximum per step (safeguard) + +// v1.1.1: Infrastructure failure retry configuration +// Infrastructure failures (404, timeout, network) should retry, not replan +const MAX_INFRA_RETRIES = 5; // Max retries for infrastructure failures per step +const INFRA_RETRY_BASE_DELAY_MS = 10000; // 10 seconds base delay, doubles each retry (exponential backoff) + +// v1.7.0: Restart grace window configuration +// After orchestrator restart, don't apply timeout/replan logic until state is reconciled +// This prevents the "immediate replan on restart" bug where in-progress steps are +// incorrectly marked as timed out because they appear to have been running for +// the entire duration since the pod last polled (before crash/restart). +const DEFAULT_RESTART_GRACE_MS = 5 * 60 * 1000; // 5 minutes grace window + +// v1.9.0 Phase D: Delay notification thresholds (UX improvements) +// Notify users when steps are taking longer than expected +const DELAY_WARNING_THRESHOLD_MS = 60 * 1000; // Warn after 1 minute +const DELAY_CRITICAL_THRESHOLD_MS = 3 * 60 * 1000; // Critical after 3 minutes +const DELAY_NOTIFICATION_INTERVAL_MS = 60 * 1000; // Don't spam - notify once per minute + +// Loop decision types +type LoopAction = + | 'PLAN' + | 'EXECUTE' + | 'VERIFY' + | 'REPLAN' + | 'RETRY' // v1.1.1: Retry same step (for infrastructure failures) + | 'WAIT_PROVIDER' // v6.0.0: Pause safely when provider/model capacity is unavailable + | 'WAIT_APPROVAL' + | 'COMPLETE' + | 'FAIL' + | 'PAUSE' + | 'CONTINUE'; + +interface LoopDecision { + action: LoopAction; + itemId?: string; + nodeRunId?: string; + reason?: string; + retryCount?: number; // v1.1.1: Infrastructure retry count + retryDelayMs?: number; // v1.1.1: Delay before retry (exponential backoff) + retryCategory?: 'INFRA' | 'HEARTBEAT'; +} + +interface ActiveLoop { + goalRunId: string; + intervalId: NodeJS.Timeout; + isRunning: boolean; +} + +@Injectable() +export class OrchestratorLoopService implements OnModuleInit, OnModuleDestroy { + private readonly logger = new Logger(OrchestratorLoopService.name); + private readonly activeLoops = new Map(); + // v1.6.0: Configurable loop interval from env var + private readonly loopIntervalMs: number; + // v1.7.0: Restart grace window - don't timeout/replan during grace + private readonly restartGraceMs: number; + private readonly processStartAt: number; + // Durable retry configuration (used with ChecklistItem retry fields) + private readonly heartbeatMaxRetries: number; + private readonly heartbeatBaseDelayMs: number; + private readonly heartbeatMaxDelayMs: number; + // v1.7.0: Track which goal runs have been reconciled after restart + private readonly reconciledGoalRuns = new Set(); + // v1.9.0 Phase D: Track last delay notification time per checklist item + // Key: checklistItemId, Value: { lastNotifiedAt: timestamp, severity: 'warning' | 'critical' } + private readonly delayNotifications = new Map(); + + constructor( + private prisma: PrismaService, + private goalRunService: GoalRunService, + private plannerService: PlannerService, + private goalIntakeService: GoalIntakeService, + private workflowService: WorkflowService, + private taskDispatchService: TaskDispatchService, + private dbTransientService: DbTransientService, + private maintenanceModeService: MaintenanceModeService, + private failureClassificationService: FailureClassificationService, + private goalCheckpointService: GoalCheckpointService, // v2.2.0: Manus-style checkpoint + private eventEmitter: EventEmitter2, + private configService: ConfigService, + private userPromptService: UserPromptService, + private outboxService: OutboxService, + ) { + // v1.6.0: Read loop interval from env var, default to 5000ms + this.loopIntervalMs = parseInt( + this.configService.get('LOOP_INTERVAL_MS', String(DEFAULT_LOOP_INTERVAL_MS)), + 10, + ); + // v1.7.0: Restart grace window - record process start time + this.processStartAt = Date.now(); + this.restartGraceMs = parseInt( + this.configService.get('ORCHESTRATOR_RESTART_GRACE_MS', String(DEFAULT_RESTART_GRACE_MS)), + 10, + ); + + // Use the same env knobs as FailureClassificationService, but persist budgets in the DB. + this.heartbeatMaxRetries = parseInt(this.configService.get('HEARTBEAT_MAX_RETRIES', '5'), 10); + this.heartbeatBaseDelayMs = parseInt(this.configService.get('HEARTBEAT_BASE_DELAY_MS', '15000'), 10); + this.heartbeatMaxDelayMs = parseInt(this.configService.get('HEARTBEAT_MAX_DELAY_MS', '60000'), 10); + } + + /** + * v5.11.2: Non-blocking initialization to allow HTTP server to start first + * + * CRITICAL FIX: The previous implementation used `await this.resumeActiveGoalRuns()` + * which blocked NestJS startup, preventing the HTTP server from binding to port 8080. + * This caused Kubernetes liveness probes to fail and pods to crash. + * + * Solution: Use setImmediate() to defer heavy initialization to the next event loop tick. + * This allows: + * 1. NestJS module initialization to complete synchronously + * 2. app.listen() to bind HTTP server to port 8080 + * 3. Health endpoints to respond to probes + * 4. Heavy initialization (DB queries, loop starts) to run after server is ready + * + * @see https://docs.nestjs.com/fundamentals/lifecycle-events + * @see Kubernetes best practice: HTTP server must be ready before heavy init + */ + async onModuleInit() { + this.logger.log( + `Orchestrator Loop Service initialized (interval=${this.loopIntervalMs}ms, ` + + `restartGrace=${this.restartGraceMs}ms)`, + ); + + // v5.11.2: Defer heavy initialization to allow HTTP server to start first + // This is critical for Kubernetes probe success - the HTTP server MUST be + // listening before probes run, otherwise pods will be killed. + // + // NOTE: setImmediate is NOT sufficient as it runs before app.listen() in main.ts. + // We use a 2-second delay to ensure the HTTP server has time to bind to port 8080. + // This is a pragmatic solution that works with NestJS's synchronous bootstrap. + const STARTUP_DELAY_MS = 2000; + this.logger.log(`Deferring goal run resumption for ${STARTUP_DELAY_MS}ms to allow HTTP server startup`); + + setTimeout(() => { + this.logger.log('HTTP server should be ready, resuming active goal runs'); + this.resumeActiveGoalRuns() + .then(() => { + this.logger.log('Active goal runs resumed successfully'); + }) + .catch((error) => { + this.logger.error(`Failed to resume active goal runs: ${error.message}`, error.stack); + }); + }, STARTUP_DELAY_MS); + } + + /** + * v1.7.0: Check if we're in the restart grace window + * During grace, we should only reconcile state, not timeout/replan + */ + private isInRestartGraceWindow(): boolean { + return Date.now() - this.processStartAt < this.restartGraceMs; + } + + /** + * v1.7.0: Get remaining restart grace time in milliseconds + */ + private getRestartGraceRemainingMs(): number { + return Math.max(0, this.restartGraceMs - (Date.now() - this.processStartAt)); + } + + /** + * v1.7.0: Mark a goal run as reconciled after restart + * Once reconciled, normal timeout/replan logic applies + */ + private markGoalRunReconciled(goalRunId: string): void { + if (!this.reconciledGoalRuns.has(goalRunId)) { + this.reconciledGoalRuns.add(goalRunId); + this.logger.log(`Goal run ${goalRunId} reconciled after restart`); + } + } + + /** + * v1.7.0: Check if a goal run needs reconciliation after restart + * Returns true if: + * 1. We're in restart grace window AND + * 2. This goal run hasn't been reconciled yet + */ + private needsReconciliation(goalRunId: string): boolean { + return this.isInRestartGraceWindow() && !this.reconciledGoalRuns.has(goalRunId); + } + + async onModuleDestroy() { + this.logger.log('Shutting down orchestrator loops'); + // Stop all active loops + for (const [goalRunId, loop] of this.activeLoops) { + this.stopLoop(goalRunId); + } + } + + /** + * Start the orchestrator loop for a goal run + */ + async startLoop(goalRunId: string): Promise { + if (this.activeLoops.has(goalRunId)) { + this.logger.warn(`Loop already running for goal run ${goalRunId}`); + return; + } + + this.logger.log(`Starting orchestrator loop for goal run ${goalRunId} (interval=${this.loopIntervalMs}ms)`); + + const loop: ActiveLoop = { + goalRunId, + intervalId: setInterval(() => this.runLoopIteration(goalRunId), this.loopIntervalMs), + isRunning: true, + }; + + this.activeLoops.set(goalRunId, loop); + + // Run first iteration immediately + await this.runLoopIteration(goalRunId); + } + + /** + * Stop the orchestrator loop for a goal run + */ + stopLoop(goalRunId: string): void { + const loop = this.activeLoops.get(goalRunId); + if (loop) { + clearInterval(loop.intervalId); + loop.isRunning = false; + this.activeLoops.delete(goalRunId); + this.logger.log(`Stopped orchestrator loop for goal run ${goalRunId}`); + } + } + + /** + * Get loop status + */ + async getLoopStatus(goalRunId: string): Promise<{ running: boolean; replanCount: number }> { + const loop = this.activeLoops.get(goalRunId); + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { currentPlanVersion: true }, + }); + + // Durable: replans = currentPlanVersion - 1 (initial plan is version 1). + const currentPlanVersion = goalRun?.currentPlanVersion || 0; + const replanCount = Math.max(0, currentPlanVersion - 1); + return { + running: loop?.isRunning || false, + replanCount, + }; + } + + /** + * Event handler for goal run started + */ + @OnEvent('goal-run.started') + async handleGoalRunStarted(payload: { goalRunId: string }) { + await this.startLoop(payload.goalRunId); + } + + /** + * Event handler for goal run paused + */ + @OnEvent('goal-run.paused') + handleGoalRunPaused(payload: { goalRunId: string }) { + // Don't stop the loop, just let it handle the PAUSED phase + this.logger.log(`Goal run ${payload.goalRunId} paused`); + } + + /** + * Event handler for goal run cancelled/completed/failed + */ + @OnEvent('goal-run.cancelled') + @OnEvent('goal-run.completed') + @OnEvent('goal-run.failed') + handleGoalRunEnded(payload: { goalRunId: string }) { + this.stopLoop(payload.goalRunId); + } + + /** + * v2.2.0: Event handler for step completion - updates Manus-style checkpoint + * + * This keeps the checkpoint current after every step completion, + * implementing the Manus pattern of "constantly rewriting the todo list" + * to keep completed work in the model's recent attention span. + */ + @OnEvent('activity.STEP_COMPLETED') + async handleStepCompleted(payload: { goalRunId: string; checklistItemId: string }) { + if (!payload.goalRunId) return; + + this.logger.debug(`Step completed, updating checkpoint for goal run ${payload.goalRunId}`); + + try { + await this.goalCheckpointService.updateCheckpoint(payload.goalRunId); + } catch (error) { + this.logger.warn(`Failed to update checkpoint after step completion: ${(error as Error).message}`); + } + } + + // Private methods + + /** + * Run a single iteration of the orchestrator loop + * v1.7.0: Wrapped in DbTransientService for graceful DB restart handling + */ + private async runLoopIteration(goalRunId: string): Promise { + const loop = this.activeLoops.get(goalRunId); + if (!loop?.isRunning) return; + + // v1.7.0: Check if we're in DB backoff before attempting any DB operations + if (this.dbTransientService.isInBackoff()) { + const remainingMs = this.dbTransientService.getBackoffRemainingMs(); + this.logger.debug( + `Loop iteration for ${goalRunId} skipped - DB backoff (${Math.round(remainingMs / 1000)}s remaining)`, + ); + return; + } + + // v1.7.0: Wrap the entire loop iteration in transient guard + await this.dbTransientService.withTransientGuard( + async () => { + await this.runLoopIterationCore(goalRunId); + }, + `OrchestratorLoop.${goalRunId}`, + { + onTransientError: async (error, backoffMs) => { + // v1.7.0: Throttled activity event emission for DB unavailable + if (this.dbTransientService.shouldEmitDbUnavailableActivity()) { + this.dbTransientService.markActivityEmitted(); + // Note: We can't emit activity event during DB outage, but log it + this.logger.warn( + `DB unavailable for goal run ${goalRunId}, pausing orchestration ` + + `(backoff: ${Math.round(backoffMs / 1000)}s)`, + ); + } + }, + onNonTransientError: (error) => { + // Non-transient errors are logged but we don't crash the loop + this.logger.error( + `Non-transient error in loop iteration for ${goalRunId}: ${error.message}`, + error.stack, + ); + }, + }, + ); + } + + /** + * Core loop iteration logic (extracted for transient guard wrapper) + * v1.7.0: Separated from runLoopIteration to enable DB transient wrapping + */ + private async runLoopIterationCore(goalRunId: string): Promise { + // Get current goal run state + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + }); + + if (!goalRun) { + this.logger.error(`Goal run ${goalRunId} not found, stopping loop`); + this.stopLoop(goalRunId); + return; + } + + // Check terminal states + if ( + goalRun.status === GoalRunStatus.COMPLETED || + goalRun.status === GoalRunStatus.FAILED || + goalRun.status === GoalRunStatus.CANCELLED + ) { + this.stopLoop(goalRunId); + return; + } + + // Check for pending steering messages + const pendingSteering = await this.goalRunService.getPendingSteering(goalRunId); + if (pendingSteering) { + await this.processSteering(goalRun, pendingSteering); + return; + } + + // Skip iteration if paused + if (goalRun.phase === GoalRunPhase.PAUSED) { + return; + } + + // v2.0.0 Phase E: Skip execution if in maintenance mode + // The maintenance mode service will pause goal runs when entering full maintenance + if (this.maintenanceModeService.shouldSkipExecution()) { + this.logger.debug( + `Skipping loop iteration for ${goalRunId}: system in maintenance mode`, + ); + return; + } + + // Make decision based on current state + const decision = await this.makeDecision(goalRun); + + // Execute decision + await this.executeDecision(goalRun, decision); + } + + /** + * Make a decision about what to do next + * v1.0.1: Fixed race condition - only trigger PLAN for INITIALIZING, not PLANNING + */ + private async makeDecision(goalRun: any): Promise { + const currentPlan = goalRun.planVersions[0]; + const items = currentPlan?.checklistItems || []; + + switch (goalRun.phase) { + case GoalRunPhase.WAITING_USER_INPUT: + // Stable durable wait state for USER_INPUT_REQUIRED steps + return { action: 'CONTINUE' }; + + case GoalRunPhase.WAITING_PROVIDER: + // Stable durable wait state for provider/model capacity recovery + return { action: 'CONTINUE' }; + + case GoalRunPhase.INITIALIZING: + // Only trigger PLAN for INITIALIZING phase + // This prevents race condition where multiple iterations try to plan + if (!currentPlan || items.length === 0) { + return { action: 'PLAN' }; + } + // Plan exists, move to execution + return { action: 'EXECUTE' }; + + case GoalRunPhase.PLANNING: + // Planning is in progress - wait for it to complete + // DO NOT trigger another PLAN action (prevents duplicate version error) + if (!currentPlan || items.length === 0) { + return { action: 'CONTINUE' }; + } + // Plan exists, move to execution + return { action: 'EXECUTE' }; + + case GoalRunPhase.EXECUTING: + case GoalRunPhase.CONTROLLING_DESKTOP: + case GoalRunPhase.WAITING_CAPACITY: + // v2.2.2: Quiet WAITING_CAPACITY + // If we already have a durable waitUntil in the future, do not churn: + // - do not call workspace provisioning APIs + // - do not update goal_runs row every tick + if (goalRun.phase === GoalRunPhase.WAITING_CAPACITY) { + const waitUntil: Date | null = (goalRun as any).waitUntil ?? null; + if (waitUntil && waitUntil > new Date()) { + return { action: 'CONTINUE' }; + } + } + + // Find next item to execute + const pendingItem = items.find( + (item: any) => item.status === ChecklistItemStatus.PENDING, + ); + const inProgressItem = items.find( + (item: any) => item.status === ChecklistItemStatus.IN_PROGRESS, + ); + + if (inProgressItem) { + // v1.7.0: During restart grace window, skip timeout evaluation + // We need to reconcile task status first before deciding to timeout/replan + if (this.needsReconciliation(goalRun.id)) { + const graceRemaining = this.getRestartGraceRemainingMs(); + this.logger.debug( + `Step ${inProgressItem.id} in progress - skipping timeout check during restart grace ` + + `(${Math.round(graceRemaining / 1000)}s remaining)`, + ); + // Mark as reconciled after first pass - we've seen the in-progress item + // Next iteration will apply normal timeout logic if still in progress + this.markGoalRunReconciled(goalRun.id); + return { action: 'CONTINUE' }; + } + + // v1.8.0 Phase C: Get heartbeat health for dynamic timeout + // Heartbeat health is the primary signal - times out when agent stops sending heartbeats + const heartbeatHealth = this.taskDispatchService.getHeartbeatHealth(inProgressItem.id); + const statusHealth = this.taskDispatchService.getStatusCheckHealth(inProgressItem.id); + const lastProgressTime = this.taskDispatchService.getLastProgressTime(inProgressItem.id); + + // Use lastProgressTime if available, otherwise fall back to startedAt + const effectiveStartTime = lastProgressTime?.getTime() + || inProgressItem.startedAt?.getTime() + || Date.now(); + const timeSinceProgress = Date.now() - effectiveStartTime; + + // v1.7.0: Skip timeout if DB is in backoff (can't trust time measurements) + if (this.dbTransientService.isInBackoff()) { + this.logger.debug( + `Step ${inProgressItem.id} past timeout but DB is in backoff - waiting for recovery`, + ); + return { action: 'CONTINUE' }; + } + + // v2.1.0: Heartbeat-based timeout with failure classification (Option C) + // CRITICAL FIX: Heartbeat timeouts are TRANSIENT failures, use RETRY not REPLAN + // This prevents consuming the replan budget on infrastructure issues. + // Previous behavior (v1.8.0): REPLAN consumed replan budget → exhausted after 3 heartbeat gaps + // New behavior (v2.1.0): RETRY uses separate heartbeat budget (5 retries) + if (heartbeatHealth.shouldTimeout) { + const currentRetries = inProgressItem.heartbeatRetryCount ?? 0; + + // Durable budget (DB-backed): once exhausted, pause safely (no retry storm, no semantic replan). + if (currentRetries >= this.heartbeatMaxRetries) { + return { + action: 'WAIT_PROVIDER', + reason: `Heartbeat retries exhausted (${this.heartbeatMaxRetries}): ${inProgressItem.description}`, + itemId: inProgressItem.id, + }; + } + + const retryCount = currentRetries + 1; + const exponentialDelay = this.heartbeatBaseDelayMs * Math.pow(2, currentRetries); + const jitter = (Math.random() - 0.5) * 0.2 * exponentialDelay; // ±10% jitter + const retryDelayMs = Math.min(exponentialDelay + jitter, this.heartbeatMaxDelayMs); + + this.logger.warn( + `Step ${inProgressItem.id} heartbeat timeout: ` + + `${heartbeatHealth.consecutiveUnhealthy} consecutive unhealthy checks, ` + + `last heartbeat: ${heartbeatHealth.lastHeartbeat?.toISOString() || 'never'} → ` + + `RETRY ${retryCount}/${this.heartbeatMaxRetries} (delay ${Math.round(retryDelayMs / 1000)}s)`, + ); + + return { + action: 'RETRY', + retryCategory: 'HEARTBEAT', + reason: `Heartbeat timeout (retry ${retryCount}/${this.heartbeatMaxRetries}): ${inProgressItem.description}`, + itemId: inProgressItem.id, + retryCount, + retryDelayMs, + }; + } + + // Fallback: Absolute TTL safeguard (30 minutes) + // This prevents tasks from running forever if heartbeat tracking fails + if (timeSinceProgress > MAX_STEP_TTL_MS) { + if (statusHealth.isHealthy || statusHealth.consecutiveFailures > 24) { + this.logger.warn( + `Step ${inProgressItem.id} exceeded absolute TTL (${Math.round(MAX_STEP_TTL_MS / 60000)} min)`, + ); + return { + action: 'REPLAN', + reason: `Step exceeded maximum time: ${inProgressItem.description}`, + itemId: inProgressItem.id, + }; + } + // Status checks failing but within tolerance - continue waiting + this.logger.warn( + `Step ${inProgressItem.id} past TTL but status checks failing ` + + `(${statusHealth.consecutiveFailures} failures) - waiting for recovery`, + ); + } + + // Log heartbeat status for observability + if (!heartbeatHealth.isHealthy && heartbeatHealth.hasActiveDispatch) { + this.logger.debug( + `Step ${inProgressItem.id} heartbeat unhealthy ` + + `(${heartbeatHealth.consecutiveUnhealthy}/3 threshold)`, + ); + } + + // v1.9.0 Phase D: Emit delay notifications for user awareness + // Notify users when steps are taking longer than expected + await this.emitDelayNotificationIfNeeded( + goalRun.id, + inProgressItem, + timeSinceProgress, + ); + + // Wait for in-progress item + return { action: 'CONTINUE' }; + } + + if (pendingItem) { + // Durable retry gating: if the next step was reset to PENDING with a backoff window, + // do not re-dispatch until the gate expires (prevents tight-loop retries). + const now = new Date(); + const gateUntil = [pendingItem.infraRetryAfter, pendingItem.heartbeatRetryAfter] + .filter((d): d is Date => Boolean(d)) + .reduce((max, d) => (!max || d > max ? d : max), null); + + if (gateUntil && gateUntil > now) { + return { action: 'CONTINUE' }; + } + + return { action: 'EXECUTE', itemId: pendingItem.id }; + } + + // No pending items - check for failed items + const failedItem = items.find( + (item: any) => item.status === ChecklistItemStatus.FAILED, + ); + if (failedItem) { + // v1.1.1: Check if this is an infrastructure failure (should retry, not replan) + const isInfraFailure = this.isInfrastructureFailure(failedItem); + + if (isInfraFailure) { + // Infrastructure failure - retry the step instead of replanning + const infraRetryCount = failedItem.infraRetryCount || 0; + + if (infraRetryCount >= MAX_INFRA_RETRIES) { + this.logger.warn( + `Infrastructure retries exhausted (${MAX_INFRA_RETRIES}) for step ${failedItem.id}, ` + + `entering WAITING_PROVIDER` + ); + + return { + action: 'WAIT_PROVIDER', + reason: + `Infrastructure retries exhausted (${MAX_INFRA_RETRIES}). ` + + `Waiting for provider/model capacity to recover.`, + itemId: failedItem.id, + }; + } else { + // Calculate exponential backoff delay for next retry + const retryDelay = INFRA_RETRY_BASE_DELAY_MS * Math.pow(2, infraRetryCount); + + return { + action: 'RETRY', + reason: `Infrastructure failure (retry ${infraRetryCount + 1}/${MAX_INFRA_RETRIES}): ${failedItem.actualOutcome}`, + itemId: failedItem.id, + retryCount: infraRetryCount + 1, + retryDelayMs: retryDelay, + retryCategory: 'INFRA', + }; + } + } + + // Semantic failure - proceed to replan. + // Durable: replans = currentPlanVersion - 1 (initial plan is version 1). + const currentPlanVersion = goalRun.currentPlanVersion || currentPlan?.version || 0; + const replanCount = Math.max(0, currentPlanVersion - 1); + if (replanCount >= MAX_REPLAN_ATTEMPTS) { + return { + action: 'FAIL', + reason: `Max replan attempts (${MAX_REPLAN_ATTEMPTS}) exceeded`, + }; + } + return { + action: 'REPLAN', + reason: `Step failed: ${failedItem.description}`, + itemId: failedItem.id, + }; + } + + // All items completed - verify + const allCompleted = items.every( + (item: any) => + item.status === ChecklistItemStatus.COMPLETED || + item.status === ChecklistItemStatus.SKIPPED, + ); + if (allCompleted) { + return { action: 'VERIFY' }; + } + + return { action: 'CONTINUE' }; + + case GoalRunPhase.VERIFYING: + // Verification phase - check if goal is achieved + return { action: 'COMPLETE' }; + + case GoalRunPhase.WAITING_APPROVAL: + // Wait for approval - will be handled by steering + return { action: 'CONTINUE' }; + + case GoalRunPhase.REPLANNING: + // Replan in progress + return { action: 'CONTINUE' }; + + default: + return { action: 'CONTINUE' }; + } + } + + /** + * Execute the decided action + */ + private async executeDecision(goalRun: any, decision: LoopDecision): Promise { + this.logger.debug(`Executing decision ${decision.action} for goal run ${goalRun.id}`); + + switch (decision.action) { + case 'PLAN': + await this.executePlanningPhase(goalRun); + break; + + case 'EXECUTE': + if (decision.itemId) { + await this.executeStep(goalRun, decision.itemId); + } + break; + + case 'VERIFY': + await this.executeVerificationPhase(goalRun); + break; + + case 'REPLAN': + // v1.9.0 Phase D: Clear delay notification for the failed item + if (decision.itemId) { + this.clearDelayNotification(decision.itemId); + } + await this.executeReplanningPhase(goalRun, decision.reason!, decision.itemId); + break; + + case 'RETRY': + // v2.1.0: Retry step for transient failures (doesn't consume replan budget) + // Handles both infrastructure failures (v1.1.1) and heartbeat timeouts (v2.1.0) + // Uses DB-backed retry gates on checklist items (restart-safe, no tight-loop retries). + if (decision.itemId) { + await this.executeTransientRetry( + goalRun, + decision.itemId, + decision.retryCategory || 'INFRA', + decision.retryCount || 1, + decision.retryDelayMs || INFRA_RETRY_BASE_DELAY_MS, + decision.reason || 'Transient failure', + ); + } + break; + + case 'WAIT_PROVIDER': + // v6.0.0: Stable pause for provider/model capacity recovery (prevents retry storms). + if (decision.itemId) { + await this.enterWaitingProvider( + goalRun, + decision.itemId, + decision.reason || 'Waiting for provider/model capacity recovery', + ); + } + break; + + case 'COMPLETE': + // v1.9.0 Phase D: Clear all delay notifications on completion + this.delayNotifications.clear(); + await this.goalRunService.completeGoalRun(goalRun.id); + break; + + case 'FAIL': + // v1.9.0 Phase D: Clear all delay notifications on failure + this.delayNotifications.clear(); + await this.goalRunService.failGoalRun(goalRun.id, decision.reason || 'Unknown failure'); + break; + + case 'CONTINUE': + case 'PAUSE': + // No action needed + break; + } + } + + /** + * Execute the planning phase + * v1.0.1: Added atomic phase transition with optimistic locking to prevent race conditions + */ + private async executePlanningPhase(goalRun: any): Promise { + this.logger.log(`Executing planning phase for goal run ${goalRun.id}`); + + // Stark Fix (Gold Standard): Goal Intake gate BEFORE planning. + // If GoalSpec is INCOMPLETE, do not enter PLANNING; create/ensure GOAL_INTAKE prompt and WAIT. + const intakeGate = await this.goalIntakeService.ensureGoalSpecReadyForPlanning({ + goalRunId: goalRun.id, + tenantId: goalRun.tenantId, + }); + if (!intakeGate.ready) { + return; + } + + // Atomic phase transition with optimistic locking + // Only proceed if we can atomically move from INITIALIZING to PLANNING + // This prevents race condition where multiple loop iterations try to plan concurrently + const updated = await this.prisma.goalRun.updateMany({ + where: { + id: goalRun.id, + phase: GoalRunPhase.INITIALIZING, // Only if still INITIALIZING + }, + data: { + phase: GoalRunPhase.PLANNING, + }, + }); + + if (updated.count === 0) { + // Another iteration already started planning, or phase changed + this.logger.debug( + `Planning already started for ${goalRun.id} (phase not INITIALIZING), skipping`, + ); + return; + } + + this.logger.log(`Acquired planning lock for goal run ${goalRun.id}`); + + try { + await this.plannerService.generateInitialPlan(goalRun.id); + await this.goalRunService.updatePhase(goalRun.id, GoalRunPhase.EXECUTING); + } catch (error: any) { + if (error instanceof PlannerFirstStepUserInputError) { + await this.goalIntakeService.requestGoalIntakeFromPlannerError({ + goalRunId: goalRun.id, + tenantId: goalRun.tenantId, + error, + }); + return; + } + + this.logger.error(`Planning failed: ${error.message}`); + await this.goalRunService.failGoalRun(goalRun.id, `Planning failed: ${error.message}`); + } + } + + /** + * Execute a single step + */ + private async executeStep(goalRun: any, itemId: string): Promise { + this.logger.log(`Executing step ${itemId} for goal run ${goalRun.id}`); + + const item = await this.prisma.checklistItem.findUnique({ + where: { id: itemId }, + }); + + if (!item) return; + + // Durable retry gating: if this step was reset to PENDING with a backoff window, + // do not re-dispatch until the gate expires (defense-in-depth; makeDecision also gates). + const gateUntil = [item.infraRetryAfter, item.heartbeatRetryAfter] + .filter((d): d is Date => Boolean(d)) + .reduce((max, d) => (!max || d > max ? d : max), null); + + if (gateUntil && gateUntil > new Date()) { + return; + } + + // Stark Fix (Atom 2): USER_INPUT_REQUIRED steps are a user-interaction surface, never dispatched. + // Prefer explicit step.type; fallback only on explicit machine flags (no NL matching). + const isUserInputRequired = + item.type === StepType.USER_INPUT_REQUIRED || + hasUserInteractionTool(item.suggestedTools); + + if (isUserInputRequired) { + const isDesktopPrompt = + item.executionSurface === ExecutionSurface.DESKTOP || item.requiresDesktop; + + // Gold invariant (I1): strategy must never block progress. + // In a no-chat product, TEXT_CLARIFICATION prompts are reserved for true external requirements only. + // If GoalSpec is COMPLETE, treat USER_INPUT_REQUIRED text steps as "strategy" and auto-resolve by policy. + if (!isDesktopPrompt) { + const goalSpec = await this.prisma.goalSpec.findUnique({ + where: { goalRunId: goalRun.id }, + select: { status: true }, + }); + + // If intake is incomplete (or missing), request GOAL_INTAKE rather than creating ad-hoc prompts. + if (!goalSpec || goalSpec.status !== GoalSpecStatus.COMPLETE) { + await this.goalIntakeService.ensureGoalSpecReadyForPlanning({ + goalRunId: goalRun.id, + tenantId: goalRun.tenantId, + }); + return; + } + + const now = new Date(); + const decision = { + policy: 'NO_STRATEGY_PROMPTS', + outcome: 'AUTO_RESOLVED_STRATEGY', + message: + 'Proceed with system defaults and continue. Do not ask the user for strategy choices.', + step: { + checklistItemId: item.id, + description: item.description, + }, + }; + + // If this step was already blocked by a prompt, cancel that prompt so the run can proceed. + if (item.blockedByPromptId) { + await this.prisma.userPrompt.updateMany({ + where: { + id: item.blockedByPromptId, + status: UserPromptStatus.OPEN, + }, + data: { + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.SUPERSEDED, + cancelledAt: now, + }, + }); + } + + const completed = await this.prisma.checklistItem.updateMany({ + where: { + id: item.id, + status: { + in: [ + ChecklistItemStatus.PENDING, + ChecklistItemStatus.IN_PROGRESS, + ChecklistItemStatus.BLOCKED, + ], + }, + }, + data: { + status: ChecklistItemStatus.COMPLETED, + blockedByPromptId: null, + blockedReason: null, + blockedAt: null, + startedAt: item.startedAt ?? now, + completedAt: now, + actualOutcome: JSON.stringify(decision, null, 2), + }, + }); + + // If we were waiting due to this prompt step, resume the run back to EXECUTING. + const phaseUpdated = await this.prisma.goalRun.updateMany({ + where: { + id: goalRun.id, + phase: GoalRunPhase.WAITING_USER_INPUT, + }, + data: { + phase: GoalRunPhase.EXECUTING, + waitReason: null, + waitDetail: Prisma.DbNull, + waitStartedAt: null, + waitUntil: null, + }, + }); + + if (completed.count > 0 || phaseUpdated.count > 0) { + if (phaseUpdated.count > 0) { + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId: goalRun.id, + previousPhase: goalRun.phase, + newPhase: GoalRunPhase.EXECUTING, + }); + } + + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'STEP_AUTO_RESOLVED', + title: `Auto-resolved strategy step: ${item.description}`, + severity: 'info', + checklistItemId: item.id, + details: decision, + }); + } + + return; + } + + const kind = UserPromptKind.DESKTOP_TAKEOVER; + + const prompt = await this.userPromptService.ensureOpenPromptForStep({ + tenantId: goalRun.tenantId, + goalRunId: goalRun.id, + checklistItemId: item.id, + kind, + payload: { + goalRunId: goalRun.id, + checklistItemId: item.id, + step: { + description: item.description, + expectedOutcome: item.expectedOutcome, + suggestedTools: item.suggestedTools, + }, + }, + }); + + const blockedAt = new Date(); + const stepBlocked = await this.prisma.checklistItem.updateMany({ + where: { + id: item.id, + status: { + in: [ChecklistItemStatus.PENDING, ChecklistItemStatus.IN_PROGRESS], + }, + }, + data: { + status: ChecklistItemStatus.BLOCKED, + blockedByPromptId: prompt.id, + blockedReason: 'INPUT_REQUIRED', + blockedAt, + startedAt: item.startedAt ?? new Date(), + completedAt: null, + actualOutcome: JSON.stringify( + { + blockedReason: 'WAITING_USER_INPUT', + promptId: prompt.id, + promptKind: prompt.kind, + }, + null, + 2, + ), + }, + }); + + const phaseUpdated = await this.prisma.goalRun.updateMany({ + where: { + id: goalRun.id, + phase: { + in: [GoalRunPhase.EXECUTING, GoalRunPhase.CONTROLLING_DESKTOP], + }, + }, + data: { + phase: GoalRunPhase.WAITING_USER_INPUT, + waitReason: GoalRunWaitReason.USER_INPUT, + waitStartedAt: blockedAt, + waitUntil: null, + waitDetail: { + kind: 'USER_PROMPT', + promptId: prompt.id, + promptKind: prompt.kind, + checklistItemId: item.id, + } as any, + }, + }); + + // Emit one activity event on the first transition only (prevents 5s loop spam). + if (stepBlocked.count > 0 || phaseUpdated.count > 0) { + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId: goalRun.id, + previousPhase: goalRun.phase, + newPhase: GoalRunPhase.WAITING_USER_INPUT, + }); + + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'USER_PROMPT_CREATED', + title: `Waiting for user input: ${item.description}`, + severity: 'warning', + checklistItemId: item.id, + details: { + promptId: prompt.id, + promptKind: prompt.kind, + dedupeKey: prompt.dedupeKey, + }, + }); + } + + await this.outboxService.enqueueOnce({ + dedupeKey: prompt.dedupeKey, + aggregateId: goalRun.id, + eventType: 'user_prompt.created', + payload: { + promptId: prompt.id, + goalRunId: goalRun.id, + tenantId: goalRun.tenantId, + checklistItemId: item.id, + kind: prompt.kind, + stepDescription: item.description, + }, + }); + + return; + } + + try { + // v1.1.0: Create workflow if not exists (with link-first pattern) + if (!goalRun.workflowRunId) { + const workflowRunId = await this.createWorkflowForGoalRun(goalRun); + goalRun.workflowRunId = workflowRunId; + } + + // v1.1.0: Check workspace readiness before executing (capacity backpressure) + const workspaceStatus = await this.ensureWorkspaceReadyForStep(goalRun); + + if (!workspaceStatus.ready) { + if (workspaceStatus.waitingForCapacity) { + // Capacity issue - do not dispatch, do not replan, and do not churn DB state. + // Run enters a stable WAITING_CAPACITY phase, while workspace.nextAttemptAt gates retry timing. + + const retryAfterMs = workspaceStatus.retryAfterMs || 30000; + const waitUntil = workspaceStatus.waitUntil ?? new Date(Date.now() + retryAfterMs); + + // v2.2.2: Do not update the goal_run row every loop tick. + // Only write when: + // - we are entering WAITING_CAPACITY, OR + // - we are extending waitUntil (new backoff window). + const currentWaitUntil: Date | null = (goalRun as any).waitUntil ?? null; + const shouldUpdateWaitUntil = + goalRun.phase !== GoalRunPhase.WAITING_CAPACITY || + !currentWaitUntil || + waitUntil.getTime() > currentWaitUntil.getTime() + 1000; + + if (shouldUpdateWaitUntil) { + await this.goalRunService.updatePhase(goalRun.id, GoalRunPhase.WAITING_CAPACITY, { + waitDetail: { + kind: 'WORKSPACE_CAPACITY', + checklistItemId: item.id, + retryAfterMs, + } as any, + waitUntil, + }); + } + + // v1.5.0: Activity emission throttling - check last activity event for this step + const recentActivity = await this.prisma.activityEvent.findFirst({ + where: { + goalRunId: goalRun.id, + checklistItemId: itemId, + eventType: 'STEP_WAITING_CAPACITY', + createdAt: { gte: new Date(Date.now() - 60000) }, // Last 60 seconds + }, + orderBy: { createdAt: 'desc' }, + }); + + if (!recentActivity) { + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'STEP_WAITING_CAPACITY', + title: `Waiting for capacity: ${item.description}`, + description: `Desktop workspace not available, retrying in ${Math.ceil((workspaceStatus.retryAfterMs || 30000) / 1000)}s`, + severity: 'warning', + checklistItemId: itemId, + }); + } + + return; // Exit without failing - will retry on next loop + } + + // Non-capacity failure - workspace provisioning truly failed + throw new Error(workspaceStatus.error || 'Workspace not ready'); + } + + // Workspace is ready - execute the step + await this.goalRunService.updatePhase( + goalRun.id, + item.requiresDesktop ? GoalRunPhase.CONTROLLING_DESKTOP : GoalRunPhase.EXECUTING, + ); + + // Mark item as in progress only after workspace is ready (prevents churn while waiting for capacity). + await this.prisma.checklistItem.update({ + where: { id: itemId }, + data: { + status: ChecklistItemStatus.IN_PROGRESS, + startedAt: new Date(), + }, + }); + + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'STEP_STARTED', + title: `Starting: ${item.description}`, + checklistItemId: itemId, + }); + + // Workspace is ready - execute the step + await this.executeStepViaWorkflow(goalRun, item); + } catch (error: any) { + this.logger.error(`Step execution failed: ${error.message}`); + + await this.prisma.checklistItem.update({ + where: { id: itemId }, + data: { + status: ChecklistItemStatus.FAILED, + actualOutcome: error.message, + completedAt: new Date(), + }, + }); + + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'STEP_FAILED', + title: `Failed: ${item.description}`, + description: error.message, + severity: 'error', + checklistItemId: itemId, + }); + } + } + + /** + * Execute step via workflow system + */ + private async executeStepViaWorkflow(goalRun: any, item: any): Promise { + // Create workflow node for this checklist item + const nodeId = `node-${createId()}`; + + await this.prisma.workflowNode.create({ + data: { + id: nodeId, + workflowRunId: goalRun.workflowRunId, + name: item.description, + type: 'TASK', + config: { + description: item.description, + expectedOutcome: item.expectedOutcome, + suggestedTools: item.suggestedTools, + }, + order: item.order, + status: NodeStatus.READY, + allowedTools: item.suggestedTools || [], + }, + }); + + // Link checklist item to workflow node + await this.prisma.checklistItem.update({ + where: { id: item.id }, + data: { workflowNodeId: nodeId }, + }); + + // Emit event for node execution (handled by existing workflow executor) + this.eventEmitter.emit('workflow.node-ready', { + workflowRunId: goalRun.workflowRunId, + nodeId, + goalRunId: goalRun.id, + checklistItemId: item.id, + }); + } + + /** + * Create workflow run for goal run + * v1.1.0: CRITICAL FIX - Link first, provision second pattern + * + * This prevents the runaway loop bug where failed provisioning caused + * new workflows to be created on each loop iteration. + * + * Sequence (MUST be in this order): + * 1. Create workflow record (DB only, no provisioning) + * 2. Link to goalRun (so next iteration won't create another workflow) + * 3. Attempt workspace provisioning (may fail, that's OK) + * 4. Start workflow if provisioning succeeded + * + * If provisioning fails: + * - Workflow record exists and is linked + * - Next iteration will retry provisioning, NOT create new workflow + * - Capacity issues use exponential backoff, not replanning + */ + private async createWorkflowForGoalRun(goalRun: any): Promise { + // STEP 1: Create workflow RECORD only (no provisioning yet) + const workflowResult = await this.workflowService.createWorkflowRecord({ + tenantId: goalRun.tenantId, + name: `Goal: ${goalRun.goal.substring(0, 50)}`, + description: goalRun.goal, + nodes: [], // Nodes will be added as checklist items are executed + persistence: { + enabled: true, + }, + }); + + this.logger.log( + `Created workflow record ${workflowResult.id} for goal ${goalRun.id} ` + + `(linking before provisioning - prevents runaway loop)` + ); + + // STEP 2: LINK FIRST - This is the critical fix! + // Even if provisioning fails, the workflow is linked. + // Next loop iteration will see workflowRunId and NOT create another workflow. + await this.goalRunService.linkWorkflowRun(goalRun.id, workflowResult.id); + + this.logger.log(`Linked workflow ${workflowResult.id} to goal ${goalRun.id}`); + + // STEP 3: Attempt workspace provisioning (may fail, that's OK) + const provisionResult = await this.workflowService.ensureWorkspaceProvisioned( + workflowResult.id, + goalRun.tenantId, + { enabled: true }, + ); + + if (provisionResult.success) { + // STEP 4: Start the workflow only if provisioning succeeded + await this.workflowService.startWorkflow(workflowResult.id); + this.logger.log(`Workflow ${workflowResult.id} started successfully`); + } else if (provisionResult.status === WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY) { + // Capacity issue - log it, but don't fail or replan + this.logger.warn( + `Workflow ${workflowResult.id} waiting for capacity ` + + `(retry in ${Math.ceil((provisionResult.retryAfterMs || 30000) / 1000)}s)` + ); + + const retryAfterMs = provisionResult.retryAfterMs || 30000; + const waitUntil = + workflowResult.workspaceId + ? ( + await this.prisma.workspace.findUnique({ + where: { id: workflowResult.workspaceId }, + select: { nextAttemptAt: true }, + }) + )?.nextAttemptAt ?? new Date(Date.now() + retryAfterMs) + : new Date(Date.now() + retryAfterMs); + + await this.goalRunService.updatePhase(goalRun.id, GoalRunPhase.WAITING_CAPACITY, { + waitDetail: { + kind: 'WORKSPACE_CAPACITY', + workflowRunId: workflowResult.id, + workspaceId: workflowResult.workspaceId, + retryAfterMs, + } as any, + waitUntil, + }); + + // Create activity event for visibility + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'WAITING_FOR_CAPACITY', + title: 'Waiting for desktop capacity', + description: `Workspace provisioning will retry in ${Math.ceil((provisionResult.retryAfterMs || 30000) / 1000)} seconds`, + severity: 'warning', + }); + } else { + // Provisioning failed (not capacity) - this is a real error + this.logger.error(`Workflow ${workflowResult.id} provisioning failed: ${provisionResult.error}`); + // Don't throw - the workflow is linked, executeStep will handle the failed state + } + + return workflowResult.id; + } + + /** + * v1.1.0: Ensure workspace is ready before executing steps + * v1.5.0: Added DB-driven retry gating to prevent tight loop bug + * + * Called at the start of step execution to: + * 1. Check if workspace is provisioned (READY) + * 2. If WAITING_FOR_CAPACITY with nextAttemptAt in future, skip without calling API + * 3. If FAILED, return error + */ + private async ensureWorkspaceReadyForStep(goalRun: any): Promise<{ + ready: boolean; + waitingForCapacity: boolean; + retryAfterMs?: number; + waitUntil?: Date; + error?: string; + }> { + if (!goalRun.workflowRunId) { + return { ready: false, waitingForCapacity: false, error: 'No workflow linked' }; + } + + // v1.5.0: Check workspace nextAttemptAt BEFORE calling ensureWorkspaceProvisioned + // This prevents the tight loop bug where we call the API every second + const workflow = await this.prisma.workflowRun.findUnique({ + where: { id: goalRun.workflowRunId }, + include: { workspace: true }, + }); + + if (workflow?.workspace) { + const workspace = workflow.workspace; + const nextAttemptAt = (workspace as any).nextAttemptAt; + const lastActivityEmittedAt = (workspace as any).lastActivityEmittedAt; + + // If workspace is waiting for capacity and nextAttemptAt is in the future, skip + if ( + workspace.status === WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY && + nextAttemptAt && + new Date(nextAttemptAt) > new Date() + ) { + const retryAfterMs = new Date(nextAttemptAt).getTime() - Date.now(); + + // v1.5.0: Activity emission throttling - only log every 60 seconds + const shouldEmitActivity = !lastActivityEmittedAt || + (Date.now() - new Date(lastActivityEmittedAt).getTime() > 60000); + + if (shouldEmitActivity) { + this.logger.debug( + `Workspace ${workspace.id} waiting for capacity, skipping until ${new Date(nextAttemptAt).toISOString()}` + ); + + // Update lastActivityEmittedAt to throttle future logs + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { lastActivityEmittedAt: new Date() }, + }); + } + + return { + ready: false, + waitingForCapacity: true, + retryAfterMs, + waitUntil: new Date(nextAttemptAt), + }; + } + + // If workspace is already READY, return success without calling API + if (workspace.status === WorkspaceProvisioningStatus.READY) { + return { ready: true, waitingForCapacity: false }; + } + } + + // Call ensureWorkspaceProvisioned only if workspace is not in a known state + const provisionResult = await this.workflowService.ensureWorkspaceProvisioned( + goalRun.workflowRunId, + goalRun.tenantId, + { enabled: true }, + ); + + if (provisionResult.success) { + return { ready: true, waitingForCapacity: false }; + } + + if (provisionResult.status === WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY) { + let waitUntil: Date | undefined; + try { + if (workflow?.workspace?.id) { + const refreshed = await this.prisma.workspace.findUnique({ + where: { id: workflow.workspace.id }, + select: { nextAttemptAt: true }, + }); + if (refreshed?.nextAttemptAt) { + waitUntil = refreshed.nextAttemptAt; + } + } + } catch (error: any) { + // best-effort: waitUntil can be derived from retryAfterMs below + } + + return { + ready: false, + waitingForCapacity: true, + retryAfterMs: provisionResult.retryAfterMs, + waitUntil: waitUntil ?? new Date(Date.now() + (provisionResult.retryAfterMs || 30000)), + }; + } + + return { + ready: false, + waitingForCapacity: false, + error: provisionResult.error || 'Workspace provisioning failed', + }; + } + + /** + * Execute the verification phase + */ + private async executeVerificationPhase(goalRun: any): Promise { + this.logger.log(`Executing verification phase for goal run ${goalRun.id}`); + + await this.goalRunService.updatePhase(goalRun.id, GoalRunPhase.VERIFYING); + + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'VERIFICATION_STARTED', + title: 'Verifying goal completion', + }); + + // For now, simple verification: all steps completed = goal completed + // Future: LLM-based verification + const currentPlan = goalRun.planVersions[0]; + const items = currentPlan?.checklistItems || []; + + const allCompleted = items.every( + (item: any) => + item.status === ChecklistItemStatus.COMPLETED || + item.status === ChecklistItemStatus.SKIPPED, + ); + + if (allCompleted) { + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'VERIFICATION_PASSED', + title: 'Goal verification passed', + }); + await this.goalRunService.completeGoalRun(goalRun.id); + + // v1.5.0: Complete workflow to trigger workspace hibernation + // This was missing, causing workspaces to never hibernate + if (goalRun.workflowRunId) { + try { + await this.workflowService.completeWorkflow(goalRun.workflowRunId, WorkflowStatus.COMPLETED); + this.logger.log(`Workflow ${goalRun.workflowRunId} completed and workspace hibernated`); + } catch (error: any) { + // Log but don't fail - orphan GC will clean up + this.logger.warn(`Failed to complete workflow ${goalRun.workflowRunId}: ${error.message}`); + } + } + } else { + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'VERIFICATION_FAILED', + title: 'Goal verification failed', + description: 'Not all steps completed successfully', + severity: 'warning', + }); + // Trigger replan + await this.executeReplanningPhase(goalRun, 'Verification failed - not all steps completed'); + } + } + + /** + * Execute the replanning phase + */ + private async executeReplanningPhase( + goalRun: any, + reason: string, + failedItemId?: string, + ): Promise { + this.logger.log(`Executing replanning phase for goal run ${goalRun.id}: ${reason}`); + + // Durable budget: replans = currentPlanVersion - 1 (initial plan is version 1). + const currentPlanVersion = goalRun.currentPlanVersion || 0; + const currentReplanCount = Math.max(0, currentPlanVersion - 1); + const nextReplanAttempt = currentReplanCount + 1; + + if (nextReplanAttempt > MAX_REPLAN_ATTEMPTS) { + await this.goalRunService.failGoalRun( + goalRun.id, + `Max replan attempts (${MAX_REPLAN_ATTEMPTS}) exceeded. Last reason: ${reason}`, + ); + + // v1.5.0: Cancel workflow to trigger workspace hibernation + if (goalRun.workflowRunId) { + try { + await this.workflowService.cancelWorkflow(goalRun.workflowRunId, 'Goal run failed'); + this.logger.log(`Workflow ${goalRun.workflowRunId} cancelled and workspace hibernated`); + } catch (error: any) { + this.logger.warn(`Failed to cancel workflow ${goalRun.workflowRunId}: ${error.message}`); + } + } + return; + } + + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'REPLAN_STARTED', + title: 'Generating new plan', + description: reason, + }); + + try { + await this.plannerService.generateReplan(goalRun.id, reason, { + failedItemId, + }); + await this.goalRunService.updatePhase(goalRun.id, GoalRunPhase.EXECUTING); + } catch (error: any) { + if (error instanceof PlannerFirstStepUserInputError) { + await this.goalIntakeService.requestGoalIntakeFromPlannerError({ + goalRunId: goalRun.id, + tenantId: goalRun.tenantId, + error, + }); + return; + } + + this.logger.error(`Replanning failed: ${error.message}`); + await this.goalRunService.failGoalRun(goalRun.id, `Replanning failed: ${error.message}`); + } + } + + /** + * v1.1.1: Check if a failed checklist item is an infrastructure failure + * + * Infrastructure failures are marked with [INFRA] prefix in actualOutcome + * by the task-dispatch.service.ts markAsInfrastructureFailure method. + * + * Infrastructure failures should be retried, not replanned, because: + * - They're transient (network, 404, timeout, capacity) + * - The step logic is correct, just the execution environment failed + * - Replanning wastes the replan budget on non-semantic issues + */ + private isInfrastructureFailure(item: any): boolean { + if (!item.actualOutcome) return false; + + const outcome = String(item.actualOutcome); + + // Check for [INFRA] prefix (set by task-dispatch.service.ts) + if (outcome.startsWith('[INFRA]') || outcome.includes('[INFRA]')) { + return true; + } + + // Also check for common infrastructure error patterns + // These may come from other sources (workflow service, direct errors) + const infraPatterns = [ + 'INFRA_LOOKUP_FAILED', + 'Task not found', + '404', + '503', + 'ECONNREFUSED', + 'ETIMEDOUT', + 'ENOTFOUND', + 'socket hang up', + 'network error', + 'Agent unreachable', + 'Workspace not ready', + 'capacity', + ]; + + const lowerOutcome = outcome.toLowerCase(); + return infraPatterns.some(pattern => + lowerOutcome.includes(pattern.toLowerCase()) + ); + } + + /** + * v2.1.0: Execute transient retry for a failed/timed-out step + * + * This resets the step to PENDING so it will be re-executed on the next loop. + * Does NOT consume replan budget - transient failures are recoverable. + * + * Handles both: + * - Infrastructure failures (v1.1.1): 404, network errors, capacity issues + * - Heartbeat timeouts (v2.1.0): Agent not responding, connection gaps + * + * Uses exponential backoff to avoid hammering the infrastructure. + * Retry budgets are managed by FailureClassificationService. + */ + private async executeTransientRetry( + goalRun: any, + itemId: string, + retryCategory: 'INFRA' | 'HEARTBEAT', + retryCount: number, + retryDelayMs: number, + reason: string, + ): Promise { + const isHeartbeatRetry = retryCategory === 'HEARTBEAT'; + const retryType = isHeartbeatRetry ? 'Heartbeat' : 'Infrastructure'; + + this.logger.log( + `${retryType} retry ${retryCount} for step ${itemId} ` + + `(delay: ${Math.round(retryDelayMs / 1000)}s): ${reason}`, + ); + + const retryAfter = new Date(Date.now() + retryDelayMs); + + // Get item details for activity event + const item = await this.prisma.checklistItem.findUnique({ + where: { id: itemId }, + }); + + // Get error history for diagnostics (Manus-style error preservation) + const errorHistory = this.failureClassificationService.getErrorHistory(itemId); + const diagnosticSummary = errorHistory.length > 1 + ? ` Previous attempts: ${errorHistory.length - 1}.` + : ''; + + // Reset step to PENDING for retry + // v2.1.0: Preserve error context in actualOutcome for debugging + // This implements Manus-style "error as learning" pattern + const retryContext = JSON.stringify({ + lastRetryAt: new Date().toISOString(), + retryCount, + retryType, + reason, + errorHistory: errorHistory.slice(-3), // Keep last 3 attempts + }); + + await this.prisma.checklistItem.update({ + where: { id: itemId }, + data: { + status: ChecklistItemStatus.PENDING, + startedAt: null, + completedAt: null, + // Durable retry gating (prevents tight-loop retries even after restart) + infraRetryCount: isHeartbeatRetry ? undefined : retryCount, + infraRetryAfter: isHeartbeatRetry ? null : retryAfter, + heartbeatRetryCount: isHeartbeatRetry ? retryCount : undefined, + heartbeatRetryAfter: isHeartbeatRetry ? retryAfter : null, + // Store retry context in actualOutcome (will be overwritten on next attempt) + actualOutcome: `[RETRY:${retryCategory}:${retryCount}] ${reason} | Context: ${retryContext}`, + }, + }); + + // Create activity event for visibility + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: isHeartbeatRetry ? 'STEP_HEARTBEAT_RETRY' : 'STEP_INFRA_RETRY', + title: `Retrying: ${item?.description || 'Unknown step'}`, + description: `${retryType} retry ${retryCount}. ` + + `Next attempt in ${Math.round(retryDelayMs / 1000)} seconds. ` + + `Reason: ${reason}.${diagnosticSummary}`, + severity: 'warning', + checklistItemId: itemId, + }); + + this.logger.log( + `Step ${itemId} reset to PENDING for ${retryType.toLowerCase()} retry ` + + `(will retry in ${Math.round(retryDelayMs / 1000)}s)`, + ); + } + + /** + * v6.0.0: Enter a stable WAITING_PROVIDER state after exhausting transient retries. + * + * This prevents semantic replans and retry storms when the root cause is + * provider/model/gateway capacity rather than step logic. + */ + private async enterWaitingProvider( + goalRun: any, + itemId: string, + reason: string, + ): Promise { + const item = await this.prisma.checklistItem.findUnique({ + where: { id: itemId }, + select: { + id: true, + description: true, + status: true, + startedAt: true, + actualOutcome: true, + }, + }); + + const stepBlocked = await this.prisma.checklistItem.updateMany({ + where: { + id: itemId, + status: { + in: [ChecklistItemStatus.FAILED, ChecklistItemStatus.IN_PROGRESS], + }, + }, + data: { + status: ChecklistItemStatus.BLOCKED, + infraRetryCount: 0, + infraRetryAfter: null, + heartbeatRetryCount: 0, + heartbeatRetryAfter: null, + startedAt: item?.startedAt ?? new Date(), + completedAt: null, + actualOutcome: JSON.stringify( + { + blockedReason: 'WAITING_PROVIDER', + reason, + previousOutcome: item?.actualOutcome ?? null, + }, + null, + 2, + ), + }, + }); + + const phaseUpdated = await this.prisma.goalRun.updateMany({ + where: { + id: goalRun.id, + phase: { + in: [GoalRunPhase.EXECUTING, GoalRunPhase.CONTROLLING_DESKTOP], + }, + }, + data: { + phase: GoalRunPhase.WAITING_PROVIDER, + waitReason: GoalRunWaitReason.PROVIDER, + waitStartedAt: new Date(), + waitUntil: null, + waitDetail: { + kind: 'WAITING_PROVIDER', + checklistItemId: itemId, + reason, + } as any, + }, + }); + + if (stepBlocked.count > 0 || phaseUpdated.count > 0) { + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId: goalRun.id, + previousPhase: goalRun.phase, + newPhase: GoalRunPhase.WAITING_PROVIDER, + }); + + await this.goalRunService.createActivityEvent(goalRun.id, { + eventType: 'WAITING_PROVIDER', + title: 'Waiting for provider/model capacity', + description: reason, + severity: 'warning', + checklistItemId: itemId, + details: { + blockedReason: 'WAITING_PROVIDER', + lastKnownOutcome: item?.actualOutcome ?? null, + }, + }); + + this.logger.warn( + `Goal run ${goalRun.id} entered WAITING_PROVIDER (step ${itemId} blocked): ${reason}`, + ); + } + } + + /** + * Process steering message + */ + private async processSteering(goalRun: any, steering: any): Promise { + this.logger.log(`Processing steering message ${steering.id} for goal run ${goalRun.id}`); + + await this.goalRunService.acknowledgeSteering(steering.id); + + switch (steering.type) { + case 'INSTRUCTION': + // Handle user instruction - may trigger replan + if (steering.content) { + await this.executeReplanningPhase(goalRun, `User instruction: ${steering.content}`); + } + break; + + case 'MODIFY_PLAN': + // Handle plan modification + await this.executeReplanningPhase(goalRun, `User requested plan modification`); + break; + + case 'APPROVE': + // Resume from waiting approval + if (goalRun.phase === GoalRunPhase.WAITING_APPROVAL) { + await this.goalRunService.updatePhase(goalRun.id, GoalRunPhase.EXECUTING); + } + break; + + case 'REJECT': + // Handle rejection + if (steering.targetItemId) { + await this.prisma.checklistItem.update({ + where: { id: steering.targetItemId }, + data: { status: ChecklistItemStatus.SKIPPED }, + }); + } + await this.goalRunService.updatePhase(goalRun.id, GoalRunPhase.EXECUTING); + break; + + // PAUSE, RESUME, CANCEL handled directly by GoalRunService.submitSteering + } + + await this.prisma.steeringMessage.update({ + where: { id: steering.id }, + data: { processedAt: new Date() }, + }); + } + + /** + * v1.9.0 Phase D: Emit delay notification if step is taking longer than expected + * + * Notifies users when steps exceed warning (1 min) or critical (3 min) thresholds. + * Uses rate limiting to avoid notification spam (max once per minute per item). + * + * This improves UX by: + * - Keeping users informed about long-running tasks + * - Distinguishing between "working but slow" vs "stuck" + * - Providing transparency into what's happening + */ + private async emitDelayNotificationIfNeeded( + goalRunId: string, + item: any, + timeSinceProgress: number, + ): Promise { + const itemId = item.id; + const now = Date.now(); + + // Determine severity level based on delay duration + let severity: 'warning' | 'critical' | null = null; + if (timeSinceProgress >= DELAY_CRITICAL_THRESHOLD_MS) { + severity = 'critical'; + } else if (timeSinceProgress >= DELAY_WARNING_THRESHOLD_MS) { + severity = 'warning'; + } + + // No delay yet - nothing to notify + if (!severity) { + return; + } + + // Check rate limiting - don't spam notifications + const lastNotification = this.delayNotifications.get(itemId); + if (lastNotification) { + const timeSinceLastNotification = now - lastNotification.lastNotifiedAt; + + // Don't notify if we notified recently + if (timeSinceLastNotification < DELAY_NOTIFICATION_INTERVAL_MS) { + return; + } + + // If severity is the same, skip (already notified at this level) + // Only re-notify if escalating from warning to critical + if (lastNotification.severity === severity) { + // Update timestamp but don't emit (periodic logging only) + this.delayNotifications.set(itemId, { lastNotifiedAt: now, severity }); + return; + } + } + + // Calculate human-readable duration + const minutes = Math.floor(timeSinceProgress / 60000); + const seconds = Math.floor((timeSinceProgress % 60000) / 1000); + const durationText = minutes > 0 ? `${minutes}m ${seconds}s` : `${seconds}s`; + + // Emit appropriate event based on severity + const eventType = severity === 'critical' ? 'STEP_DELAY_CRITICAL' : 'STEP_DELAY_WARNING'; + const title = severity === 'critical' + ? `Step taking longer than expected (${durationText})` + : `Step still in progress (${durationText})`; + const description = severity === 'critical' + ? `"${item.description}" has been running for ${durationText}. The agent is still working.` + : `"${item.description}" is taking longer than usual. The agent is still working.`; + + // Log for observability + this.logger.log( + `Step ${itemId} delay ${severity}: ${durationText} elapsed, emitting ${eventType}`, + ); + + // Emit activity event + await this.goalRunService.createActivityEvent(goalRunId, { + eventType, + title, + description, + severity, + checklistItemId: itemId, + details: { + durationMs: timeSinceProgress, + durationText, + threshold: severity === 'critical' + ? DELAY_CRITICAL_THRESHOLD_MS + : DELAY_WARNING_THRESHOLD_MS, + }, + }); + + // Update tracking + this.delayNotifications.set(itemId, { lastNotifiedAt: now, severity }); + } + + /** + * v1.9.0 Phase D: Clear delay notification tracking when step completes + * + * Called when a step transitions out of IN_PROGRESS state to clean up + * the delay notification tracking for that item. + */ + private clearDelayNotification(itemId: string): void { + if (this.delayNotifications.has(itemId)) { + this.delayNotifications.delete(itemId); + this.logger.debug(`Cleared delay notification tracking for step ${itemId}`); + } + } + + /** + * Resume active goal runs on startup + * v1.7.0: Wrapped in transient guard for DB resilience during startup + */ + private async resumeActiveGoalRuns(): Promise { + // v1.7.0: Wrap in transient guard - DB may not be ready immediately on startup + const runningGoalRuns = await this.dbTransientService.withTransientGuard( + async () => { + return await this.prisma.goalRun.findMany({ + where: { + status: GoalRunStatus.RUNNING, + phase: { + notIn: [GoalRunPhase.COMPLETED, GoalRunPhase.FAILED, GoalRunPhase.PAUSED], + }, + }, + }); + }, + 'OrchestratorLoop.resumeActiveGoalRuns', + { + skipIfInBackoff: false, // Always try on startup + onTransientError: (error, backoffMs) => { + this.logger.warn( + `DB not ready during startup, will retry goal run resume ` + + `(backoff: ${Math.round(backoffMs / 1000)}s): ${error.message}`, + ); + }, + }, + ); + + if (!runningGoalRuns) { + // DB was unavailable - schedule a retry + this.logger.warn('Could not resume goal runs - DB unavailable, will retry on next recovery'); + return; + } + + this.logger.log(`Resuming ${runningGoalRuns.length} active goal runs`); + + for (const goalRun of runningGoalRuns) { + await this.startLoop(goalRun.id); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.transient-retry-gating.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.transient-retry-gating.spec.ts new file mode 100644 index 000000000..cb2ad4afd --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.transient-retry-gating.spec.ts @@ -0,0 +1,51 @@ +import { OrchestratorLoopService } from './orchestrator-loop.service'; +import { ChecklistItemStatus, GoalRunPhase } from '@prisma/client'; + +describe('OrchestratorLoopService transient retry gating', () => { + it('does not EXECUTE a PENDING step before its retry gate expires', async () => { + const configService = { + get: jest.fn((_key: string, fallback: any) => fallback), + } as any; + + const service = new OrchestratorLoopService( + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + { isInBackoff: () => false } as any, + { shouldSkipExecution: () => false } as any, + {} as any, + {} as any, + { emit: jest.fn() } as any, + configService, + {} as any, + {} as any, + ); + + const gateUntil = new Date(Date.now() + 60_000); + const goalRun: any = { + id: 'gr-1', + phase: GoalRunPhase.EXECUTING, + currentPlanVersion: 1, + planVersions: [ + { + checklistItems: [ + { + id: 'ci-1', + status: ChecklistItemStatus.PENDING, + description: 'Retry me later', + infraRetryAfter: gateUntil, + heartbeatRetryAfter: null, + }, + ], + }, + ], + }; + + const decision = await (service as any).makeDecision(goalRun); + expect(decision).toEqual({ action: 'CONTINUE' }); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.user-prompt-gate.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.user-prompt-gate.spec.ts new file mode 100644 index 000000000..4f6932f6d --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.user-prompt-gate.spec.ts @@ -0,0 +1,244 @@ +import { OrchestratorLoopService } from './orchestrator-loop.service'; +import { + ChecklistItemStatus, + ExecutionSurface, + GoalSpecStatus, + GoalRunPhase, + GoalRunWaitReason, + StepType, + UserPromptKind, +} from '@prisma/client'; + +describe('OrchestratorLoopService USER_INPUT_REQUIRED gate', () => { + it('creates DESKTOP_TAKEOVER prompt, blocks step, and enqueues outbox without dispatch', async () => { + const prisma = { + checklistItem: { + findUnique: jest.fn(), + updateMany: jest.fn(), + update: jest.fn(), + }, + goalSpec: { + findUnique: jest.fn(), + }, + goalRun: { + updateMany: jest.fn(), + }, + } as any; + + const goalRunService = { + createActivityEvent: jest.fn(), + } as any; + + const goalIntakeService = { + ensureGoalSpecReadyForPlanning: jest.fn(), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new OrchestratorLoopService( + prisma, + goalRunService, + {} as any, + goalIntakeService, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + eventEmitter, + configService, + userPromptService, + outboxService, + ); + + const item = { + id: 'ci-1', + description: 'Confirm details with the user', + expectedOutcome: 'Details confirmed', + suggestedTools: ['ASK_USER'], + requiresDesktop: true, + executionSurface: ExecutionSurface.DESKTOP, + type: StepType.USER_INPUT_REQUIRED, + startedAt: null, + blockedByPromptId: null, + }; + + prisma.checklistItem.findUnique.mockResolvedValueOnce(item); + + const prompt = { + id: 'p-1', + kind: UserPromptKind.DESKTOP_TAKEOVER, + dedupeKey: 'prompt:gr-1:ci-1:DESKTOP_TAKEOVER', + }; + + userPromptService.ensureOpenPromptForStep.mockResolvedValueOnce(prompt); + prisma.checklistItem.updateMany.mockResolvedValueOnce({ count: 1 }); + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }); + outboxService.enqueueOnce.mockResolvedValueOnce({ id: 'o-1' }); + + await (service as any).executeStep({ id: 'gr-1', tenantId: 't-1', phase: GoalRunPhase.EXECUTING }, item.id); + + expect(prisma.checklistItem.update).not.toHaveBeenCalled(); // Never mark IN_PROGRESS + expect(userPromptService.ensureOpenPromptForStep).toHaveBeenCalledTimes(1); + expect(prisma.checklistItem.updateMany).toHaveBeenCalledWith({ + where: { + id: item.id, + status: { + in: [ChecklistItemStatus.PENDING, ChecklistItemStatus.IN_PROGRESS], + }, + }, + data: expect.objectContaining({ + status: ChecklistItemStatus.BLOCKED, + }), + }); + expect(prisma.goalRun.updateMany).toHaveBeenCalledWith({ + where: { + id: 'gr-1', + phase: { + in: [GoalRunPhase.EXECUTING, GoalRunPhase.CONTROLLING_DESKTOP], + }, + }, + data: expect.objectContaining({ + phase: GoalRunPhase.WAITING_USER_INPUT, + waitReason: GoalRunWaitReason.USER_INPUT, + }), + }); + expect(goalRunService.createActivityEvent).toHaveBeenCalledWith('gr-1', expect.objectContaining({ + eventType: 'USER_PROMPT_CREATED', + checklistItemId: item.id, + details: expect.objectContaining({ promptId: prompt.id }), + })); + expect(outboxService.enqueueOnce).toHaveBeenCalledWith({ + dedupeKey: prompt.dedupeKey, + aggregateId: 'gr-1', + eventType: 'user_prompt.created', + payload: { + promptId: prompt.id, + goalRunId: 'gr-1', + tenantId: 't-1', + checklistItemId: item.id, + kind: prompt.kind, + stepDescription: item.description, + }, + }); + + // Ensure we never dispatch agent work for prompt steps + expect(eventEmitter.emit).not.toHaveBeenCalledWith('workflow.node-ready', expect.anything()); + expect(goalIntakeService.ensureGoalSpecReadyForPlanning).not.toHaveBeenCalled(); + }); + + it('auto-resolves TEXT_ONLY USER_INPUT_REQUIRED when GoalSpec is COMPLETE', async () => { + const prisma = { + checklistItem: { + findUnique: jest.fn(), + updateMany: jest.fn(), + update: jest.fn(), + }, + goalSpec: { + findUnique: jest.fn(), + }, + userPrompt: { + updateMany: jest.fn(), + }, + goalRun: { + updateMany: jest.fn(), + }, + } as any; + + const goalRunService = { + createActivityEvent: jest.fn(), + } as any; + + const goalIntakeService = { + ensureGoalSpecReadyForPlanning: jest.fn(), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new OrchestratorLoopService( + prisma, + goalRunService, + {} as any, + goalIntakeService, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + eventEmitter, + configService, + userPromptService, + outboxService, + ); + + const item = { + id: 'ci-1', + description: 'Ask the user which flight site to use', + expectedOutcome: 'Site chosen', + suggestedTools: ['ASK_USER'], + requiresDesktop: false, + executionSurface: ExecutionSurface.TEXT_ONLY, + type: StepType.USER_INPUT_REQUIRED, + startedAt: null, + blockedByPromptId: null, + }; + + prisma.checklistItem.findUnique.mockResolvedValueOnce(item); + prisma.goalSpec.findUnique.mockResolvedValueOnce({ status: GoalSpecStatus.COMPLETE }); + prisma.checklistItem.updateMany.mockResolvedValueOnce({ count: 1 }); + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 0 }); + + await (service as any).executeStep({ id: 'gr-1', tenantId: 't-1', phase: GoalRunPhase.EXECUTING }, item.id); + + expect(userPromptService.ensureOpenPromptForStep).not.toHaveBeenCalled(); + expect(outboxService.enqueueOnce).not.toHaveBeenCalled(); + expect(goalIntakeService.ensureGoalSpecReadyForPlanning).not.toHaveBeenCalled(); + + expect(prisma.checklistItem.updateMany).toHaveBeenCalledWith({ + where: { + id: item.id, + status: { + in: [ChecklistItemStatus.PENDING, ChecklistItemStatus.IN_PROGRESS, ChecklistItemStatus.BLOCKED], + }, + }, + data: expect.objectContaining({ + status: ChecklistItemStatus.COMPLETED, + }), + }); + + expect(goalRunService.createActivityEvent).toHaveBeenCalledWith('gr-1', expect.objectContaining({ + eventType: 'STEP_AUTO_RESOLVED', + checklistItemId: item.id, + })); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.waiting-capacity.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.waiting-capacity.spec.ts new file mode 100644 index 000000000..972f41c71 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.waiting-capacity.spec.ts @@ -0,0 +1,51 @@ +import { OrchestratorLoopService } from './orchestrator-loop.service'; +import { ChecklistItemStatus, GoalRunPhase } from '@prisma/client'; + +describe('OrchestratorLoopService WAITING_CAPACITY quiet gate', () => { + it('does not attempt to execute steps before waitUntil (quiet waiting)', async () => { + const configService = { + get: jest.fn((_key: string, fallback: any) => fallback), + } as any; + + const service = new OrchestratorLoopService( + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + { isInBackoff: () => false } as any, + { shouldSkipExecution: () => false } as any, + {} as any, + {} as any, + { emit: jest.fn() } as any, + configService, + {} as any, + {} as any, + ); + + const goalRun: any = { + id: 'gr-1', + phase: GoalRunPhase.WAITING_CAPACITY, + waitUntil: new Date(Date.now() + 60_000), + currentPlanVersion: 1, + planVersions: [ + { + checklistItems: [ + { + id: 'ci-1', + status: ChecklistItemStatus.PENDING, + description: 'Await capacity', + infraRetryAfter: null, + heartbeatRetryAfter: null, + }, + ], + }, + ], + }; + + const decision = await (service as any).makeDecision(goalRun); + expect(decision).toEqual({ action: 'CONTINUE' }); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.waiting-provider.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.waiting-provider.spec.ts new file mode 100644 index 000000000..2d4072bc4 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orchestrator-loop.waiting-provider.spec.ts @@ -0,0 +1,168 @@ +import { OrchestratorLoopService } from './orchestrator-loop.service'; +import { ChecklistItemStatus, GoalRunPhase, GoalRunWaitReason } from '@prisma/client'; + +describe('OrchestratorLoopService WAITING_PROVIDER gate', () => { + it('blocks step and transitions run to WAITING_PROVIDER', async () => { + const prisma = { + checklistItem: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + goalRun: { + updateMany: jest.fn(), + }, + } as any; + + const goalRunService = { + createActivityEvent: jest.fn(), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const service = new OrchestratorLoopService( + prisma, + goalRunService, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + eventEmitter, + configService, + {} as any, + {} as any, + ); + + prisma.checklistItem.findUnique.mockResolvedValueOnce({ + id: 'ci-1', + description: 'Desktop step', + status: ChecklistItemStatus.FAILED, + startedAt: null, + actualOutcome: '[INFRA] Gateway timeout', + }); + + prisma.checklistItem.updateMany.mockResolvedValueOnce({ count: 1 }); + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }); + + await (service as any).enterWaitingProvider( + { id: 'gr-1', phase: GoalRunPhase.EXECUTING }, + 'ci-1', + 'Waiting for provider capacity', + ); + + expect(prisma.checklistItem.updateMany).toHaveBeenCalledWith({ + where: { + id: 'ci-1', + status: { in: [ChecklistItemStatus.FAILED, ChecklistItemStatus.IN_PROGRESS] }, + }, + data: expect.objectContaining({ + status: ChecklistItemStatus.BLOCKED, + completedAt: null, + }), + }); + + expect(prisma.goalRun.updateMany).toHaveBeenCalledWith({ + where: { + id: 'gr-1', + phase: { in: [GoalRunPhase.EXECUTING, GoalRunPhase.CONTROLLING_DESKTOP] }, + }, + data: expect.objectContaining({ + phase: GoalRunPhase.WAITING_PROVIDER, + waitReason: GoalRunWaitReason.PROVIDER, + }), + }); + + expect(goalRunService.createActivityEvent).toHaveBeenCalledWith( + 'gr-1', + expect.objectContaining({ + eventType: 'WAITING_PROVIDER', + checklistItemId: 'ci-1', + }), + ); + + expect(eventEmitter.emit).toHaveBeenCalledWith('goal-run.phase-changed', { + goalRunId: 'gr-1', + previousPhase: GoalRunPhase.EXECUTING, + newPhase: GoalRunPhase.WAITING_PROVIDER, + }); + }); + + it('is idempotent: repeated calls do not re-emit activity', async () => { + const prisma = { + checklistItem: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + goalRun: { + updateMany: jest.fn(), + }, + } as any; + + const goalRunService = { + createActivityEvent: jest.fn(), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const service = new OrchestratorLoopService( + prisma, + goalRunService, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + {} as any, + eventEmitter, + configService, + {} as any, + {} as any, + ); + + prisma.checklistItem.findUnique.mockResolvedValue({ + id: 'ci-1', + description: 'Desktop step', + status: ChecklistItemStatus.FAILED, + startedAt: null, + actualOutcome: '[INFRA] Gateway timeout', + }); + + prisma.checklistItem.updateMany + .mockResolvedValueOnce({ count: 1 }) + .mockResolvedValueOnce({ count: 0 }); + prisma.goalRun.updateMany + .mockResolvedValueOnce({ count: 1 }) + .mockResolvedValueOnce({ count: 0 }); + + await (service as any).enterWaitingProvider( + { id: 'gr-1', phase: GoalRunPhase.EXECUTING }, + 'ci-1', + 'Waiting for provider capacity', + ); + await (service as any).enterWaitingProvider( + { id: 'gr-1', phase: GoalRunPhase.EXECUTING }, + 'ci-1', + 'Waiting for provider capacity', + ); + + expect(goalRunService.createActivityEvent).toHaveBeenCalledTimes(1); + expect(eventEmitter.emit).toHaveBeenCalledTimes(1); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/orphan-pod-gc.service.ts b/packages/bytebot-workflow-orchestrator/src/services/orphan-pod-gc.service.ts new file mode 100644 index 000000000..d42582aca --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/orphan-pod-gc.service.ts @@ -0,0 +1,686 @@ +/** + * Orphan Pod Garbage Collection Service + * v1.2.0: DB transient error resilience for GC cycles + * v1.1.0: Added Phase 3 - Idle workspace hibernation (configurable idle threshold) + * v1.0.0: Detects and cleans up orphan workspace pods + * + * This service runs on a schedule to find and delete pods that should have + * been hibernated but weren't due to transient failures or are idle. + * + * Orphan pods can occur when: + * - Workflow completion/cancellation failed to hibernate the workspace + * - Network issues caused hibernation API calls to fail + * - Task controller was unavailable during hibernation + * - Workspace is idle with no active goal run (v1.1.0) + * + * The service: + * 1. Queries workspaces with HIBERNATION_FAILED status + * 2. Queries completed/failed/cancelled workflows older than grace period + * 3. Queries idle workspaces with no active goal run (v1.1.0) + * 4. Attempts to delete pods via task-controller + * 5. Updates workspace status on success + * 6. Logs metrics for monitoring + * + * v1.2.0: Wrapped in DbTransientService for DB restart resilience + * + * @see https://book.kubebuilder.io/reference/good-practices (garbage collection) + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { DbTransientService } from './db-transient.service'; +import { WorkspaceService } from './workspace.service'; + +export interface OrphanPodGCResult { + checked: number; + cleaned: number; + failed: number; + errors: string[]; +} + +@Injectable() +export class OrphanPodGCService implements OnModuleInit { + private readonly logger = new Logger(OrphanPodGCService.name); + private isRunning = false; + + // Grace period before considering a completed workflow's pod as orphaned + // Default: 5 minutes to allow for normal cleanup + private readonly gracePeriodMs: number; + + // Max retries for hibernation before giving up + private readonly maxRetries: number; + + // Whether GC is enabled + private readonly enabled: boolean; + + // v1.1.0: Idle threshold - hibernate workspaces with no active goal run + // after this many minutes of inactivity + // Default: 30 minutes - prevents all-day slot pinning without data loss + private readonly idleThresholdMs: number; + + // v1.1.0: Whether idle workspace hibernation is enabled + private readonly idleHibernationEnabled: boolean; + + constructor( + private readonly prisma: PrismaService, + private readonly dbTransientService: DbTransientService, + private readonly workspaceService: WorkspaceService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.gracePeriodMs = this.configService.get( + 'ORPHAN_GC_GRACE_PERIOD_MS', + 5 * 60 * 1000, // 5 minutes + ); + this.maxRetries = this.configService.get( + 'ORPHAN_GC_MAX_RETRIES', + 5, + ); + this.enabled = this.configService.get( + 'ORPHAN_GC_ENABLED', + true, + ); + // v1.1.0: Idle workspace hibernation configuration + const idleMinutes = parseInt( + this.configService.get('WORKSPACE_IDLE_HIBERNATE_MINUTES', '30'), + 10, + ); + this.idleThresholdMs = idleMinutes * 60 * 1000; + this.idleHibernationEnabled = this.configService.get( + 'WORKSPACE_IDLE_HIBERNATE_ENABLED', + true, + ); + } + + onModuleInit() { + this.logger.log( + `OrphanPodGCService initialized (enabled=${this.enabled}, ` + + `gracePeriod=${this.gracePeriodMs}ms, maxRetries=${this.maxRetries}, ` + + `idleHibernation=${this.idleHibernationEnabled}, idleThreshold=${this.idleThresholdMs}ms)`, + ); + } + + /** + * Run GC every 5 minutes + * v1.2.0: Wrapped in transient guard for DB resilience + */ + @Cron(CronExpression.EVERY_5_MINUTES) + async runScheduledGC(): Promise { + if (!this.enabled) { + return; + } + + // v1.2.0: Skip if in DB backoff + if (this.dbTransientService.isInBackoff()) { + this.logger.debug( + `Orphan GC skipped - DB backoff (${Math.round(this.dbTransientService.getBackoffRemainingMs() / 1000)}s remaining)`, + ); + return; + } + + if (this.isRunning) { + this.logger.debug('Orphan GC already running, skipping'); + return; + } + + // v1.2.0: Wrap in transient guard + await this.dbTransientService.withTransientGuard( + async () => { + await this.runGC(); + }, + 'OrphanPodGC.runScheduledGC', + { + onTransientError: (error, backoffMs) => { + this.logger.warn( + `Orphan GC cycle interrupted by DB error, will retry ` + + `(backoff: ${Math.round(backoffMs / 1000)}s): ${error.message}`, + ); + }, + }, + ); + } + + /** + * Run orphan pod garbage collection + * Can be called manually or via cron + */ + async runGC(): Promise { + this.isRunning = true; + const result: OrphanPodGCResult = { + checked: 0, + cleaned: 0, + failed: 0, + errors: [], + }; + + try { + this.logger.debug('Starting orphan pod GC cycle'); + + // Phase 1: Clean up workspaces with HIBERNATION_FAILED status + const hibernationFailedResult = await this.cleanupHibernationFailed(); + result.checked += hibernationFailedResult.checked; + result.cleaned += hibernationFailedResult.cleaned; + result.failed += hibernationFailedResult.failed; + result.errors.push(...hibernationFailedResult.errors); + + // Phase 2: Clean up orphaned pods from completed workflows + const completedWorkflowResult = await this.cleanupCompletedWorkflows(); + result.checked += completedWorkflowResult.checked; + result.cleaned += completedWorkflowResult.cleaned; + result.failed += completedWorkflowResult.failed; + result.errors.push(...completedWorkflowResult.errors); + + // Phase 3 (v1.1.0): Clean up idle workspaces with no active goal run + if (this.idleHibernationEnabled) { + const idleWorkspaceResult = await this.cleanupIdleWorkspaces(); + result.checked += idleWorkspaceResult.checked; + result.cleaned += idleWorkspaceResult.cleaned; + result.failed += idleWorkspaceResult.failed; + result.errors.push(...idleWorkspaceResult.errors); + } + + if (result.checked > 0) { + this.logger.log( + `Orphan GC complete: checked=${result.checked}, ` + + `cleaned=${result.cleaned}, failed=${result.failed}`, + ); + } + + // Emit metrics event + this.eventEmitter.emit('orphan-gc.completed', { + timestamp: new Date(), + ...result, + }); + + return result; + } catch (error: any) { + this.logger.error(`Orphan GC failed: ${error.message}`); + result.errors.push(error.message); + return result; + } finally { + this.isRunning = false; + } + } + + /** + * Phase 1: Clean up workspaces with HIBERNATION_FAILED status + * + * These are workspaces where the workflow.service tried to hibernate + * but failed after retries. We'll try again here. + */ + private async cleanupHibernationFailed(): Promise { + const result: OrphanPodGCResult = { + checked: 0, + cleaned: 0, + failed: 0, + errors: [], + }; + + try { + // Find workspaces with HIBERNATION_FAILED status + // that haven't exceeded max retry count + const failedWorkspaces = await this.prisma.workspace.findMany({ + where: { + status: 'HIBERNATION_FAILED', + hibernationAttemptCount: { lt: this.maxRetries }, + }, + select: { + id: true, + hibernationAttemptCount: true, + lastHibernationAttemptAt: true, + hibernationError: true, + workflowRun: { + select: { id: true }, + }, + }, + take: 50, // Process in batches + }); + + result.checked = failedWorkspaces.length; + + for (const workspace of failedWorkspaces) { + try { + // Check if enough time has passed since last attempt (exponential backoff) + const lastAttempt = workspace.lastHibernationAttemptAt; + if (lastAttempt) { + const backoffMs = Math.min( + 30000 * Math.pow(2, workspace.hibernationAttemptCount), + 300000, // Max 5 minutes + ); + const nextRetryAt = new Date(lastAttempt.getTime() + backoffMs); + if (new Date() < nextRetryAt) { + this.logger.debug( + `Workspace ${workspace.id} not ready for retry (next: ${nextRetryAt.toISOString()})`, + ); + continue; + } + } + + // Update attempt tracking + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + hibernationAttemptCount: workspace.hibernationAttemptCount + 1, + lastHibernationAttemptAt: new Date(), + }, + }); + + // Attempt hibernation via task-controller + await this.workspaceService.hibernateWorkspace(workspace.id); + + // Success - update status + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'HIBERNATED', + hibernationError: null, + }, + }); + + this.logger.log(`GC: Workspace ${workspace.id} hibernated successfully`); + result.cleaned++; + } catch (error: any) { + const errorMessage = error.message || 'Unknown error'; + + // Check if pod not found (already deleted) + if (errorMessage.includes('404') || errorMessage.includes('not found')) { + // Pod already gone - mark as hibernated + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'HIBERNATED', + hibernationError: null, + }, + }); + this.logger.log(`GC: Workspace ${workspace.id} pod already deleted, marking hibernated`); + result.cleaned++; + } else { + // Still failing - check if exceeded max retries + const newAttemptCount = workspace.hibernationAttemptCount + 1; + if (newAttemptCount >= this.maxRetries) { + // Mark as permanently failed + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'GC_FAILED', + hibernationError: `Max retries (${this.maxRetries}) exceeded: ${errorMessage}`, + }, + }); + this.logger.error( + `GC: Workspace ${workspace.id} max retries exceeded, marked as GC_FAILED`, + ); + } + + result.failed++; + result.errors.push(`${workspace.id}: ${errorMessage}`); + } + } + } + } catch (error: any) { + this.logger.error(`Phase 1 (HIBERNATION_FAILED) failed: ${error.message}`); + result.errors.push(error.message); + } + + return result; + } + + /** + * Phase 2: Clean up pods from completed workflows that weren't properly hibernated + * + * This catches cases where: + * - The workflow was marked complete but hibernation never ran + * - The workflow service crashed before hibernation + * - Database was updated but side effect didn't run + */ + private async cleanupCompletedWorkflows(): Promise { + const result: OrphanPodGCResult = { + checked: 0, + cleaned: 0, + failed: 0, + errors: [], + }; + + try { + const gracePeriodAgo = new Date(Date.now() - this.gracePeriodMs); + + // Find completed workflows with workspaces that aren't hibernated + const potentialOrphans = await this.prisma.workflowRun.findMany({ + where: { + status: { in: ['COMPLETED', 'FAILED', 'CANCELLED'] }, + completedAt: { lt: gracePeriodAgo }, + workspace: { + status: { + notIn: ['HIBERNATED', 'TERMINATED', 'GC_FAILED', 'HIBERNATION_FAILED'], + }, + }, + }, + select: { + id: true, + status: true, + completedAt: true, + workspace: { + select: { + id: true, + status: true, + }, + }, + }, + take: 50, // Process in batches + }); + + result.checked = potentialOrphans.length; + + for (const workflow of potentialOrphans) { + if (!workflow.workspace) continue; + + try { + // Check if pod actually exists before trying to delete + const desktopStatus = await this.workspaceService.getWorkspaceDesktopStatus( + workflow.workspace.id, + ); + + if (desktopStatus.status === 'NOT_FOUND') { + // Pod already gone - just update DB + await this.prisma.workspace.update({ + where: { id: workflow.workspace.id }, + data: { status: 'HIBERNATED' }, + }); + this.logger.log( + `GC: Workspace ${workflow.workspace.id} pod not found, marking hibernated`, + ); + result.cleaned++; + continue; + } + + // Pod exists - try to hibernate + await this.workspaceService.hibernateWorkspace(workflow.workspace.id); + + await this.prisma.workspace.update({ + where: { id: workflow.workspace.id }, + data: { + status: 'HIBERNATED', + hibernationError: null, + }, + }); + + this.logger.log( + `GC: Orphan workspace ${workflow.workspace.id} from workflow ${workflow.id} hibernated`, + ); + result.cleaned++; + } catch (error: any) { + const errorMessage = error.message || 'Unknown error'; + + // Handle 404 (pod already deleted) + if (errorMessage.includes('404') || errorMessage.includes('not found')) { + await this.prisma.workspace.update({ + where: { id: workflow.workspace.id }, + data: { status: 'HIBERNATED' }, + }); + this.logger.log( + `GC: Workspace ${workflow.workspace.id} already deleted, marking hibernated`, + ); + result.cleaned++; + } else { + // Mark for retry next cycle + await this.prisma.workspace.update({ + where: { id: workflow.workspace.id }, + data: { + status: 'HIBERNATION_FAILED', + hibernationError: errorMessage, + hibernationAttemptCount: 1, + lastHibernationAttemptAt: new Date(), + }, + }); + + this.logger.warn( + `GC: Failed to hibernate workspace ${workflow.workspace.id}: ${errorMessage}`, + ); + result.failed++; + result.errors.push(`${workflow.workspace.id}: ${errorMessage}`); + } + } + } + } catch (error: any) { + this.logger.error(`Phase 2 (completed workflows) failed: ${error.message}`); + result.errors.push(error.message); + } + + return result; + } + + /** + * Phase 3 (v1.1.0): Clean up idle workspaces with no active goal run + * + * This catches cases where: + * - A workspace is ready but the associated goal run finished/cancelled + * - A user started a task but never finished it + * - The orchestrator loop stopped processing but the pod remained + * + * Idle detection logic: + * - Workspace status is READY (pod is running) + * - Associated workflow is RUNNING (not yet marked complete) + * - Workspace updatedAt is older than idle threshold + * - No active goal run (RUNNING status) for this workspace + */ + private async cleanupIdleWorkspaces(): Promise { + const result: OrphanPodGCResult = { + checked: 0, + cleaned: 0, + failed: 0, + errors: [], + }; + + try { + const idleThresholdAgo = new Date(Date.now() - this.idleThresholdMs); + + // Find workspaces that: + // 1. Are in READY status (pod is running) + // 2. Have a workflow in RUNNING status (not yet marked complete) + // 3. Haven't been updated in the idle threshold period + // 4. Have no active goal run (status != RUNNING or no goal run at all) + const idleWorkspaces = await this.prisma.workspace.findMany({ + where: { + status: 'READY', + updatedAt: { lt: idleThresholdAgo }, + workflowRun: { + status: 'RUNNING', + // No active goal run for this workflow + // Either no goal run exists OR goal run is not RUNNING + OR: [ + { goalRun: null }, // No goal run linked + { goalRun: { status: { not: 'RUNNING' } } }, // Goal run finished + ], + }, + }, + select: { + id: true, + updatedAt: true, + workflowRun: { + select: { + id: true, + goalRun: { + select: { + id: true, + status: true, + }, + }, + }, + }, + }, + take: 50, // Process in batches + }); + + result.checked = idleWorkspaces.length; + + for (const workspace of idleWorkspaces) { + const idleMinutes = Math.round( + (Date.now() - workspace.updatedAt.getTime()) / 60000, + ); + const goalRunId = workspace.workflowRun?.goalRun?.id; + const goalRunStatus = workspace.workflowRun?.goalRun?.status; + + try { + this.logger.log( + `GC: Hibernating idle workspace ${workspace.id} ` + + `(idle ${idleMinutes}min, goalRun=${goalRunId || 'none'}, status=${goalRunStatus || 'none'})`, + ); + + // Check if pod actually exists + const desktopStatus = await this.workspaceService.getWorkspaceDesktopStatus( + workspace.id, + ); + + if (desktopStatus.status === 'NOT_FOUND') { + // Pod already gone - just update DB + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { status: 'HIBERNATED' }, + }); + this.logger.log( + `GC: Idle workspace ${workspace.id} pod not found, marking hibernated`, + ); + result.cleaned++; + continue; + } + + // Pod exists - hibernate it + await this.workspaceService.hibernateWorkspace(workspace.id); + + // Update workspace and workflow status + await this.prisma.$transaction([ + this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'HIBERNATED', + hibernationError: null, + }, + }), + // Also mark workflow as cancelled since no goal run is active + this.prisma.workflowRun.update({ + where: { id: workspace.workflowRun?.id }, + data: { + status: 'CANCELLED', + completedAt: new Date(), + error: `Workspace hibernated after ${idleMinutes} minutes of inactivity`, + }, + }), + ]); + + this.logger.log( + `GC: Idle workspace ${workspace.id} hibernated after ${idleMinutes} minutes`, + ); + result.cleaned++; + } catch (error: any) { + const errorMessage = error.message || 'Unknown error'; + + // Handle 404 (pod already deleted) + if (errorMessage.includes('404') || errorMessage.includes('not found')) { + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { status: 'HIBERNATED' }, + }); + this.logger.log( + `GC: Idle workspace ${workspace.id} already deleted, marking hibernated`, + ); + result.cleaned++; + } else { + // Mark for retry next cycle + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'HIBERNATION_FAILED', + hibernationError: `Idle hibernation failed: ${errorMessage}`, + hibernationAttemptCount: 1, + lastHibernationAttemptAt: new Date(), + }, + }); + + this.logger.warn( + `GC: Failed to hibernate idle workspace ${workspace.id}: ${errorMessage}`, + ); + result.failed++; + result.errors.push(`${workspace.id}: ${errorMessage}`); + } + } + } + } catch (error: any) { + this.logger.error(`Phase 3 (idle workspaces) failed: ${error.message}`); + result.errors.push(error.message); + } + + return result; + } + + /** + * Get current GC status and stats + */ + async getStatus(): Promise<{ + enabled: boolean; + isRunning: boolean; + gracePeriodMs: number; + maxRetries: number; + idleHibernationEnabled: boolean; + idleThresholdMs: number; + pendingCleanup: { + hibernationFailed: number; + potentialOrphans: number; + idleWorkspaces: number; + gcFailed: number; + }; + }> { + const gracePeriodAgo = new Date(Date.now() - this.gracePeriodMs); + const idleThresholdAgo = new Date(Date.now() - this.idleThresholdMs); + + const [hibernationFailed, potentialOrphans, idleWorkspaces, gcFailed] = await Promise.all([ + this.prisma.workspace.count({ + where: { status: 'HIBERNATION_FAILED' }, + }), + this.prisma.workflowRun.count({ + where: { + status: { in: ['COMPLETED', 'FAILED', 'CANCELLED'] }, + completedAt: { lt: gracePeriodAgo }, + workspace: { + status: { + notIn: ['HIBERNATED', 'TERMINATED', 'GC_FAILED', 'HIBERNATION_FAILED'], + }, + }, + }, + }), + // v1.1.0: Count idle workspaces + this.prisma.workspace.count({ + where: { + status: 'READY', + updatedAt: { lt: idleThresholdAgo }, + workflowRun: { + status: 'RUNNING', + OR: [ + { goalRun: null }, + { goalRun: { status: { not: 'RUNNING' } } }, + ], + }, + }, + }), + this.prisma.workspace.count({ + where: { status: 'GC_FAILED' }, + }), + ]); + + return { + enabled: this.enabled, + isRunning: this.isRunning, + gracePeriodMs: this.gracePeriodMs, + maxRetries: this.maxRetries, + idleHibernationEnabled: this.idleHibernationEnabled, + idleThresholdMs: this.idleThresholdMs, + pendingCleanup: { + hibernationFailed, + potentialOrphans, + idleWorkspaces, + gcFailed, + }, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/outbox-publisher.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/outbox-publisher.service.spec.ts new file mode 100644 index 000000000..7bc86b35f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/outbox-publisher.service.spec.ts @@ -0,0 +1,585 @@ +import { OutboxPublisherService } from './outbox-publisher.service'; +import { SlackEventType } from './slack-notification.service'; +import { GoalRunExecutionEngine } from '@prisma/client'; + +describe(OutboxPublisherService.name, () => { + it('publishes pending user-prompt outbox rows and marks processed', async () => { + const tx = { $queryRaw: jest.fn() } as any; + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + outbox: { + updateMany: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + } as any; + + const outboxPendingTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const outboxOldestPendingAgeSeconds = { set: jest.fn() } as any; + const outboxPublishAttemptsTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + const userPromptsOpenTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const resumeUpdateSuccessTotal = { inc: jest.fn() } as any; + const resumeUpdateFailedTotal = { inc: jest.fn() } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const leaderElection = { isLeader: true } as any; + + const slack = { + sendUserPromptNotification: jest.fn().mockResolvedValue([]), + } as any; + + const teams = { + sendUserPromptNotification: jest.fn().mockResolvedValue([]), + } as any; + + const service = new OutboxPublisherService( + configService, + prisma, + leaderElection, + slack, + teams, + outboxPendingTotal, + outboxOldestPendingAgeSeconds, + outboxPublishAttemptsTotal, + userPromptsOpenTotal, + resumeUpdateSuccessTotal, + resumeUpdateFailedTotal, + undefined, + ); + + const row = { + id: 'o-1', + dedupeKey: 'prompt:gr-1:ci-1:TEXT_CLARIFICATION', + aggregateId: 'gr-1', + eventType: SlackEventType.USER_PROMPT_CREATED, + payload: { + promptId: 'p-1', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + kind: 'TEXT_CLARIFICATION', + stepDescription: 'Confirm details', + }, + processedAt: null, + nextAttemptAt: new Date(0), + retryCount: 0, + error: null, + createdAt: new Date(), + }; + + tx.$queryRaw.mockResolvedValueOnce([row]); + prisma.outbox.updateMany.mockResolvedValue({ count: 1 }); + + await service.processBatch(); + + expect(slack.sendUserPromptNotification).toHaveBeenCalledWith( + SlackEventType.USER_PROMPT_CREATED, + expect.objectContaining({ + tenantId: 't-1', + promptId: 'p-1', + }), + { eventId: row.dedupeKey }, + ); + expect(teams.sendUserPromptNotification).toHaveBeenCalledTimes(1); + + expect(prisma.outbox.updateMany).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: 'o-1', processedAt: null }, + data: expect.objectContaining({ processedAt: expect.any(Date) }), + }), + ); + }); + + it('resolves tenantId from GoalRun when missing from payload', async () => { + const tx = { $queryRaw: jest.fn() } as any; + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + outbox: { + updateMany: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + } as any; + + const outboxPendingTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const outboxOldestPendingAgeSeconds = { set: jest.fn() } as any; + const outboxPublishAttemptsTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + const userPromptsOpenTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const resumeUpdateSuccessTotal = { inc: jest.fn() } as any; + const resumeUpdateFailedTotal = { inc: jest.fn() } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const leaderElection = { isLeader: true } as any; + + const slack = { + sendUserPromptNotification: jest.fn().mockResolvedValue([]), + } as any; + + const teams = { + sendUserPromptNotification: jest.fn().mockResolvedValue([]), + } as any; + + const service = new OutboxPublisherService( + configService, + prisma, + leaderElection, + slack, + teams, + outboxPendingTotal, + outboxOldestPendingAgeSeconds, + outboxPublishAttemptsTotal, + userPromptsOpenTotal, + resumeUpdateSuccessTotal, + resumeUpdateFailedTotal, + undefined, + ); + + const row = { + id: 'o-1', + dedupeKey: 'prompt:gr-1:ci-1:TEXT_CLARIFICATION', + aggregateId: 'gr-1', + eventType: SlackEventType.USER_PROMPT_CREATED, + payload: { + promptId: 'p-1', + tenantId: null, + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + kind: 'TEXT_CLARIFICATION', + stepDescription: 'Confirm details', + }, + processedAt: null, + nextAttemptAt: new Date(0), + retryCount: 0, + error: null, + createdAt: new Date(), + }; + + tx.$queryRaw.mockResolvedValueOnce([row]); + prisma.goalRun.findUnique.mockResolvedValueOnce({ tenantId: 't-1' }); + prisma.outbox.updateMany.mockResolvedValue({ count: 1 }); + + await service.processBatch(); + + expect(prisma.goalRun.findUnique).toHaveBeenCalledWith({ + where: { id: 'gr-1' }, + select: { tenantId: true }, + }); + expect(slack.sendUserPromptNotification).toHaveBeenCalledWith( + SlackEventType.USER_PROMPT_CREATED, + expect.objectContaining({ + tenantId: 't-1', + }), + { eventId: row.dedupeKey }, + ); + }); + + it('updates retry_count and gives up when max retries exceeded', async () => { + const tx = { $queryRaw: jest.fn() } as any; + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + outbox: { + updateMany: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + } as any; + + const outboxPendingTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const outboxOldestPendingAgeSeconds = { set: jest.fn() } as any; + const outboxPublishAttemptsTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + const userPromptsOpenTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const resumeUpdateSuccessTotal = { inc: jest.fn() } as any; + const resumeUpdateFailedTotal = { inc: jest.fn() } as any; + + const configService = { + get: jest.fn((key: string, fallback: string) => { + if (key === 'OUTBOX_PUBLISHER_MAX_RETRIES') return '1'; + if (key === 'OUTBOX_PUBLISHER_RETRY_BASE_DELAY_MS') return '1'; + if (key === 'OUTBOX_PUBLISHER_RETRY_MAX_DELAY_MS') return '1'; + return fallback; + }), + } as any; + + const leaderElection = { isLeader: true } as any; + + const slack = { + sendUserPromptNotification: jest.fn().mockRejectedValue(new Error('slack down')), + } as any; + + const teams = { + sendUserPromptNotification: jest.fn(), + } as any; + + const service = new OutboxPublisherService( + configService, + prisma, + leaderElection, + slack, + teams, + outboxPendingTotal, + outboxOldestPendingAgeSeconds, + outboxPublishAttemptsTotal, + userPromptsOpenTotal, + resumeUpdateSuccessTotal, + resumeUpdateFailedTotal, + undefined, + ); + + const row = { + id: 'o-1', + dedupeKey: 'prompt:gr-1:ci-1:TEXT_CLARIFICATION', + aggregateId: 'gr-1', + eventType: SlackEventType.USER_PROMPT_CREATED, + payload: { + promptId: 'p-1', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + kind: 'TEXT_CLARIFICATION', + stepDescription: 'Confirm details', + }, + processedAt: null, + nextAttemptAt: new Date(0), + retryCount: 0, + error: null, + createdAt: new Date(), + }; + + tx.$queryRaw.mockResolvedValueOnce([row]); + prisma.outbox.updateMany.mockResolvedValue({ count: 1 }); + + await service.processBatch(); + + expect(prisma.outbox.updateMany).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: 'o-1', processedAt: null }, + data: expect.objectContaining({ + retryCount: 1, + processedAt: expect.any(Date), + }), + }), + ); + }); + + it('acks resume as skipped when Temporal is disabled (prevents reconciler churn)', async () => { + const tx = { $queryRaw: jest.fn() } as any; + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + outbox: { + updateMany: jest.fn(), + }, + userPromptResolution: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + userPrompt: { + findUnique: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + } as any; + + const outboxPendingTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const outboxOldestPendingAgeSeconds = { set: jest.fn() } as any; + const outboxPublishAttemptsTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + const userPromptsOpenTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const resumeUpdateSuccessTotal = { inc: jest.fn() } as any; + const resumeUpdateFailedTotal = { inc: jest.fn() } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const leaderElection = { isLeader: true } as any; + const slack = { sendUserPromptNotification: jest.fn() } as any; + const teams = { sendUserPromptNotification: jest.fn() } as any; + + const service = new OutboxPublisherService( + configService, + prisma, + leaderElection, + slack, + teams, + outboxPendingTotal, + outboxOldestPendingAgeSeconds, + outboxPublishAttemptsTotal, + userPromptsOpenTotal, + resumeUpdateSuccessTotal, + resumeUpdateFailedTotal, + undefined, + ); + + const row = { + id: 'o-1', + dedupeKey: 'user_prompt.resume:p-1', + aggregateId: 'gr-1', + eventType: 'user_prompt.resume', + payload: { + promptId: 'p-1', + goalRunId: 'gr-1', + updateId: 'user_prompt.resume:p-1', + }, + processedAt: null, + nextAttemptAt: new Date(0), + retryCount: 0, + error: null, + createdAt: new Date(), + }; + + tx.$queryRaw.mockResolvedValueOnce([row]); + prisma.goalRun.findUnique.mockResolvedValueOnce({ executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP }); + prisma.outbox.updateMany.mockResolvedValue({ count: 1 }); + + await service.processBatch(); + + expect(prisma.userPromptResolution.updateMany).toHaveBeenCalledWith({ + where: { promptId: 'p-1', resumeAcknowledgedAt: null }, + data: expect.objectContaining({ + resumeAcknowledgedAt: expect.any(Date), + resumeAck: expect.objectContaining({ + skipped: true, + skipReason: 'LEGACY_ENGINE', + executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP, + updateId: 'user_prompt.resume:p-1', + }), + }), + }); + + expect(prisma.userPromptResolution.findUnique).not.toHaveBeenCalled(); + expect(resumeUpdateSuccessTotal.inc).not.toHaveBeenCalled(); + expect(resumeUpdateFailedTotal.inc).not.toHaveBeenCalled(); + }); + + it('acks resume delivery after Temporal Update succeeds', async () => { + const tx = { $queryRaw: jest.fn() } as any; + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + outbox: { + updateMany: jest.fn(), + }, + userPromptResolution: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + userPrompt: { + findUnique: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + } as any; + + const outboxPendingTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const outboxOldestPendingAgeSeconds = { set: jest.fn() } as any; + const outboxPublishAttemptsTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + const userPromptsOpenTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const resumeUpdateSuccessTotal = { inc: jest.fn() } as any; + const resumeUpdateFailedTotal = { inc: jest.fn() } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const leaderElection = { isLeader: true } as any; + + const slack = { + sendUserPromptNotification: jest.fn(), + } as any; + + const teams = { + sendUserPromptNotification: jest.fn(), + } as any; + + const temporalWorkflowService = { + isEnabled: jest.fn(() => true), + resumeFromUserPrompt: jest.fn().mockResolvedValue({ didResume: true }), + } as any; + + const service = new OutboxPublisherService( + configService, + prisma, + leaderElection, + slack, + teams, + outboxPendingTotal, + outboxOldestPendingAgeSeconds, + outboxPublishAttemptsTotal, + userPromptsOpenTotal, + resumeUpdateSuccessTotal, + resumeUpdateFailedTotal, + temporalWorkflowService, + ); + + const row = { + id: 'o-1', + dedupeKey: 'user_prompt.resume:p-1', + aggregateId: 'gr-1', + eventType: 'user_prompt.resume', + payload: { + promptId: 'p-1', + goalRunId: 'gr-1', + updateId: 'user_prompt.resume:p-1', + }, + processedAt: null, + nextAttemptAt: new Date(0), + retryCount: 0, + error: null, + createdAt: new Date(), + }; + + tx.$queryRaw.mockResolvedValueOnce([row]); + prisma.goalRun.findUnique.mockResolvedValueOnce({ executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW }); + prisma.userPromptResolution.findUnique.mockResolvedValueOnce({ answers: { foo: 'bar' } }); + prisma.outbox.updateMany.mockResolvedValue({ count: 1 }); + + await service.processBatch(); + + expect(temporalWorkflowService.resumeFromUserPrompt).toHaveBeenCalledWith( + 'gr-1', + { promptId: 'p-1', answers: { foo: 'bar' } }, + { updateId: 'user_prompt.resume:p-1' }, + ); + + expect(prisma.userPromptResolution.updateMany).toHaveBeenCalledWith({ + where: { promptId: 'p-1', resumeAcknowledgedAt: null }, + data: expect.objectContaining({ + resumeAcknowledgedAt: expect.any(Date), + resumeAck: expect.objectContaining({ + didResume: true, + updateId: 'user_prompt.resume:p-1', + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + outboxRowId: 'o-1', + outboxDedupeKey: 'user_prompt.resume:p-1', + }), + }), + }); + expect(resumeUpdateSuccessTotal.inc).toHaveBeenCalledTimes(1); + expect(resumeUpdateFailedTotal.inc).not.toHaveBeenCalled(); + }); + + it('Scenario E: Temporal outage leaves resume row pending, then succeeds on retry', async () => { + const tx = { $queryRaw: jest.fn() } as any; + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + outbox: { + updateMany: jest.fn(), + }, + userPromptResolution: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + userPrompt: { + findUnique: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + } as any; + + const outboxPendingTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const outboxOldestPendingAgeSeconds = { set: jest.fn() } as any; + const outboxPublishAttemptsTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + const userPromptsOpenTotal = { reset: jest.fn(), labels: jest.fn(() => ({ set: jest.fn() })) } as any; + const resumeUpdateSuccessTotal = { inc: jest.fn() } as any; + const resumeUpdateFailedTotal = { inc: jest.fn() } as any; + + const configService = { + get: jest.fn((key: string, fallback: string) => { + if (key === 'OUTBOX_PUBLISHER_MAX_RETRIES') return '20'; + if (key === 'OUTBOX_PUBLISHER_RETRY_BASE_DELAY_MS') return '1'; + if (key === 'OUTBOX_PUBLISHER_RETRY_MAX_DELAY_MS') return '1'; + return fallback; + }), + } as any; + + const leaderElection = { isLeader: true } as any; + const slack = { sendUserPromptNotification: jest.fn() } as any; + const teams = { sendUserPromptNotification: jest.fn() } as any; + + const temporalWorkflowService = { + isEnabled: jest.fn(() => true), + resumeFromUserPrompt: jest + .fn() + .mockRejectedValueOnce(new Error('Temporal unavailable')) + .mockResolvedValueOnce({ didResume: true }), + } as any; + + const service = new OutboxPublisherService( + configService, + prisma, + leaderElection, + slack, + teams, + outboxPendingTotal, + outboxOldestPendingAgeSeconds, + outboxPublishAttemptsTotal, + userPromptsOpenTotal, + resumeUpdateSuccessTotal, + resumeUpdateFailedTotal, + temporalWorkflowService, + ); + + const row = { + id: 'o-1', + dedupeKey: 'user_prompt.resume:p-1', + aggregateId: 'gr-1', + eventType: 'user_prompt.resume', + payload: { + promptId: 'p-1', + goalRunId: 'gr-1', + updateId: 'user_prompt.resume:p-1', + }, + processedAt: null, + nextAttemptAt: new Date(0), + retryCount: 0, + error: null, + createdAt: new Date(), + }; + + prisma.userPromptResolution.findUnique.mockResolvedValue({ answers: { foo: 'bar' } }); + prisma.goalRun.findUnique.mockResolvedValue({ executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW }); + + // First attempt: Temporal failure + tx.$queryRaw.mockResolvedValueOnce([row]); + await service.processBatch(); + + expect(prisma.outbox.updateMany).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: 'o-1', processedAt: null }, + data: expect.objectContaining({ + retryCount: 1, + error: expect.any(String), + nextAttemptAt: expect.any(Date), + }), + }), + ); + expect((prisma.outbox.updateMany as jest.Mock).mock.calls[0][0].data).not.toHaveProperty('processedAt'); + expect(prisma.userPromptResolution.updateMany).not.toHaveBeenCalled(); + expect(resumeUpdateFailedTotal.inc).toHaveBeenCalledTimes(1); + + // Second attempt: Temporal success + ack + mark processed + tx.$queryRaw.mockResolvedValueOnce([row]); + prisma.outbox.updateMany.mockResolvedValue({ count: 1 }); + + await service.processBatch(); + + expect(temporalWorkflowService.resumeFromUserPrompt).toHaveBeenCalledTimes(2); + expect(prisma.userPromptResolution.updateMany).toHaveBeenCalledWith( + expect.objectContaining({ + where: { promptId: 'p-1', resumeAcknowledgedAt: null }, + data: expect.objectContaining({ resumeAcknowledgedAt: expect.any(Date) }), + }), + ); + expect(resumeUpdateSuccessTotal.inc).toHaveBeenCalledTimes(1); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/outbox-publisher.service.ts b/packages/bytebot-workflow-orchestrator/src/services/outbox-publisher.service.ts new file mode 100644 index 000000000..5dd8c6a2c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/outbox-publisher.service.ts @@ -0,0 +1,405 @@ +/** + * Outbox Publisher Service (PR 3) + * + * Implements a DB-backed relay for the transactional outbox: + * - Reads pending rows from workflow_orchestrator.outbox + * - Publishes notifications (Slack/Teams) using stable dedupe keys + * - Marks rows processed only after successful publish + * - Retries with exponential backoff via nextAttemptAt + * + * Runs only on the elected leader to avoid duplicate publishing. + */ + +import { Injectable, Logger, OnModuleInit, Optional } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { GoalRunExecutionEngine, Outbox, UserPromptStatus } from '@prisma/client'; +import { InjectMetric } from '@willsoto/nestjs-prometheus'; +import type { Counter, Gauge } from 'prom-client'; +import { PrismaService } from './prisma.service'; +import { SlackEventType, SlackNotificationService, UserPromptEventData as SlackUserPromptEventData } from './slack-notification.service'; +import { TeamsEventType, TeamsNotificationService, UserPromptEventData as TeamsUserPromptEventData } from './teams-notification.service'; +import { LeaderElectionService } from './leader-election.service'; +import { TemporalWorkflowService } from '../temporal/temporal-workflow.service'; + +type UserPromptOutboxPayload = { + promptId: string; + tenantId?: string | null; + goalRunId: string; + checklistItemId: string | null; + goalSpecId?: string | null; + kind?: string; + stepDescription?: string | null; + links?: { goalRun?: string; prompt?: string; desktopTakeover?: string | null }; +}; + +type UserPromptResumeOutboxPayload = { + promptId: string; + goalRunId: string; + updateId?: string; +}; + +@Injectable() +export class OutboxPublisherService implements OnModuleInit { + private readonly logger = new Logger(OutboxPublisherService.name); + + private readonly enabled: boolean; + private readonly batchSize: number; + private readonly claimLeaseSeconds: number; + private readonly maxRetries: number; + private readonly retryBaseDelayMs: number; + private readonly retryMaxDelayMs: number; + + private isProcessing = false; + + constructor( + private readonly configService: ConfigService, + private readonly prisma: PrismaService, + private readonly leaderElection: LeaderElectionService, + private readonly slack: SlackNotificationService, + private readonly teams: TeamsNotificationService, + @InjectMetric('outbox_pending_total') + private readonly outboxPendingTotal: Gauge, + @InjectMetric('outbox_oldest_pending_age_seconds') + private readonly outboxOldestPendingAgeSeconds: Gauge, + @InjectMetric('outbox_publish_attempts_total') + private readonly outboxPublishAttemptsTotal: Counter, + @InjectMetric('user_prompts_open_total') + private readonly userPromptsOpenTotal: Gauge, + @InjectMetric('resume_update_success_total') + private readonly resumeUpdateSuccessTotal: Counter, + @InjectMetric('resume_update_failed_total') + private readonly resumeUpdateFailedTotal: Counter, + @Optional() private readonly temporalWorkflowService?: TemporalWorkflowService, + ) { + this.enabled = this.configService.get('OUTBOX_PUBLISHER_ENABLED', 'true') === 'true'; + this.batchSize = parseInt(this.configService.get('OUTBOX_PUBLISHER_BATCH_SIZE', '25'), 10); + this.claimLeaseSeconds = parseInt( + this.configService.get('OUTBOX_PUBLISHER_CLAIM_LEASE_SECONDS', '60'), + 10, + ); + this.maxRetries = parseInt(this.configService.get('OUTBOX_PUBLISHER_MAX_RETRIES', '20'), 10); + this.retryBaseDelayMs = parseInt(this.configService.get('OUTBOX_PUBLISHER_RETRY_BASE_DELAY_MS', '30000'), 10); + this.retryMaxDelayMs = parseInt(this.configService.get('OUTBOX_PUBLISHER_RETRY_MAX_DELAY_MS', '600000'), 10); + } + + async onModuleInit(): Promise { + if (!this.enabled) { + this.logger.warn('Outbox publisher is disabled'); + return; + } + this.logger.log( + `Outbox publisher enabled (batchSize=${this.batchSize}, maxRetries=${this.maxRetries}, baseDelayMs=${this.retryBaseDelayMs})`, + ); + this.logger.log('Outbox publisher will start processing when this instance becomes leader'); + } + + @Cron(CronExpression.EVERY_5_SECONDS) + async tick(): Promise { + try { + await this.updateGauges(new Date()); + } catch (error: any) { + this.logger.debug(`Outbox gauge refresh failed: ${error.message}`); + } + + if (!this.enabled) return; + if (!this.leaderElection.isLeader) return; + if (this.isProcessing) return; + + this.isProcessing = true; + try { + await this.processBatch(); + } catch (error: any) { + this.logger.error(`Outbox publisher tick error: ${error.message}`); + } finally { + this.isProcessing = false; + } + } + + async processBatch(): Promise { + const now = new Date(); + + const leaseUntil = new Date(now.getTime() + this.claimLeaseSeconds * 1000); + + const rows = await this.prisma.$transaction(async (tx) => { + // Claim rows atomically using row-level locks (multiple replicas safe). + // We "lease" claimed rows by pushing next_attempt_at forward; if the worker crashes, + // rows become eligible again after the lease expires. + return tx.$queryRaw` + WITH claimed AS ( + SELECT id + FROM "workflow_orchestrator"."outbox" + WHERE "processed_at" IS NULL + AND "next_attempt_at" <= ${now} + ORDER BY "created_at" ASC + FOR UPDATE SKIP LOCKED + LIMIT ${this.batchSize} + ) + UPDATE "workflow_orchestrator"."outbox" o + SET "next_attempt_at" = ${leaseUntil} + FROM claimed + WHERE o.id = claimed.id + RETURNING + o.id, + o.dedupe_key AS "dedupeKey", + o.aggregate_id AS "aggregateId", + o.event_type AS "eventType", + o.payload, + o.processed_at AS "processedAt", + o.retry_count AS "retryCount", + o.error, + o.created_at AS "createdAt", + o.next_attempt_at AS "nextAttemptAt"; + `; + }); + + for (const row of rows) { + await this.processRow(row); + } + + return rows.length; + } + + private async updateGauges(now: Date): Promise { + const pendingByType = await this.prisma.outbox.groupBy({ + by: ['eventType'], + where: { processedAt: null }, + _count: { _all: true }, + }); + + this.outboxPendingTotal.reset(); + for (const row of pendingByType) { + this.outboxPendingTotal.labels(row.eventType).set(row._count._all); + } + + const oldestPending = await this.prisma.outbox.findFirst({ + where: { processedAt: null }, + orderBy: { createdAt: 'asc' }, + select: { createdAt: true }, + }); + + const oldestAgeSeconds = oldestPending ? (now.getTime() - oldestPending.createdAt.getTime()) / 1000 : 0; + this.outboxOldestPendingAgeSeconds.set(oldestAgeSeconds); + + const openPromptsByKind = await this.prisma.userPrompt.groupBy({ + by: ['kind'], + where: { status: UserPromptStatus.OPEN }, + _count: { _all: true }, + }); + + this.userPromptsOpenTotal.reset(); + for (const row of openPromptsByKind) { + this.userPromptsOpenTotal.labels(row.kind).set(row._count._all); + } + } + + private async processRow(row: Outbox): Promise { + try { + await this.publishRow(row); + + this.outboxPublishAttemptsTotal.labels(row.eventType, 'success').inc(); + + await this.prisma.outbox.updateMany({ + where: { id: row.id, processedAt: null }, + data: { processedAt: new Date(), error: null }, + }); + } catch (error: any) { + this.outboxPublishAttemptsTotal.labels(row.eventType, 'failure').inc(); + + const retryCount = (row.retryCount ?? 0) + 1; + const nextAttemptAt = this.computeNextAttemptAt(new Date(), retryCount); + const message = this.truncateError(error?.message || String(error)); + + const shouldGiveUp = retryCount >= this.maxRetries; + + await this.prisma.outbox.updateMany({ + where: { id: row.id, processedAt: null }, + data: { + retryCount, + error: message, + nextAttemptAt, + ...(shouldGiveUp ? { processedAt: new Date() } : {}), + }, + }); + + if (shouldGiveUp) { + this.logger.error(`Outbox row ${row.id} gave up after ${retryCount} attempts: ${message}`); + } else { + this.logger.warn(`Outbox row ${row.id} publish failed (attempt ${retryCount}); retry at ${nextAttemptAt.toISOString()}: ${message}`); + } + } + } + + private async publishRow(row: Outbox): Promise { + switch (row.eventType) { + case SlackEventType.USER_PROMPT_CREATED: + case SlackEventType.USER_PROMPT_RESOLVED: + case SlackEventType.USER_PROMPT_CANCELLED: + await this.publishUserPrompt(row, row.eventType); + return; + case 'user_prompt.resume': + await this.resumeUserPrompt(row); + return; + default: + throw new Error(`Unknown outbox eventType=${row.eventType}`); + } + } + + private async publishUserPrompt( + row: Outbox, + eventType: + | SlackEventType.USER_PROMPT_CREATED + | SlackEventType.USER_PROMPT_RESOLVED + | SlackEventType.USER_PROMPT_CANCELLED, + ): Promise { + const payload = row.payload as unknown as UserPromptOutboxPayload; + + const tenantId = await this.resolveTenantId(payload.tenantId ?? null, payload.goalRunId ?? row.aggregateId ?? null); + if (!tenantId) { + throw new Error(`Missing tenantId for outbox row ${row.id} (${row.eventType})`); + } + + const data: SlackUserPromptEventData & TeamsUserPromptEventData = { + promptId: payload.promptId, + tenantId, + goalRunId: payload.goalRunId, + checklistItemId: payload.checklistItemId, + kind: payload.kind || 'TEXT_CLARIFICATION', + stepDescription: payload.stepDescription ?? null, + links: payload.links, + }; + + const [slackResults, teamsResults] = await Promise.all([ + this.slack.sendUserPromptNotification(eventType, data, { eventId: row.dedupeKey }), + this.teams.sendUserPromptNotification(eventType as unknown as TeamsEventType, data, { eventId: row.dedupeKey }), + ]); + + const failures = [...slackResults, ...teamsResults].filter((r) => !r.success); + if (failures.length > 0) { + throw new Error(`Notification delivery failures: ${failures.length}`); + } + } + + private async resumeUserPrompt(row: Outbox): Promise { + const payload = row.payload as unknown as UserPromptResumeOutboxPayload; + if (!payload?.promptId || !payload?.goalRunId) { + throw new Error(`Invalid resume payload for outbox row ${row.id}`); + } + + const updateId = payload.updateId || row.dedupeKey; + + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: payload.goalRunId }, + select: { executionEngine: true }, + }); + + if (!goalRun) { + throw new Error(`Missing goal run for resume ${payload.promptId} (goalRunId=${payload.goalRunId})`); + } + + if (goalRun.executionEngine === GoalRunExecutionEngine.LEGACY_DB_LOOP) { + // Legacy orchestrator (DB loop) does not require Temporal resume, but we still + // must ack durably to prevent reconciler churn and false-positive alerting. + const ackedAt = new Date(); + await this.prisma.userPromptResolution.updateMany({ + where: { + promptId: payload.promptId, + resumeAcknowledgedAt: null, + }, + data: { + resumeAcknowledgedAt: ackedAt, + resumeAck: { + acknowledgedAt: ackedAt.toISOString(), + didResume: false, + updateId, + skipped: true, + skipReason: 'LEGACY_ENGINE', + executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP, + outboxRowId: row.id, + outboxDedupeKey: row.dedupeKey, + } as any, + }, + }); + return; + } + + // For TEMPORAL_WORKFLOW runs, we only ack after a successful Temporal Update. + // If Temporal is disabled/unreachable, that is a delivery failure (not a skip). + if (!this.temporalWorkflowService?.isEnabled()) { + throw new Error( + `Temporal workflows disabled but run execution_engine=TEMPORAL_WORKFLOW (goalRunId=${payload.goalRunId})`, + ); + } + + const resolution = await this.prisma.userPromptResolution.findUnique({ + where: { promptId: payload.promptId }, + select: { answers: true }, + }); + + const answers = + (resolution?.answers as unknown as Record | null) ?? + (await this.prisma.userPrompt.findUnique({ + where: { id: payload.promptId }, + select: { answers: true }, + }))?.answers; + + if (!answers || typeof answers !== 'object') { + throw new Error(`Missing answers for prompt resume ${payload.promptId}`); + } + let resumeResult: { didResume: boolean }; + try { + resumeResult = await this.temporalWorkflowService.resumeFromUserPrompt( + payload.goalRunId, + { promptId: payload.promptId, answers: answers as any }, + { updateId }, + ); + this.resumeUpdateSuccessTotal.inc(); + } catch (error: any) { + this.resumeUpdateFailedTotal.inc(); + throw error; + } + + // Acknowledge resume attempt durably so a reconciler can prove convergence: + // DB prompt RESOLVED + resume_acknowledged_at set ⇒ we delivered (or intentionally skipped) the resume Update. + const ackedAt = new Date(); + await this.prisma.userPromptResolution.updateMany({ + where: { + promptId: payload.promptId, + resumeAcknowledgedAt: null, + }, + data: { + resumeAcknowledgedAt: ackedAt, + resumeAck: { + acknowledgedAt: ackedAt.toISOString(), + didResume: resumeResult.didResume, + updateId, + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + outboxRowId: row.id, + outboxDedupeKey: row.dedupeKey, + } as any, + }, + }); + } + + private async resolveTenantId(tenantId: string | null, goalRunId: string | null): Promise { + if (tenantId) return tenantId; + if (!goalRunId) return null; + + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { tenantId: true }, + }); + + return goalRun?.tenantId ?? null; + } + + private computeNextAttemptAt(now: Date, retryCount: number): Date { + const exponent = Math.max(0, retryCount - 1); + const delay = Math.min(this.retryBaseDelayMs * Math.pow(2, exponent), this.retryMaxDelayMs); + return new Date(now.getTime() + delay); + } + + private truncateError(message: string): string { + return message.length > 2000 ? `${message.slice(0, 2000)}…` : message; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/outbox.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/outbox.service.spec.ts new file mode 100644 index 000000000..4e8d3f71b --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/outbox.service.spec.ts @@ -0,0 +1,136 @@ +import { OutboxService } from './outbox.service'; + +describe(OutboxService.name, () => { + it('creates an outbox row on first call', async () => { + const prisma = { + outbox: { + create: jest.fn(), + findUnique: jest.fn(), + }, + } as any; + + const service = new OutboxService(prisma); + + const created = { + id: 'o1', + dedupeKey: 'prompt:run1:step1:TEXT_CLARIFICATION', + aggregateId: 'run1', + eventType: 'user_prompt.created', + payload: { foo: 'bar' }, + }; + + prisma.outbox.create.mockResolvedValueOnce(created); + + const result = await service.enqueueOnce({ + dedupeKey: created.dedupeKey, + aggregateId: 'run1', + eventType: 'user_prompt.created', + payload: { foo: 'bar' }, + }); + + expect(result).toBe(created); + expect(prisma.outbox.create).toHaveBeenCalledTimes(1); + }); + + it('dedupes by returning existing outbox row on unique violation', async () => { + const prisma = { + outbox: { + create: jest.fn(), + findUnique: jest.fn(), + }, + } as any; + + const service = new OutboxService(prisma); + + const existing = { + id: 'o1', + dedupeKey: 'prompt:run1:step1:TEXT_CLARIFICATION', + aggregateId: 'run1', + eventType: 'user_prompt.created', + payload: { foo: 'bar' }, + }; + + prisma.outbox.create.mockRejectedValueOnce({ code: 'P2002' }); + prisma.outbox.findUnique.mockResolvedValueOnce(existing); + + const result = await service.enqueueOnce({ + dedupeKey: existing.dedupeKey, + aggregateId: 'run1', + eventType: 'user_prompt.created', + payload: { foo: 'bar' }, + }); + + expect(result).toBe(existing); + expect(prisma.outbox.findUnique).toHaveBeenCalledWith({ + where: { dedupeKey: existing.dedupeKey }, + }); + }); + + it('retries if dedupe race returns null', async () => { + const prisma = { + outbox: { + create: jest.fn(), + findUnique: jest.fn(), + }, + } as any; + + const service = new OutboxService(prisma); + + const created = { + id: 'o1', + dedupeKey: 'prompt:run1:step1:TEXT_CLARIFICATION', + aggregateId: 'run1', + eventType: 'user_prompt.created', + payload: { foo: 'bar' }, + }; + + prisma.outbox.create.mockRejectedValueOnce({ code: 'P2002' }).mockResolvedValueOnce(created); + prisma.outbox.findUnique.mockResolvedValueOnce(null); + + const result = await service.enqueueOnce({ + dedupeKey: created.dedupeKey, + aggregateId: 'run1', + eventType: 'user_prompt.created', + payload: { foo: 'bar' }, + }); + + expect(result).toBe(created); + expect(prisma.outbox.create).toHaveBeenCalledTimes(2); + }); + + it('replays events with cursor and returns nextCursor', async () => { + const prisma = { + $queryRaw: jest.fn(), + } as any; + + const service = new OutboxService(prisma); + + prisma.$queryRaw.mockResolvedValueOnce([ + { + eventSequence: 5n, + dedupeKey: 'user_prompt.created:p-1', + aggregateId: 'gr-1', + eventType: 'user_prompt.created', + payload: { tenantId: 't-1', promptId: 'p-1' }, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + processedAt: null, + }, + { + eventSequence: 6n, + dedupeKey: 'user_prompt.resolved:p-1', + aggregateId: 'gr-1', + eventType: 'user_prompt.resolved', + payload: { tenantId: 't-1', promptId: 'p-1' }, + createdAt: new Date('2026-01-01T00:01:00.000Z'), + processedAt: new Date('2026-01-01T00:01:01.000Z'), + }, + ]); + + const result = await service.replayEvents({ tenantId: 't-1', cursor: 0n, limit: 10 }); + + expect(result.events).toHaveLength(2); + expect(result.events[0].eventSequence).toBe('5'); + expect(result.events[1].eventSequence).toBe('6'); + expect(result.nextCursor).toBe('6'); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/outbox.service.ts b/packages/bytebot-workflow-orchestrator/src/services/outbox.service.ts new file mode 100644 index 000000000..f6c33c106 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/outbox.service.ts @@ -0,0 +1,111 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { createId } from '@paralleldrive/cuid2'; +import { PrismaService } from './prisma.service'; +import { Outbox, Prisma } from '@prisma/client'; + +export interface EnqueueOutboxOnceRequest { + dedupeKey: string; + eventType: string; + aggregateId?: string; + payload: Prisma.InputJsonValue; +} + +export type OutboxReplayEvent = { + eventSequence: string; + dedupeKey: string; + aggregateId: string | null; + eventType: string; + payload: any; + createdAt: string; + processedAt: string | null; +}; + +@Injectable() +export class OutboxService { + private readonly logger = new Logger(OutboxService.name); + + constructor(private readonly prisma: PrismaService) {} + + /** + * Create (or return existing) outbox row. + * Idempotent via unique Outbox.dedupeKey. + */ + async enqueueOnce(request: EnqueueOutboxOnceRequest): Promise { + try { + return await this.prisma.outbox.create({ + data: { + id: createId(), + dedupeKey: request.dedupeKey, + aggregateId: request.aggregateId, + eventType: request.eventType, + payload: request.payload, + }, + }); + } catch (error: any) { + if (error?.code !== 'P2002') { + throw error; + } + + const existing = await this.prisma.outbox.findUnique({ + where: { dedupeKey: request.dedupeKey }, + }); + if (existing) { + return existing; + } + + this.logger.warn(`Outbox dedupe race for ${request.dedupeKey}; retrying`); + return this.enqueueOnce(request); + } + } + + async replayEvents(request: { + tenantId: string; + cursor?: bigint; + limit?: number; + eventType?: string; + }): Promise<{ events: OutboxReplayEvent[]; nextCursor: string | null }> { + const cursor = request.cursor ?? 0n; + const limit = Math.min(Math.max(request.limit ?? 50, 1), 500); + + const rows = await this.prisma.$queryRaw< + Array<{ + eventSequence: bigint; + dedupeKey: string; + aggregateId: string | null; + eventType: string; + payload: any; + createdAt: Date; + processedAt: Date | null; + }> + >` + SELECT + o.event_sequence AS "eventSequence", + o.dedupe_key AS "dedupeKey", + o.aggregate_id AS "aggregateId", + o.event_type AS "eventType", + o.payload AS "payload", + o.created_at AS "createdAt", + o.processed_at AS "processedAt" + FROM "workflow_orchestrator"."outbox" o + WHERE o.event_sequence IS NOT NULL + AND o.event_sequence > ${cursor} + AND (o.payload->>'tenantId') = ${request.tenantId} + AND (${request.eventType ?? null}::text IS NULL OR o.event_type = ${request.eventType ?? null}::text) + ORDER BY o.event_sequence ASC + LIMIT ${limit}; + `; + + const events: OutboxReplayEvent[] = rows.map((r) => ({ + eventSequence: String(r.eventSequence), + dedupeKey: r.dedupeKey, + aggregateId: r.aggregateId, + eventType: r.eventType, + payload: r.payload, + createdAt: r.createdAt.toISOString(), + processedAt: r.processedAt ? r.processedAt.toISOString() : null, + })); + + const nextCursor = events.length > 0 ? events[events.length - 1].eventSequence : null; + return { events, nextCursor }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/planner.desktop-surface-gate.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/planner.desktop-surface-gate.spec.ts new file mode 100644 index 000000000..222bed141 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/planner.desktop-surface-gate.spec.ts @@ -0,0 +1,104 @@ +import { PlannerService } from './planner.service'; +import { StepType } from '@prisma/client'; + +describe('PlannerService desktop surface feasibility gate', () => { + const makeService = () => { + const prisma = {} as any; + const goalRunService = {} as any; + const configService = { + get: jest.fn((_key: string, fallback: any) => fallback), + } as any; + return new PlannerService(prisma, goalRunService, configService); + }; + + it('sets requiresDesktop=true when suggestedTools include desktop execution tools', async () => { + const service = makeService(); + + jest.spyOn(service as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Open a website in the browser', + type: StepType.EXECUTE, + expectedOutcome: 'Website loaded', + suggestedTools: ['browser'], + requiresDesktop: false, + }, + { + description: 'Verify page loaded', + type: StepType.EXECUTE, + expectedOutcome: 'Page verified', + suggestedTools: ['screenshot'], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + const output = await (service as any).callLLMForPlan('Open site', {}); + expect(output.items[0].requiresDesktop).toBe(true); + expect(output.items[1].requiresDesktop).toBe(true); + }); + + it('does not force requiresDesktop=true for non-desktop tools', async () => { + const service = makeService(); + + jest.spyOn(service as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Parse a document', + type: StepType.EXECUTE, + expectedOutcome: 'Document parsed', + suggestedTools: ['document_parse'], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + const output = await (service as any).callLLMForPlan('Parse doc', {}); + expect(output.items[0].requiresDesktop).toBe(false); + }); + + it('forces DESKTOP feasibility for travel-shopping goals even when planner omits tools', async () => { + const service = makeService(); + + jest.spyOn(service as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Search for round-trip flights from DTW to LAS for Feb 21-28', + type: StepType.EXECUTE, + expectedOutcome: 'Top flight options collected', + suggestedTools: [], + requiresDesktop: false, + }, + { + description: 'Search for hotels in Las Vegas for the same dates', + type: StepType.EXECUTE, + expectedOutcome: 'Top hotel options collected', + suggestedTools: [], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + const output = await (service as any).callLLMForPlan( + 'Please price a trip with flight and hotel from Detroit to Las Vegas Feb 21-28', + {}, + ); + + expect(output.items[0].requiresDesktop).toBe(true); + expect(output.items[0].suggestedTools).toContain('browser'); + expect(output.items[1].requiresDesktop).toBe(true); + expect(output.items[1].suggestedTools).toContain('browser'); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/planner.errors.ts b/packages/bytebot-workflow-orchestrator/src/services/planner.errors.ts new file mode 100644 index 000000000..f6c0a10cc --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/planner.errors.ts @@ -0,0 +1,49 @@ +import { StepType } from '@prisma/client'; +import { hasUserInteractionTool } from '../contracts/planner-tools'; + +export type PlannerChecklistItem = { + description: string; + type: StepType; + expectedOutcome?: string; + suggestedTools?: string[]; + requiresDesktop?: boolean; +}; + +export type PlannerFirstStepUserInputReason = + | 'USER_INPUT_REQUIRED_TYPE' + | 'ASK_USER_TOOL'; + +/** + * Raised when the planner attempts to start a plan with a user-interaction step. + * + * We treat this as a GoalSpec intake requirement rather than saving a prompt-first plan. + * This prevents runs that immediately enter WAITING_USER_INPUT with no progress. + */ +export class PlannerFirstStepUserInputError extends Error { + readonly firstStep: PlannerChecklistItem; + readonly mode: 'initial' | 'replan'; + readonly reason: PlannerFirstStepUserInputReason; + + constructor(params: { + mode: 'initial' | 'replan'; + firstStep: PlannerChecklistItem; + reason: PlannerFirstStepUserInputReason; + }) { + super( + `Planner produced a prompt-first plan (${params.reason}) in ${params.mode} mode: ${params.firstStep.description}`, + ); + this.name = 'PlannerFirstStepUserInputError'; + this.mode = params.mode; + this.firstStep = params.firstStep; + this.reason = params.reason; + } +} + +export function detectPlannerFirstStepUserInputReason( + firstStep: PlannerChecklistItem, +): PlannerFirstStepUserInputReason | null { + if (firstStep.type === StepType.USER_INPUT_REQUIRED) return 'USER_INPUT_REQUIRED_TYPE'; + if (hasUserInteractionTool(firstStep.suggestedTools)) return 'ASK_USER_TOOL'; + + return null; +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/planner.no-user-input-first.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/planner.no-user-input-first.spec.ts new file mode 100644 index 000000000..58c97eef1 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/planner.no-user-input-first.spec.ts @@ -0,0 +1,211 @@ +import { PlannerService } from './planner.service'; +import { StepType } from '@prisma/client'; +import { PlannerFirstStepUserInputError } from './planner.errors'; +import { ExecuteStepHasInteractionToolError, UnknownSuggestedToolTokenError } from '../contracts/planner-tools'; + +describe('PlannerService no USER_INPUT_REQUIRED first step', () => { + const makeService = () => { + const prisma = {} as any; + const goalRunService = {} as any; + const configService = { + get: jest.fn((_key: string, fallback: any) => fallback), + } as any; + return new PlannerService(prisma, goalRunService, configService); + }; + + it('rejects prompt-first output when the LLM returns USER_INPUT_REQUIRED as step 1', async () => { + const service = makeService(); + + jest.spyOn(service as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Ask the user for the fire-drill codeword', + type: StepType.USER_INPUT_REQUIRED, + expectedOutcome: 'User provides codeword', + suggestedTools: ['ASK_USER'], + requiresDesktop: false, + }, + { + description: 'Validate codeword', + type: StepType.EXECUTE, + expectedOutcome: 'Codeword validated', + suggestedTools: ['Shell'], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + await expect((service as any).callLLMForPlan('Fire drill', {})).rejects.toBeInstanceOf( + PlannerFirstStepUserInputError, + ); + }); + + it('rejects prompt-first output when step 1 is EXECUTE but has suggestedTools=["ASK_USER"]', async () => { + const service = makeService(); + + jest.spyOn(service as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Clarify target URL with the user', + type: StepType.EXECUTE, + expectedOutcome: 'URL clarified', + suggestedTools: ['ASK_USER'], + requiresDesktop: false, + }, + { + description: 'Proceed with task', + type: StepType.EXECUTE, + expectedOutcome: 'Task progressed', + suggestedTools: [], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + const promise = (service as any).callLLMForPlan('Do the thing', {}); + await expect(promise).rejects.toBeInstanceOf(PlannerFirstStepUserInputError); + + await promise.catch((error: PlannerFirstStepUserInputError) => { + expect(error.reason).toBe('ASK_USER_TOOL'); + }); + }); + + it('rejects prompt-first output when step 1 is EXECUTE but has suggestedTools=["CHAT"]', async () => { + const service = makeService(); + + jest.spyOn(service as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Prompt the user to provide the missing inputs', + type: StepType.EXECUTE, + expectedOutcome: 'Inputs provided', + suggestedTools: ['CHAT'], + requiresDesktop: false, + }, + { + description: 'Proceed with task', + type: StepType.EXECUTE, + expectedOutcome: 'Task progressed', + suggestedTools: [], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + const promise = (service as any).callLLMForPlan('Do the thing', {}); + await expect(promise).rejects.toBeInstanceOf(PlannerFirstStepUserInputError); + + await promise.catch((error: PlannerFirstStepUserInputError) => { + expect(error.reason).toBe('ASK_USER_TOOL'); + }); + }); + + it('rejects prompt-first replans (so the orchestrator can convert to GOAL_INTAKE)', async () => { + const service = makeService(); + + jest.spyOn(service as any, 'callLLM').mockResolvedValueOnce( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Ask the user for missing required details', + type: StepType.USER_INPUT_REQUIRED, + expectedOutcome: 'User provides details', + suggestedTools: ['ASK_USER'], + requiresDesktop: false, + }, + { + description: 'Proceed with task', + type: StepType.EXECUTE, + expectedOutcome: 'Task progressed', + suggestedTools: [], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + await expect( + (service as any).callLLMForReplan( + 'Do stuff', + {}, + { checklistItems: [] }, + 'Need more info', + undefined, + undefined, + ), + ).rejects.toBeInstanceOf(PlannerFirstStepUserInputError); + }); + + it('rejects plan output when a non-first EXECUTE step contains an interaction tool token', async () => { + const service = makeService(); + const llm = jest.spyOn(service as any, 'callLLM'); + + llm.mockResolvedValue( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Do step 1', + type: StepType.EXECUTE, + expectedOutcome: 'Step 1 done', + suggestedTools: [], + requiresDesktop: false, + }, + { + description: 'Ask the user for clarification', + type: StepType.EXECUTE, // misclassified + expectedOutcome: 'User clarified', + suggestedTools: ['ASK_USER'], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + await expect((service as any).callLLMForPlan('Do the thing', {})).rejects.toBeInstanceOf( + ExecuteStepHasInteractionToolError, + ); + expect(llm).toHaveBeenCalled(); + }); + + it('rejects plan output when suggestedTools contains an unknown tool token (fail-closed)', async () => { + const service = makeService(); + const llm = jest.spyOn(service as any, 'callLLM'); + + llm.mockResolvedValue( + JSON.stringify({ + summary: 'test', + items: [ + { + description: 'Do step 1', + type: StepType.EXECUTE, + expectedOutcome: 'Step 1 done', + suggestedTools: ['totally_not_a_real_tool'], + requiresDesktop: false, + }, + ], + confidence: 0.9, + }), + ); + + await expect((service as any).callLLMForPlan('Do the thing', {})).rejects.toBeInstanceOf( + UnknownSuggestedToolTokenError, + ); + expect(llm).toHaveBeenCalled(); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/planner.service.ts b/packages/bytebot-workflow-orchestrator/src/services/planner.service.ts new file mode 100644 index 000000000..00a7f2b8d --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/planner.service.ts @@ -0,0 +1,1209 @@ +/** + * Planner Service + * v2.0.0: Context-Preserving Replanning (Industry Standard Fix) + * - Include actualOutcome in replan prompts so LLM knows what was accomplished + * - Preserve completed steps during replanning (don't re-do work) + * - Add explicit "COMPLETED WORK - DO NOT REDO" section in prompts + * - Manus-style: External state representation via checkpoint + * - LangGraph-style: Don't re-run successful nodes on recovery + * v1.0.1: Added defensive check in createPlanVersion to prevent duplicate version errors + * v1.0.0: LLM-powered plan generation for Manus-style orchestration + * + * Responsibilities: + * - Generate initial plans from natural language goals + * - Generate replans when execution fails + * - Validate plan structure and feasibility + * - Estimate goal complexity + * - Preserve context across replanning (v2.0.0) + * + * Race Condition Safeguard (v1.0.1): + * - createPlanVersion: Checks if version already exists before creating + * - Returns existing plan version if found (defensive programming) + * + * Context Preservation (v2.0.0): + * - buildReplanningPrompt: Now includes actualOutcome for COMPLETED steps + * - generateReplan: Option to preserve completed items in new plan version + * - Follows industry best practices from Manus AI, LangGraph, OpenAI Assistants + * + * @see /documentation/2026-01-03-CONTEXT_PRESERVING_REPLAN_FIX.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import { GoalRunService } from './goal-run.service'; +import { createId } from '@paralleldrive/cuid2'; +import { + ChecklistItemStatus, + ExecutionSurface, + GoalRunPhase, + GoalSpecStatus, + StepType, + UserPromptKind, + UserPromptStatus, +} from '@prisma/client'; +import { z } from 'zod'; +import { detectPlannerFirstStepUserInputReason, PlannerFirstStepUserInputError } from './planner.errors'; +import { + ExecuteStepHasInteractionToolError, + PlannerOutputContractViolationError, + hasDesktopExecutionTool, + hasUserInteractionTool, + normalizeSuggestedToolsOrThrow, +} from '../contracts/planner-tools'; +import { inferGoalFeasibility } from '../contracts/goal-feasibility'; + +// Zod schema for LLM plan output validation +const ChecklistItemSchema = z.object({ + description: z.string().min(5).max(500), + type: z.nativeEnum(StepType), + expectedOutcome: z.string().optional(), + suggestedTools: z.array(z.string()).optional(), + requiresDesktop: z.boolean().optional(), + isPreservedFromPrevious: z.boolean().optional(), // v2.0.0: Marks items preserved from previous plan +}); + +const PlanOutputSchema = z.object({ + summary: z.string().max(500), + items: z.array(ChecklistItemSchema).min(1).max(20), + confidence: z.number().min(0).max(1).optional(), + preserveCompletedSteps: z.boolean().optional(), // v2.0.0: Indicates LLM respects completed work +}); + +type PlanOutput = z.infer; + +// v2.0.0: Interface for preserved checklist item from previous plan +interface PreservedChecklistItem { + id: string; + order: number; + description: string; + status: string; + actualOutcome: string | null; + expectedOutcome: string | null; + suggestedTools: string[]; + requiresDesktop: boolean; + type: StepType; + executionSurface: ExecutionSurface; + startedAt: Date | null; + completedAt: Date | null; +} + +export interface PlanGenerationResult { + planVersionId: string; + version: number; + summary: string; + items: { + id: string; + order: number; + description: string; + expectedOutcome?: string; + suggestedTools: string[]; + requiresDesktop: boolean; + }[]; + confidence?: number; + tokensUsed?: number; +} + +export interface ComplexityEstimate { + level: 'simple' | 'moderate' | 'complex'; + estimatedSteps: number; + requiresDesktop: boolean; + riskLevel: 'low' | 'medium' | 'high'; + warnings: string[]; +} + +@Injectable() +export class PlannerService { + private readonly logger = new Logger(PlannerService.name); + private readonly llmModel: string; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + + constructor( + private prisma: PrismaService, + private goalRunService: GoalRunService, + private configService: ConfigService, + ) { + this.llmModel = this.configService.get('LLM_MODEL', 'claude-3-5-sonnet-20241022'); + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + } + + /** + * Generate initial plan from goal + */ + async generateInitialPlan(goalRunId: string): Promise { + this.logger.log(`Generating initial plan for goal run ${goalRunId}`); + + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + if (!goalRun) { + throw new Error(`Goal run ${goalRunId} not found`); + } + + const goalSpec = await this.prisma.goalSpec.findUnique({ + where: { goalRunId }, + select: { status: true, schemaId: true, schemaVersion: true, values: true }, + }); + + const goalSpecContext = + goalSpec?.status === GoalSpecStatus.COMPLETE + ? `\n\nGOAL INTAKE (schema=${goalSpec.schemaId}@${goalSpec.schemaVersion}):\n${JSON.stringify(goalSpec.values, null, 2)}` + : ''; + + // Generate plan using LLM + const planOutput = await this.callLLMForPlan(goalRun.goal, goalRun.constraints as any, { + goalSpecContext, + }); + + // Create plan version + const result = await this.createPlanVersion(goalRunId, planOutput, 1); + + // Update goal run current plan version + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { currentPlanVersion: 1 }, + }); + + // Create activity event + await this.goalRunService.createActivityEvent(goalRunId, { + eventType: 'PLANNING_COMPLETED', + title: 'Plan generated', + description: `Generated ${result.items.length} steps`, + planVersionId: result.planVersionId, + }); + + return result; + } + + /** + * Generate replan after failure + * + * v2.0.0: Context-Preserving Replanning + * - Preserves completed steps from previous plan (LangGraph-style) + * - Copies actualOutcome, timestamps, and status to new plan + * - Only generates new steps for failed/pending items + * - Manus-style: Keeps completed work visible and builds upon it + */ + async generateReplan( + goalRunId: string, + reason: string, + context?: { + failedItemId?: string; + failureDetails?: string; + }, + ): Promise { + this.logger.log(`Generating replan for goal run ${goalRunId}: ${reason}`); + + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + }); + + if (!goalRun) { + throw new Error(`Goal run ${goalRunId} not found`); + } + + const userPrompts = await this.prisma.userPrompt.findMany({ + where: { goalRunId }, + orderBy: { createdAt: 'asc' }, + select: { + id: true, + checklistItemId: true, + kind: true, + status: true, + payload: true, + answers: true, + createdAt: true, + resolvedAt: true, + }, + }); + const userPromptContext = this.formatUserPromptContext(userPrompts); + + // Update phase to REPLANNING + await this.goalRunService.updatePhase(goalRunId, GoalRunPhase.REPLANNING); + + const currentPlan = goalRun.planVersions[0]; + const newVersion = (currentPlan?.version || 0) + 1; + + // v2.0.0: Extract completed items to preserve (LangGraph-style: don't re-run successful nodes) + const completedItems: PreservedChecklistItem[] = (currentPlan?.checklistItems || []) + .filter((item: any) => item.status === 'COMPLETED') + .map((item: any) => ({ + id: item.id, + order: item.order, + description: item.description, + status: item.status, + actualOutcome: item.actualOutcome, + expectedOutcome: item.expectedOutcome, + suggestedTools: item.suggestedTools, + requiresDesktop: item.requiresDesktop, + type: item.type ?? StepType.EXECUTE, + executionSurface: + item.executionSurface ?? (item.requiresDesktop ? ExecutionSurface.DESKTOP : ExecutionSurface.TEXT_ONLY), + startedAt: item.startedAt, + completedAt: item.completedAt, + })); + + this.logger.log( + `Preserving ${completedItems.length} completed steps from previous plan v${currentPlan?.version || 0}`, + ); + + // Generate replan using LLM with context (includes completed work) + const planOutput = await this.callLLMForReplan( + goalRun.goal, + goalRun.constraints as any, + currentPlan, + reason, + context, + userPromptContext, + ); + + // v2.0.0: Create new plan version with preserved completed items + const result = await this.createPlanVersionWithPreservedSteps( + goalRunId, + planOutput, + newVersion, + completedItems, + currentPlan?.id, + reason, + ); + + // Update goal run current plan version + await this.prisma.goalRun.update({ + where: { id: goalRunId }, + data: { currentPlanVersion: newVersion }, + }); + + // Create activity event with context preservation info + await this.goalRunService.createActivityEvent(goalRunId, { + eventType: 'REPLAN_COMPLETED', + title: 'Plan updated (with preserved progress)', + description: `Replan reason: ${reason}. Preserved ${completedItems.length} completed steps.`, + planVersionId: result.planVersionId, + }); + + return result; + } + + /** + * Validate plan structure + */ + async validatePlan(planOutput: unknown): Promise<{ + valid: boolean; + errors: string[]; + }> { + try { + PlanOutputSchema.parse(planOutput); + return { valid: true, errors: [] }; + } catch (error: any) { + if (error instanceof z.ZodError) { + return { + valid: false, + errors: error.errors.map((e: z.ZodIssue) => `${e.path.join('.')}: ${e.message}`), + }; + } + return { valid: false, errors: [error.message] }; + } + } + + /** + * Estimate goal complexity + */ + async estimateComplexity(goal: string): Promise { + this.logger.log(`Estimating complexity for goal: "${goal.substring(0, 50)}..."`); + + // Simple heuristic-based estimation + const lowerGoal = goal.toLowerCase(); + + // Check for desktop-related keywords + const desktopKeywords = [ + 'click', 'type', 'browser', 'website', 'login', 'download', + 'upload', 'screenshot', 'navigate', 'fill', 'form', 'button', + ]; + const requiresDesktop = desktopKeywords.some((kw) => lowerGoal.includes(kw)); + + // Check for high-risk keywords + const highRiskKeywords = [ + 'email', 'send', 'delete', 'transfer', 'payment', 'purchase', + 'password', 'credentials', 'sensitive', 'confidential', + ]; + const highRiskCount = highRiskKeywords.filter((kw) => lowerGoal.includes(kw)).length; + + // Estimate step count based on goal complexity + const connectors = ['and', 'then', 'after', 'before', 'also', 'finally']; + const connectorCount = connectors.filter((c) => lowerGoal.includes(` ${c} `)).length; + const estimatedSteps = Math.max(2, Math.min(10, connectorCount + 2)); + + // Determine complexity level + let level: 'simple' | 'moderate' | 'complex'; + if (estimatedSteps <= 3 && highRiskCount === 0) { + level = 'simple'; + } else if (estimatedSteps <= 6 || highRiskCount <= 1) { + level = 'moderate'; + } else { + level = 'complex'; + } + + // Determine risk level + let riskLevel: 'low' | 'medium' | 'high'; + if (highRiskCount === 0) { + riskLevel = 'low'; + } else if (highRiskCount <= 2) { + riskLevel = 'medium'; + } else { + riskLevel = 'high'; + } + + // Generate warnings + const warnings: string[] = []; + if (highRiskCount > 0) { + warnings.push('Goal involves potentially sensitive operations'); + } + if (goal.length < 20) { + warnings.push('Goal description may be too vague'); + } + if (goal.length > 500) { + warnings.push('Goal description is very long - consider breaking into smaller goals'); + } + + return { + level, + estimatedSteps, + requiresDesktop, + riskLevel, + warnings, + }; + } + + // Private methods + + /** + * Create a new plan version with checklist items + * v1.0.1: Added defensive check to prevent duplicate version errors (Option 3) + */ + private async createPlanVersion( + goalRunId: string, + planOutput: PlanOutput, + version: number, + previousVersionId?: string, + replanReason?: string, + ): Promise { + // Defensive check: verify version doesn't already exist (Option 3 safeguard) + // This catches any edge cases that slip through the primary fix in orchestrator-loop + const existingVersion = await this.prisma.planVersion.findFirst({ + where: { goalRunId, version }, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }); + + if (existingVersion) { + this.logger.warn( + `Plan version ${version} already exists for goal run ${goalRunId}, returning existing`, + ); + return { + planVersionId: existingVersion.id, + version: existingVersion.version, + summary: existingVersion.summary || '', + items: existingVersion.checklistItems.map((item) => ({ + id: item.id, + order: item.order, + description: item.description, + expectedOutcome: item.expectedOutcome || undefined, + suggestedTools: item.suggestedTools, + requiresDesktop: item.requiresDesktop, + })), + confidence: existingVersion.confidence || undefined, + }; + } + + const planVersionId = `pv-${createId()}`; + + // Create plan version with checklist items in a transaction + await this.prisma.$transaction(async (tx) => { + await tx.planVersion.create({ + data: { + id: planVersionId, + goalRunId, + version, + summary: planOutput.summary, + previousVersionId, + replanReason, + llmModel: this.llmModel, + confidence: planOutput.confidence, + }, + }); + + // Create checklist items + for (let i = 0; i < planOutput.items.length; i++) { + const item = planOutput.items[i]; + await tx.checklistItem.create({ + data: { + id: `ci-${createId()}`, + planVersionId, + order: i + 1, + description: item.description, + type: item.type, + status: ChecklistItemStatus.PENDING, + expectedOutcome: item.expectedOutcome, + suggestedTools: item.suggestedTools || [], + requiresDesktop: item.requiresDesktop || false, + executionSurface: item.requiresDesktop ? ExecutionSurface.DESKTOP : ExecutionSurface.TEXT_ONLY, + }, + }); + } + }); + + // Fetch created items + const createdItems = await this.prisma.checklistItem.findMany({ + where: { planVersionId }, + orderBy: { order: 'asc' }, + }); + + return { + planVersionId, + version, + summary: planOutput.summary, + items: createdItems.map((item) => ({ + id: item.id, + order: item.order, + description: item.description, + expectedOutcome: item.expectedOutcome || undefined, + suggestedTools: item.suggestedTools, + requiresDesktop: item.requiresDesktop, + })), + confidence: planOutput.confidence || undefined, + }; + } + + /** + * v2.0.0: Create plan version with preserved completed steps + * + * This implements LangGraph-style "don't re-run successful nodes": + * 1. Preserved completed items are added FIRST with their original data + * 2. New items from LLM are added AFTER, starting at the next order number + * 3. Completed items retain actualOutcome, timestamps, and COMPLETED status + * + * This ensures: + * - Completed work is never lost during replanning + * - The agent can continue from where it left off + * - Context is preserved for dependent steps + */ + private async createPlanVersionWithPreservedSteps( + goalRunId: string, + planOutput: PlanOutput, + version: number, + preservedItems: PreservedChecklistItem[], + previousVersionId?: string, + replanReason?: string, + ): Promise { + // Defensive check: verify version doesn't already exist + const existingVersion = await this.prisma.planVersion.findFirst({ + where: { goalRunId, version }, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }); + + if (existingVersion) { + this.logger.warn( + `Plan version ${version} already exists for goal run ${goalRunId}, returning existing`, + ); + return { + planVersionId: existingVersion.id, + version: existingVersion.version, + summary: existingVersion.summary || '', + items: existingVersion.checklistItems.map((item) => ({ + id: item.id, + order: item.order, + description: item.description, + expectedOutcome: item.expectedOutcome || undefined, + suggestedTools: item.suggestedTools, + requiresDesktop: item.requiresDesktop, + })), + confidence: existingVersion.confidence || undefined, + }; + } + + const planVersionId = `pv-${createId()}`; + const preservedCount = preservedItems.length; + + this.logger.log( + `Creating plan v${version} with ${preservedCount} preserved steps + ${planOutput.items.length} new steps`, + ); + + // Create plan version with checklist items in a transaction + await this.prisma.$transaction(async (tx) => { + // Create plan version with metadata about preservation + await tx.planVersion.create({ + data: { + id: planVersionId, + goalRunId, + version, + summary: `${planOutput.summary} (preserved ${preservedCount} completed steps)`, + previousVersionId, + replanReason, + llmModel: this.llmModel, + confidence: planOutput.confidence, + }, + }); + + // Step 1: Create preserved completed items FIRST + // These keep their COMPLETED status and actualOutcome + for (let i = 0; i < preservedItems.length; i++) { + const preserved = preservedItems[i]; + await tx.checklistItem.create({ + data: { + id: `ci-${createId()}`, + planVersionId, + order: i + 1, // Preserved items get order 1, 2, 3... + description: preserved.description, + type: preserved.type, + status: ChecklistItemStatus.COMPLETED, // Keep COMPLETED status! + expectedOutcome: preserved.expectedOutcome, + actualOutcome: preserved.actualOutcome, // CRITICAL: Preserve the outcome! + suggestedTools: preserved.suggestedTools, + requiresDesktop: preserved.requiresDesktop, + executionSurface: preserved.executionSurface, + startedAt: preserved.startedAt, + completedAt: preserved.completedAt, + }, + }); + } + + // Step 2: Create new items from LLM AFTER preserved items + // These are PENDING and start after the preserved items + const startOrder = preservedItems.length + 1; + for (let i = 0; i < planOutput.items.length; i++) { + const item = planOutput.items[i]; + + // Skip items that LLM marked as "isPreservedFromPrevious" since we already added them + if (item.isPreservedFromPrevious) { + this.logger.debug(`Skipping LLM-preserved item (already added): ${item.description}`); + continue; + } + + await tx.checklistItem.create({ + data: { + id: `ci-${createId()}`, + planVersionId, + order: startOrder + i, + description: item.description, + type: item.type, + status: ChecklistItemStatus.PENDING, + expectedOutcome: item.expectedOutcome, + suggestedTools: item.suggestedTools || [], + requiresDesktop: item.requiresDesktop || false, + executionSurface: item.requiresDesktop ? ExecutionSurface.DESKTOP : ExecutionSurface.TEXT_ONLY, + }, + }); + } + }); + + // Fetch created items + const createdItems = await this.prisma.checklistItem.findMany({ + where: { planVersionId }, + orderBy: { order: 'asc' }, + }); + + this.logger.log( + `Created plan v${version}: ${createdItems.filter(i => i.status === 'COMPLETED').length} completed + ` + + `${createdItems.filter(i => i.status === 'PENDING').length} pending`, + ); + + return { + planVersionId, + version, + summary: planOutput.summary, + items: createdItems.map((item) => ({ + id: item.id, + order: item.order, + description: item.description, + expectedOutcome: item.expectedOutcome || undefined, + suggestedTools: item.suggestedTools, + requiresDesktop: item.requiresDesktop, + })), + confidence: planOutput.confidence || undefined, + }; + } + + private async callLLMForPlan( + goal: string, + constraints: any, + options?: { goalSpecContext?: string }, + ): Promise { + const basePrompt = this.buildPlanningPrompt(goal, constraints, options?.goalSpecContext); + const maxContractAttempts = 2; + + let lastContractError: PlannerOutputContractViolationError | null = null; + + for (let attempt = 1; attempt <= maxContractAttempts; attempt++) { + const prompt = + attempt === 1 + ? basePrompt + : `${basePrompt}\n\n` + + `CORRECTION REQUIRED:\n` + + `- Your previous JSON violated the planner-output contract.\n` + + `- suggestedTools MUST be either [] or a subset of the Allowed tools list. Do not invent tool tokens.\n` + + `- If a step requires user clarification, set type=USER_INPUT_REQUIRED and suggestedTools=[\"ASK_USER\"] (never \"CHAT\").\n`; + + try { + const response = await this.callLLM(prompt); + const parsed = this.parseLLMResponse(response); + + // Validate + const validation = await this.validatePlan(parsed); + if (!validation.valid) { + this.logger.warn(`Plan validation failed: ${validation.errors.join(', ')}`); + // Return a fallback simple plan + return this.generateFallbackPlan(goal); + } + + return this.normalizePlanOutput(parsed as PlanOutput, { + goal, + mode: 'initial', + allowedTools: constraints?.allowedTools, + }); + } catch (error: any) { + if (error instanceof PlannerFirstStepUserInputError) { + throw error; + } + + if (error instanceof PlannerOutputContractViolationError) { + lastContractError = error; + this.logger.warn( + `Planner output contract violation on attempt ${attempt}/${maxContractAttempts}: ${error.code}`, + ); + continue; + } + + this.logger.error(`LLM call failed: ${error.message}`); + // Return fallback plan + return this.generateFallbackPlan(goal); + } + } + + // Fail-closed: do not persist a plan with drifted/unknown tool vocabulary. + if (lastContractError) { + throw lastContractError; + } + + return this.generateFallbackPlan(goal); + } + + private async callLLMForReplan( + goal: string, + constraints: any, + currentPlan: any, + reason: string, + context?: { failedItemId?: string; failureDetails?: string }, + userPromptContext?: string, + ): Promise { + const basePrompt = this.buildReplanningPrompt(goal, constraints, currentPlan, reason, context, userPromptContext); + const maxContractAttempts = 2; + + let lastContractError: PlannerOutputContractViolationError | null = null; + + for (let attempt = 1; attempt <= maxContractAttempts; attempt++) { + const prompt = + attempt === 1 + ? basePrompt + : `${basePrompt}\n\n` + + `CORRECTION REQUIRED:\n` + + `- Your previous JSON violated the planner-output contract.\n` + + `- suggestedTools MUST be either [] or a subset of the Allowed tools list. Do not invent tool tokens.\n` + + `- If a step requires user clarification, set type=USER_INPUT_REQUIRED and suggestedTools=[\"ASK_USER\"] (never \"CHAT\").\n`; + + try { + const response = await this.callLLM(prompt); + const parsed = this.parseLLMResponse(response); + + const validation = await this.validatePlan(parsed); + if (!validation.valid) { + this.logger.warn(`Replan validation failed: ${validation.errors.join(', ')}`); + return this.generateFallbackPlan(goal); + } + + return this.normalizePlanOutput(parsed as PlanOutput, { + goal, + mode: 'replan', + allowedTools: constraints?.allowedTools, + }); + } catch (error: any) { + if (error instanceof PlannerFirstStepUserInputError) { + throw error; + } + + if (error instanceof PlannerOutputContractViolationError) { + lastContractError = error; + this.logger.warn( + `Planner replan output contract violation on attempt ${attempt}/${maxContractAttempts}: ${error.code}`, + ); + continue; + } + + this.logger.error(`LLM replan call failed: ${error.message}`); + return this.generateFallbackPlan(goal); + } + } + + if (lastContractError) { + throw lastContractError; + } + + return this.generateFallbackPlan(goal); + } + + private normalizePlanOutput( + planOutput: PlanOutput, + context: { goal: string; mode: 'initial' | 'replan'; allowedTools?: string[] | null }, + ): PlanOutput { + const maxItems = 20; + const items = Array.isArray(planOutput.items) ? [...planOutput.items] : []; + if (items.length === 0) return planOutput; + + const goalFeasibility = inferGoalFeasibility(context.goal); + const forceDesktopSurface = goalFeasibility?.requiredSurface === ExecutionSurface.DESKTOP; + + const normalizedItems = items.slice(0, maxItems).map((item) => { + const suggestedTools = normalizeSuggestedToolsOrThrow({ + suggestedTools: item.suggestedTools, + allowedTools: context.allowedTools, + }); + + if (item.type === StepType.USER_INPUT_REQUIRED) { + return { + ...item, + suggestedTools: ['ASK_USER'], + requiresDesktop: false, + }; + } + + const requiresDesktop = + Boolean(item.requiresDesktop) || hasDesktopExecutionTool(suggestedTools); + + if (forceDesktopSurface && item.type === StepType.EXECUTE) { + const ensuredTools = hasDesktopExecutionTool(suggestedTools) + ? suggestedTools + : [...suggestedTools, 'browser']; + + return { + ...item, + suggestedTools: ensuredTools, + requiresDesktop: true, + }; + } + + return { ...item, suggestedTools, requiresDesktop }; + }); + + const first = normalizedItems[0]; + const reason = detectPlannerFirstStepUserInputReason(first); + if (reason) { + throw new PlannerFirstStepUserInputError({ mode: context.mode, firstStep: first, reason }); + } + + // Safety rail: an EXECUTE step may never be "interaction-shaped" anywhere in the plan. + for (let i = 1; i < normalizedItems.length; i++) { + const item = normalizedItems[i]; + if (item.type === StepType.EXECUTE && hasUserInteractionTool(item.suggestedTools)) { + const interactionTool = normalizeSuggestedToolsOrThrow({ + suggestedTools: item.suggestedTools, + allowedTools: context.allowedTools, + }).find((t) => hasUserInteractionTool([t])); + + throw new ExecuteStepHasInteractionToolError({ + stepIndex: i, + stepDescription: item.description, + interactionTool: interactionTool ?? 'ASK_USER', + }); + } + } + + return { ...planOutput, items: normalizedItems }; + } + + private buildPlanningPrompt(goal: string, constraints: any, goalSpecContext?: string): string { + const allowedTools = constraints?.allowedTools?.join(', ') || 'any'; + + return `You are a task planning assistant. Your job is to break down a user's goal into a clear, actionable checklist. + +USER GOAL: ${goal} +${goalSpecContext || ''} + +CONSTRAINTS: +- Allowed tools: ${allowedTools} +- Workspace mode: ${constraints?.workspaceMode || 'SHARED'} +${constraints?.riskPolicy?.requireApproval?.length ? `- Actions requiring approval: ${constraints.riskPolicy.requireApproval.join(', ')}` : ''} + +OUTPUT FORMAT (JSON): +{ + "summary": "Brief description of the plan", + "items": [ + { + "description": "Clear, actionable step description", + "type": "EXECUTE | USER_INPUT_REQUIRED", + "expectedOutcome": "What should happen when this step succeeds", + "suggestedTools": ["tool1", "tool2"], + "requiresDesktop": true/false + } + ], + "confidence": 0.0-1.0 +} + +RULES: +1. Break the goal into 2-10 discrete, verifiable steps +2. Each step should be independently executable OR explicitly marked as USER_INPUT_REQUIRED +3. Steps should be in logical execution order +4. Include expected outcomes for verification +5. Mark steps requiring desktop interaction +6. suggestedTools MUST be [] or a subset of the Allowed tools list. Never invent tool tokens. +7. Only use USER_INPUT_REQUIRED when truly blocked on external input (e.g., credentials, user-specific facts that cannot be assumed by policy, explicit approvals, or human takeover). Never ask the user to choose strategy (e.g., which website/tool to use) — pick a reliable default and proceed. +8. The FIRST step MUST NOT be USER_INPUT_REQUIRED (or suggestedTools=["ASK_USER"]). If clarification is needed to start, assume safe defaults and proceed with an EXECUTE step. +9. Consider potential failure points + +Generate the plan:`; + } + + /** + * v2.0.0: Context-Preserving Replanning Prompt + * + * Key improvements: + * 1. COMPLETED WORK section: Shows actualOutcome for completed steps + * 2. Explicit DO NOT REDO instruction: Prevents re-executing completed work + * 3. BUILD UPON completed results: Uses previous outputs as inputs + * 4. Manus-style: Keeps completed work "in the model's recent attention span" + * 5. LangGraph-style: "Don't re-run successful nodes" + */ + private buildReplanningPrompt( + goal: string, + constraints: any, + currentPlan: any, + reason: string, + context?: { failedItemId?: string; failureDetails?: string }, + userPromptContext?: string, + ): string { + const checklistItems = currentPlan?.checklistItems || []; + + // v2.0.0: Separate completed items with their actual outcomes + const completedItems = checklistItems.filter((item: any) => item.status === 'COMPLETED'); + const failedItems = checklistItems.filter((item: any) => item.status === 'FAILED'); + const pendingItems = checklistItems.filter((item: any) => + item.status === 'PENDING' || item.status === 'IN_PROGRESS' + ); + + // Build COMPLETED WORK section with actualOutcome (the key fix!) + let completedWorkSection = ''; + if (completedItems.length > 0) { + const completedDetails = completedItems.map((item: any) => { + let entry = `✓ Step ${item.order}: ${item.description}`; + if (item.actualOutcome) { + // Parse and format actualOutcome for readability + let outcome = item.actualOutcome; + try { + const parsed = JSON.parse(outcome); + if (typeof parsed === 'object') { + outcome = JSON.stringify(parsed, null, 2); + } + } catch { + // Keep as string if not JSON + } + entry += `\n RESULT: ${outcome}`; + } + return entry; + }).join('\n\n'); + + completedWorkSection = ` +=== COMPLETED WORK (DO NOT REDO) === +The following steps have ALREADY been completed successfully. Their results are shown below. +DO NOT regenerate or re-execute these steps. Use their results as inputs for remaining work. + +${completedDetails} +=== END COMPLETED WORK === +`; + } + + // Build FAILED items section + let failedSection = ''; + if (failedItems.length > 0) { + const failedDetails = failedItems.map((item: any) => { + let entry = `✗ Step ${item.order}: ${item.description}`; + if (item.actualOutcome) { + entry += `\n ERROR: ${item.actualOutcome}`; + } + return entry; + }).join('\n\n'); + + failedSection = ` +=== FAILED STEPS (NEED ALTERNATIVE APPROACH) === +${failedDetails} +=== END FAILED STEPS === +`; + } + + // Build PENDING items section + let pendingSection = ''; + if (pendingItems.length > 0) { + const pendingDetails = pendingItems.map((item: any) => + `○ Step ${item.order}: ${item.description}` + ).join('\n'); + + pendingSection = ` +=== REMAINING STEPS (TO BE REVISED) === +${pendingDetails} +=== END REMAINING STEPS === +`; + } + + const userInteractionSection = userPromptContext + ? `\n=== USER INTERACTION CONTEXT ===\n${userPromptContext}\n=== END USER INTERACTION CONTEXT ===\n` + : ''; + + return `You are a task planning assistant. A previous plan encountered an issue and needs revision. + +USER GOAL: ${goal} +${completedWorkSection}${failedSection}${pendingSection}${userInteractionSection} +FAILURE REASON: ${reason} +${context?.failureDetails ? `FAILURE DETAILS: ${context.failureDetails}` : ''} + +CONSTRAINTS: +- Allowed tools: ${constraints?.allowedTools?.join(', ') || 'any'} +- Workspace mode: ${constraints?.workspaceMode || 'SHARED'} + +OUTPUT FORMAT (JSON): +{ + "summary": "Brief description of the revised plan", + "preserveCompletedSteps": true, + "items": [ + { + "description": "Clear, actionable step description", + "type": "EXECUTE | USER_INPUT_REQUIRED", + "expectedOutcome": "What should happen when this step succeeds", + "suggestedTools": ["tool1", "tool2"], + "requiresDesktop": true/false, + "isPreservedFromPrevious": false + } + ], + "confidence": 0.0-1.0 +} + +CRITICAL RULES: +1. ⚠️ DO NOT recreate steps that are already COMPLETED - their results are available above +2. If a step depends on completed work, reference the results shown in COMPLETED WORK section +3. Only generate NEW steps for failed/pending work +4. For failed steps, try a different approach (different selector, different website, etc.) +5. Set "isPreservedFromPrevious": true for any steps you're keeping from the original plan +6. The goal is to CONTINUE from where we left off, not start over +7. Be more specific than the previous plan for steps that failed +8. suggestedTools MUST be [] or a subset of the Allowed tools list. Never invent tool tokens. +9. Only use USER_INPUT_REQUIRED when truly blocked on external input (credentials, user-specific facts that cannot be assumed by policy, explicit approvals, or human takeover). Never ask the user to choose strategy (e.g., which website/tool to use) — pick a reliable default and proceed. +10. The FIRST NEW step you generate MUST NOT be USER_INPUT_REQUIRED (or suggestedTools=["ASK_USER"]). If clarification is needed, start with an EXECUTE "preflight" step that extracts assumptions/defaults and continues with safe defaults. Only ask the user later if policy forbids assumptions. + +EXAMPLE OF GOOD REPLANNING: +- If "Search for flights" COMPLETED with results showing "Found $299 on United" +- Then next step should be "Book the $299 United flight" NOT "Search for flights again" + +Generate the revised plan that BUILDS UPON completed work:`; + } + + // PR5: When replanning, include open prompts + resolved answers so the LLM doesn't re-ask + // and can safely build on user-provided clarifications. + private formatUserPromptContext( + prompts: Array<{ + id: string; + checklistItemId: string | null; + kind: UserPromptKind; + status: UserPromptStatus; + payload: any; + answers: any; + createdAt: Date; + resolvedAt: Date | null; + }>, + ): string { + if (!prompts.length) return ''; + + const open = prompts.filter((p) => p.status === UserPromptStatus.OPEN); + const resolved = prompts.filter((p) => p.status === UserPromptStatus.RESOLVED); + + const toShortJson = (value: any, maxChars: number) => { + try { + const str = JSON.stringify(value); + if (str.length <= maxChars) return str; + return `${str.slice(0, maxChars)}…`; + } catch { + return String(value); + } + }; + + const lines: string[] = []; + + if (open.length > 0) { + lines.push('OPEN PROMPTS (awaiting user input):'); + for (const p of open.slice(0, 10)) { + const reason = p.payload?.reason ?? p.payload?.message ?? null; + const stepDesc = p.payload?.step?.description ?? p.payload?.title ?? null; + lines.push( + `- promptId=${p.id} kind=${p.kind} checklistItemId=${p.checklistItemId ?? 'n/a'}` + + (stepDesc ? ` step=${JSON.stringify(stepDesc)}` : '') + + (reason ? ` reason=${JSON.stringify(reason)}` : ''), + ); + } + } + + if (resolved.length > 0) { + lines.push('RESOLVED PROMPTS (user-provided answers):'); + for (const p of resolved.slice(-10)) { + const ts = p.resolvedAt ? p.resolvedAt.toISOString() : p.createdAt.toISOString(); + lines.push( + `- promptId=${p.id} kind=${p.kind} checklistItemId=${p.checklistItemId ?? 'n/a'} at=${ts} answers=${toShortJson(p.answers, 1200)}`, + ); + } + } + + return lines.join('\n'); + } + + private async callLLM(prompt: string): Promise { + // If no API key, use mock response for development + if (!this.llmApiKey) { + this.logger.warn('No LLM API key configured, using mock response'); + return this.getMockLLMResponse(prompt); + } + + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: this.llmModel, + max_tokens: 2000, + messages: [ + { + role: 'user', + content: prompt, + }, + ], + }), + }); + + if (!response.ok) { + throw new Error(`LLM API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json(); + return data.content[0].text; + } + + private parseLLMResponse(response: string): unknown { + // Extract JSON from response (handle markdown code blocks) + const jsonMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/); + const jsonStr = jsonMatch ? jsonMatch[1] : response; + + try { + return JSON.parse(jsonStr.trim()); + } catch { + // Try to find JSON object in response + const objectMatch = response.match(/\{[\s\S]*\}/); + if (objectMatch) { + return JSON.parse(objectMatch[0]); + } + throw new Error('Failed to parse LLM response as JSON'); + } + } + + private generateFallbackPlan(goal: string): PlanOutput { + // Generate a simple fallback plan based on the goal + return { + summary: `Execute goal: ${goal.substring(0, 100)}`, + items: [ + { + description: `Analyze the goal: "${goal.substring(0, 200)}"`, + type: StepType.EXECUTE, + expectedOutcome: 'Understanding of required actions', + suggestedTools: [], + requiresDesktop: false, + }, + { + description: 'Execute the primary action required by the goal', + type: StepType.EXECUTE, + expectedOutcome: 'Goal objective achieved', + suggestedTools: ['browser'], + requiresDesktop: true, + }, + { + description: 'Verify the goal was accomplished successfully', + type: StepType.EXECUTE, + expectedOutcome: 'Confirmation of success', + suggestedTools: ['screenshot'], + requiresDesktop: true, + }, + ], + confidence: 0.5, + }; + } + + private getMockLLMResponse(prompt: string): string { + // Extract goal from prompt for mock response + const goalMatch = prompt.match(/USER GOAL: (.+?)(?:\n|CONSTRAINTS)/s); + const goal = goalMatch ? goalMatch[1].trim() : 'Unknown goal'; + + // Generate mock plan based on common patterns + const isLoginTask = goal.toLowerCase().includes('login') || goal.toLowerCase().includes('log in'); + const isDownloadTask = goal.toLowerCase().includes('download'); + const isEmailTask = goal.toLowerCase().includes('email'); + + let items: any[] = []; + + if (isLoginTask) { + items = [ + { description: 'Open the target website in browser', type: StepType.EXECUTE, expectedOutcome: 'Website loaded', suggestedTools: ['browser'], requiresDesktop: true }, + { description: 'Locate and click the login button/link', type: StepType.EXECUTE, expectedOutcome: 'Login form visible', suggestedTools: ['browser'], requiresDesktop: true }, + { description: 'Enter credentials and submit', type: StepType.EXECUTE, expectedOutcome: 'Successfully logged in', suggestedTools: ['browser'], requiresDesktop: true }, + ]; + } else if (isDownloadTask) { + items = [ + { description: 'Navigate to the download location', type: StepType.EXECUTE, expectedOutcome: 'Download page visible', suggestedTools: ['browser'], requiresDesktop: true }, + { description: 'Locate the download link/button', type: StepType.EXECUTE, expectedOutcome: 'Download option found', suggestedTools: ['browser'], requiresDesktop: true }, + { description: 'Click download and wait for completion', type: StepType.EXECUTE, expectedOutcome: 'File downloaded', suggestedTools: ['browser', 'file_download'], requiresDesktop: true }, + ]; + } else if (isEmailTask) { + items = [ + { description: 'Open email client or webmail', type: StepType.EXECUTE, expectedOutcome: 'Email interface ready', suggestedTools: ['browser'], requiresDesktop: true }, + { description: 'Compose new email with required content', type: StepType.EXECUTE, expectedOutcome: 'Email composed', suggestedTools: ['browser'], requiresDesktop: true }, + { description: 'Review and send the email', type: StepType.EXECUTE, expectedOutcome: 'Email sent successfully', suggestedTools: ['browser', 'email'], requiresDesktop: true }, + ]; + } else { + items = [ + { description: 'Analyze and understand the goal requirements', type: StepType.EXECUTE, expectedOutcome: 'Clear understanding of tasks', suggestedTools: [], requiresDesktop: false }, + { description: 'Execute the main action for the goal', type: StepType.EXECUTE, expectedOutcome: 'Primary objective completed', suggestedTools: ['browser'], requiresDesktop: true }, + { description: 'Verify successful completion', type: StepType.EXECUTE, expectedOutcome: 'Goal achieved', suggestedTools: ['screenshot'], requiresDesktop: true }, + ]; + } + + return JSON.stringify({ + summary: `Plan to: ${goal.substring(0, 100)}`, + items, + confidence: 0.75, + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/prisma.service.ts b/packages/bytebot-workflow-orchestrator/src/services/prisma.service.ts new file mode 100644 index 000000000..8f62716c7 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/prisma.service.ts @@ -0,0 +1,116 @@ +/** + * Prisma Service + * Database connection and query handling + * + * v1.1.0: Added connection pool limits to prevent database exhaustion + * - connection_limit: Max connections per instance (default: 2) + * - pool_timeout: Max seconds to wait for connection (default: 30) + */ + +import { + Injectable, + OnModuleInit, + OnModuleDestroy, + Logger, +} from '@nestjs/common'; +import { PrismaClient } from '@prisma/client'; + +/** + * Build datasource URL with connection pool parameters + * Prevents database connection exhaustion in multi-pod deployments + */ +function buildDatasourceUrl(): string { + const baseUrl = process.env.DATABASE_URL || ''; + const connectionLimit = parseInt(process.env.DB_CONNECTION_LIMIT || '2', 10); + const poolTimeout = parseInt(process.env.DB_POOL_TIMEOUT || '30', 10); + + // Parse and append pool parameters + const url = new URL(baseUrl); + url.searchParams.set('connection_limit', connectionLimit.toString()); + url.searchParams.set('pool_timeout', poolTimeout.toString()); + + return url.toString(); +} + +@Injectable() +export class PrismaService + extends PrismaClient + implements OnModuleInit, OnModuleDestroy +{ + private readonly logger = new Logger(PrismaService.name); + + constructor() { + const datasourceUrl = buildDatasourceUrl(); + super({ + datasourceUrl, + log: [ + { emit: 'event', level: 'query' }, + { emit: 'event', level: 'error' }, + { emit: 'event', level: 'warn' }, + ], + }); + + // Log connection pool configuration + const connectionLimit = process.env.DB_CONNECTION_LIMIT || '2'; + const poolTimeout = process.env.DB_POOL_TIMEOUT || '30'; + this.logger.log( + `PrismaService initialized with connection_limit=${connectionLimit}, pool_timeout=${poolTimeout}`, + ); + } + + async onModuleInit() { + await this.$connect(); + this.logger.log('Connected to database'); + + // Log slow queries in development + if (process.env.NODE_ENV !== 'production') { + (this as any).$on('query', (e: any) => { + if (e.duration > 100) { + this.logger.warn(`Slow query (${e.duration}ms): ${e.query}`); + } + }); + } + } + + async onModuleDestroy() { + await this.$disconnect(); + this.logger.log('Disconnected from database'); + } + + /** + * Execute a transaction with retry on serialization failures + */ + async executeWithRetry( + fn: () => Promise, + maxRetries: number = 3, + ): Promise { + let lastError: Error | null = null; + + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + return await fn(); + } catch (error: any) { + lastError = error; + + // Check for serialization/deadlock errors (PostgreSQL) + const isRetryable = + error.code === '40001' || // Serialization failure + error.code === '40P01' || // Deadlock + error.code === 'P2034'; // Transaction conflict + + if (!isRetryable || attempt === maxRetries) { + throw error; + } + + // Exponential backoff + const delay = Math.min(100 * Math.pow(2, attempt - 1), 1000); + this.logger.warn( + `Transaction retry ${attempt}/${maxRetries} after ${delay}ms: ${error.code}`, + ); + await new Promise((resolve) => setTimeout(resolve, delay)); + } + } + + throw lastError; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/prompt-resume-reconciler.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/prompt-resume-reconciler.service.spec.ts new file mode 100644 index 000000000..a1629f41b --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/prompt-resume-reconciler.service.spec.ts @@ -0,0 +1,80 @@ +import { PromptResumeReconcilerService } from './prompt-resume-reconciler.service'; + +describe(PromptResumeReconcilerService.name, () => { + afterEach(() => { + jest.restoreAllMocks(); + }); + + it('enqueues canonical resume outbox when missing', async () => { + const configService = { get: jest.fn((_k: string, fallback: string) => fallback) } as any; + const prisma = { $queryRaw: jest.fn() } as any; + const outboxService = { enqueueOnce: jest.fn() } as any; + const leaderElection = { isLeader: true } as any; + const resumeOutboxEnqueuedTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + + const service = new PromptResumeReconcilerService( + configService, + prisma, + outboxService, + leaderElection, + resumeOutboxEnqueuedTotal, + ); + + prisma.$queryRaw.mockResolvedValueOnce([ + { promptId: 'p-1', goalRunId: 'gr-1', tenantId: 't-1', outboxId: null, outboxProcessedAt: null }, + ]); + + await service.tick(); + + expect(outboxService.enqueueOnce).toHaveBeenCalledWith({ + dedupeKey: 'user_prompt.resume:p-1', + aggregateId: 'gr-1', + eventType: 'user_prompt.resume', + payload: expect.objectContaining({ + promptId: 'p-1', + goalRunId: 'gr-1', + tenantId: 't-1', + updateId: 'user_prompt.resume:p-1', + }), + }); + expect(resumeOutboxEnqueuedTotal.labels).toHaveBeenCalledWith('reconciler'); + }); + + it('re-arms the canonical resume outbox when it was processed and ack is still missing', async () => { + const configService = { + get: jest.fn((key: string, fallback: string) => { + if (key === 'PROMPT_RESUME_RECONCILER_BUCKET_MINUTES') return '5'; + return fallback; + }), + } as any; + const prisma = { $queryRaw: jest.fn(), outbox: { updateMany: jest.fn() } } as any; + const outboxService = { enqueueOnce: jest.fn() } as any; + const leaderElection = { isLeader: true } as any; + const resumeOutboxEnqueuedTotal = { labels: jest.fn(() => ({ inc: jest.fn() })) } as any; + + const service = new PromptResumeReconcilerService( + configService, + prisma, + outboxService, + leaderElection, + resumeOutboxEnqueuedTotal, + ); + + prisma.$queryRaw.mockResolvedValueOnce([ + { promptId: 'p-2', goalRunId: 'gr-2', tenantId: 't-2', outboxId: 'o-xyz', outboxProcessedAt: new Date() }, + ]); + + await service.tick(); + + expect(outboxService.enqueueOnce).not.toHaveBeenCalled(); + expect(prisma.outbox.updateMany).toHaveBeenCalledWith({ + where: { id: 'o-xyz', processedAt: { not: null } }, + data: expect.objectContaining({ + processedAt: null, + retryCount: 0, + nextAttemptAt: expect.any(Date), + }), + }); + expect(resumeOutboxEnqueuedTotal.labels).toHaveBeenCalledWith('reconciler_rearm'); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/prompt-resume-reconciler.service.ts b/packages/bytebot-workflow-orchestrator/src/services/prompt-resume-reconciler.service.ts new file mode 100644 index 000000000..ff9353a4f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/prompt-resume-reconciler.service.ts @@ -0,0 +1,180 @@ +/** + * Prompt Resume Reconciler Service + * + * Purpose: + * - Ensures prompt resolutions always lead to a durable resume attempt. + * - Repairs rare edge cases where a prompt is RESOLVED in DB but the resume outbox row is missing. + * + * Design: + * - DB is the record of truth (prompt status + resolution record). + * - Resume is executed via an outbox event (user_prompt.resume) processed by OutboxPublisherService. + * - This reconciler is idempotent and safe under retries (dedupeKey uniqueness). + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { InjectMetric } from '@willsoto/nestjs-prometheus'; +import type { Counter } from 'prom-client'; +import { PrismaService } from './prisma.service'; +import { OutboxService } from './outbox.service'; +import { LeaderElectionService } from './leader-election.service'; + +@Injectable() +export class PromptResumeReconcilerService implements OnModuleInit { + private readonly logger = new Logger(PromptResumeReconcilerService.name); + + private readonly enabled: boolean; + private readonly lookbackMinutes: number; + private readonly batchSize: number; + private readonly graceSeconds: number; + private readonly reconcileBucketMinutes: number; + + constructor( + private readonly configService: ConfigService, + private readonly prisma: PrismaService, + private readonly outboxService: OutboxService, + private readonly leaderElection: LeaderElectionService, + @InjectMetric('resume_outbox_enqueued_total') + private readonly resumeOutboxEnqueuedTotal: Counter, + ) { + this.enabled = this.configService.get('PROMPT_RESUME_RECONCILER_ENABLED', 'true') === 'true'; + this.lookbackMinutes = parseInt( + this.configService.get('PROMPT_RESUME_RECONCILER_LOOKBACK_MINUTES', '1440'), // 24h + 10, + ); + this.batchSize = parseInt( + this.configService.get('PROMPT_RESUME_RECONCILER_BATCH_SIZE', '50'), + 10, + ); + this.graceSeconds = parseInt( + this.configService.get('PROMPT_RESUME_RECONCILER_GRACE_SECONDS', '30'), + 10, + ); + this.reconcileBucketMinutes = parseInt( + this.configService.get('PROMPT_RESUME_RECONCILER_BUCKET_MINUTES', '5'), + 10, + ); + } + + onModuleInit(): void { + if (!this.enabled) { + this.logger.warn('Prompt resume reconciler is disabled'); + return; + } + this.logger.log( + `Prompt resume reconciler enabled (lookbackMinutes=${this.lookbackMinutes}, batchSize=${this.batchSize})`, + ); + } + + @Cron(CronExpression.EVERY_MINUTE) + async tick(): Promise { + if (!this.enabled) return; + if (!this.leaderElection.isLeader) return; + + try { + await this.reconcile(); + } catch (error: any) { + this.logger.warn(`Prompt resume reconcile tick failed: ${error.message}`); + } + } + + private async reconcile(): Promise { + const since = new Date(Date.now() - this.lookbackMinutes * 60 * 1000); + const graceCutoff = new Date(Date.now() - this.graceSeconds * 1000); + const rearmCutoff = new Date(Date.now() - this.reconcileBucketMinutes * 60 * 1000); + + const candidates = await this.prisma.$queryRaw< + Array<{ + promptId: string; + goalRunId: string; + tenantId: string | null; + outboxId: string | null; + outboxProcessedAt: Date | null; + }> + >` + SELECT + p.id AS "promptId", + p.goal_run_id AS "goalRunId", + p.tenant_id AS "tenantId", + o.id AS "outboxId", + o.processed_at AS "outboxProcessedAt" + FROM "workflow_orchestrator"."user_prompts" p + JOIN "workflow_orchestrator"."goal_runs" gr + ON gr.id = p.goal_run_id + LEFT JOIN "workflow_orchestrator"."user_prompt_resolutions" r + ON r.prompt_id = p.id + LEFT JOIN "workflow_orchestrator"."outbox" o + ON o.dedupe_key = ('user_prompt.resume:' || p.id) + WHERE p.status = 'RESOLVED' + AND p.resolved_at IS NOT NULL + AND p.resolved_at >= ${since} + AND p.resolved_at <= ${graceCutoff} + AND gr.status IN ('PENDING', 'RUNNING') + AND gr.execution_engine = 'TEMPORAL_WORKFLOW' + AND (r.resume_acknowledged_at IS NULL) + -- Prefer the canonical outbox row; only repair when missing, or when it was processed + -- without a durable resume ack (i.e., needs a re-arm) and the last processing is older + -- than the configured reconcile bucket. + AND ( + o.id IS NULL + OR ( + o.processed_at IS NOT NULL + AND o.processed_at <= ${rearmCutoff} + ) + ) + ORDER BY p.resolved_at DESC + LIMIT ${this.batchSize}; + `; + + if (candidates.length === 0) return; + + this.logger.warn(`Found ${candidates.length} resolved prompts needing resume reconciliation; enqueueing repairs`); + + for (const row of candidates) { + const canonicalDedupeKey = `user_prompt.resume:${row.promptId}`; + + if (!row.outboxId) { + await this.outboxService.enqueueOnce({ + dedupeKey: canonicalDedupeKey, + aggregateId: row.goalRunId, + eventType: 'user_prompt.resume', + payload: { + promptId: row.promptId, + goalRunId: row.goalRunId, + tenantId: row.tenantId, + updateId: canonicalDedupeKey, + source: 'reconciler', + }, + }); + + try { + this.resumeOutboxEnqueuedTotal.labels('reconciler').inc(); + } catch { + // Ignore metric errors + } + continue; + } + + // Re-arm the canonical outbox row instead of creating unbounded new outbox rows. + // This keeps the outbox table growth predictable under extended Temporal outages. + await this.prisma.outbox.updateMany({ + where: { + id: row.outboxId, + processedAt: { not: null }, + }, + data: { + processedAt: null, + retryCount: 0, + nextAttemptAt: new Date(), + }, + }); + + try { + this.resumeOutboxEnqueuedTotal.labels('reconciler_rearm').inc(); + } catch { + // Ignore metric errors + } + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/retry-context.service.ts b/packages/bytebot-workflow-orchestrator/src/services/retry-context.service.ts new file mode 100644 index 000000000..8bb380596 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/retry-context.service.ts @@ -0,0 +1,456 @@ +/** + * Retry Context Service + * v1.0.0: Nice-to-Have Enhancement for Intelligent Retry with Context + * + * Implements retry logic that preserves and enhances context across attempts: + * - Captures error details from failed attempts + * - Builds enriched context for AI agent on retry + * - Classifies errors as retriable vs non-retriable + * - Provides suggested adjustments based on failure patterns + * + * Uses Cockatiel for retry policies with custom backoff. + * + * @see /docs/CONTEXT_PROPAGATION_FIX_JAN_2026.md + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { + retry, + handleWhen, + DelegateBackoff, + IRetryContext, +} from 'cockatiel'; + +// Retry attempt information +export interface RetryAttemptInfo { + attemptNumber: number; + previousError: Error; + errorType: string; + errorMessage: string; + errorClassification: 'retriable' | 'non-retriable' | 'unknown'; + timestamp: Date; + backoffMs: number; +} + +// Task context for retry +export interface TaskRetryContext { + taskId: string; + goalRunId: string; + checklistItemId: string; + stepDescription: string; + goalContext?: string; + previousStepResults?: string; +} + +// Enhanced context after retry processing +export interface EnhancedRetryContext extends TaskRetryContext { + retryHistory: RetryAttemptInfo[]; + isRetry: boolean; + totalAttempts: number; + suggestedAdjustments: string[]; + retryContextSummary: string; +} + +// Retry result +export interface RetryResult { + success: boolean; + result?: T; + error?: Error; + attempts: number; + retryHistory: RetryAttemptInfo[]; +} + +// Error classification result +export interface ErrorClassification { + classification: 'retriable' | 'non-retriable' | 'unknown'; + reason: string; + suggestedAction: 'retry' | 'escalate' | 'fail'; + retryDelayMs?: number; +} + +// Retry configuration +interface RetryConfig { + maxAttempts: number; + initialDelayMs: number; + maxDelayMs: number; + jitterFactor: number; +} + +@Injectable() +export class RetryContextService { + private readonly logger = new Logger(RetryContextService.name); + + // Default retry configuration + private readonly retryConfig: RetryConfig; + + // Retry history per task (keyed by taskId) + private readonly retryHistories: Map = new Map(); + + // Error patterns for classification + private readonly retriablePatterns: RegExp[] = [ + /ECONNRESET/i, + /ETIMEDOUT/i, + /ENOTFOUND/i, + /ECONNREFUSED/i, + /rate limit/i, + /503/, + /502/, + /504/, + /429/, + /timeout/i, + /temporarily unavailable/i, + /service unavailable/i, + /connection refused/i, + /network error/i, + ]; + + private readonly nonRetriablePatterns: RegExp[] = [ + /validation error/i, + /invalid input/i, + /permission denied/i, + /unauthorized/i, + /forbidden/i, + /not found/i, + /400/, + /401/, + /403/, + /404/, + /syntax error/i, + /invalid request/i, + ]; + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.retryConfig = { + maxAttempts: parseInt( + this.configService.get('RETRY_MAX_ATTEMPTS', '3'), + 10, + ), + initialDelayMs: parseInt( + this.configService.get('RETRY_INITIAL_DELAY_MS', '1000'), + 10, + ), + maxDelayMs: parseInt( + this.configService.get('RETRY_MAX_DELAY_MS', '30000'), + 10, + ), + jitterFactor: parseFloat( + this.configService.get('RETRY_JITTER_FACTOR', '0.3'), + ), + }; + + this.logger.log( + `Retry config: max=${this.retryConfig.maxAttempts}, ` + + `initial=${this.retryConfig.initialDelayMs}ms, ` + + `max=${this.retryConfig.maxDelayMs}ms`, + ); + } + + /** + * Execute a function with intelligent retry and context enhancement + * + * This wraps the provided function in a retry policy that: + * 1. Classifies errors to determine if retry is appropriate + * 2. Records retry history for context enhancement + * 3. Uses exponential backoff with jitter + * 4. Generates suggested adjustments for the AI agent + */ + async executeWithRetry( + context: TaskRetryContext, + fn: (enhancedContext: EnhancedRetryContext) => Promise, + options?: Partial, + ): Promise> { + const config = { ...this.retryConfig, ...options }; + const retryHistory: RetryAttemptInfo[] = []; + + // Create error handler that only retries retriable errors + const errorHandler = handleWhen((error: unknown) => { + const classification = this.classifyError(error as Error); + return classification.classification === 'retriable'; + }); + + // Create retry policy with context-aware backoff + const retryPolicy = retry(errorHandler, { + maxAttempts: config.maxAttempts, + backoff: new DelegateBackoff((retryContext) => { + // Cockatiel v3 uses `error` instead of `result` + const error = (retryContext as any).error as Error || new Error('Unknown error'); + const classification = this.classifyError(error); + + // Calculate backoff with jitter + const backoffMs = this.calculateBackoff( + retryContext.attempt, + config, + classification, + ); + + // Record this attempt + const attemptInfo: RetryAttemptInfo = { + attemptNumber: retryContext.attempt, + previousError: error, + errorType: error.constructor.name, + errorMessage: error.message, + errorClassification: classification.classification, + timestamp: new Date(), + backoffMs, + }; + retryHistory.push(attemptInfo); + + this.logger.warn( + `Retry ${retryContext.attempt}/${config.maxAttempts} for task ${context.taskId}: ` + + `${error.message} (backing off ${backoffMs}ms)`, + ); + + // Emit event for monitoring + this.eventEmitter.emit('retry.attempt', { + taskId: context.taskId, + goalRunId: context.goalRunId, + attempt: retryContext.attempt, + error: error.message, + classification: classification.classification, + backoffMs, + }); + + return backoffMs; + }), + }); + + // Subscribe to retry events for logging + const retryListener = retryPolicy.onRetry((data) => { + this.logger.debug(`Retry triggered for task ${context.taskId}`); + }); + + const giveUpListener = retryPolicy.onGiveUp((data) => { + // Cockatiel v3 uses `error` instead of `reason` + const errorMessage = (data as any).error?.message || (data as any).reason?.message || 'Unknown error'; + this.logger.error( + `Giving up on task ${context.taskId} after ${config.maxAttempts} attempts: ${errorMessage}`, + ); + }); + + try { + // Execute with retry policy + const result = await retryPolicy.execute(async ({ attempt }) => { + // Build enhanced context with retry information + const enhancedContext = this.enhanceContextForRetry( + context, + retryHistory, + attempt, + ); + + return await fn(enhancedContext); + }); + + // Store successful retry history + this.retryHistories.set(context.taskId, retryHistory); + + return { + success: true, + result, + attempts: retryHistory.length + 1, + retryHistory, + }; + } catch (error) { + // Store failed retry history + this.retryHistories.set(context.taskId, retryHistory); + + this.eventEmitter.emit('retry.exhausted', { + taskId: context.taskId, + goalRunId: context.goalRunId, + attempts: retryHistory.length, + finalError: (error as Error).message, + }); + + return { + success: false, + error: error as Error, + attempts: retryHistory.length + 1, + retryHistory, + }; + } finally { + retryListener.dispose(); + giveUpListener.dispose(); + } + } + + /** + * Classify an error as retriable or not + */ + classifyError(error: Error): ErrorClassification { + const message = error.message || ''; + const name = error.name || ''; + const combined = `${name}: ${message}`; + + // Check non-retriable patterns first (more specific) + for (const pattern of this.nonRetriablePatterns) { + if (pattern.test(combined)) { + return { + classification: 'non-retriable', + reason: `Error matches non-retriable pattern: ${pattern}`, + suggestedAction: 'fail', + }; + } + } + + // Check retriable patterns + for (const pattern of this.retriablePatterns) { + if (pattern.test(combined)) { + // Check for rate limit with Retry-After + const retryAfterMatch = message.match(/retry.after[:\s]*(\d+)/i); + const retryDelayMs = retryAfterMatch + ? parseInt(retryAfterMatch[1], 10) * 1000 + : undefined; + + return { + classification: 'retriable', + reason: `Error matches retriable pattern: ${pattern}`, + suggestedAction: 'retry', + retryDelayMs, + }; + } + } + + // Default: unknown, retry with caution + return { + classification: 'unknown', + reason: 'Error does not match known patterns', + suggestedAction: 'retry', + }; + } + + /** + * Enhance context with retry information for AI agent + */ + enhanceContextForRetry( + context: TaskRetryContext, + retryHistory: RetryAttemptInfo[], + currentAttempt: number, + ): EnhancedRetryContext { + const suggestedAdjustments = this.generateSuggestedAdjustments(retryHistory); + const retryContextSummary = this.buildRetryContextSummary(retryHistory); + + return { + ...context, + retryHistory, + isRetry: retryHistory.length > 0, + totalAttempts: currentAttempt, + suggestedAdjustments, + retryContextSummary, + }; + } + + /** + * Get retry history for a task + */ + getRetryHistory(taskId: string): RetryAttemptInfo[] { + return this.retryHistories.get(taskId) || []; + } + + /** + * Clear retry history for a task (after completion) + */ + clearRetryHistory(taskId: string): void { + this.retryHistories.delete(taskId); + } + + /** + * Build a summary string for AI context enhancement + */ + buildRetryContextSummary(retryHistory: RetryAttemptInfo[]): string { + if (retryHistory.length === 0) { + return ''; + } + + const parts: string[] = [ + `--- RETRY CONTEXT (Attempt ${retryHistory.length + 1}) ---`, + `Previous attempts: ${retryHistory.length}`, + ]; + + // Summarize error types + const errorTypes = [...new Set(retryHistory.map((r) => r.errorType))]; + parts.push(`Error types encountered: ${errorTypes.join(', ')}`); + + // Last error details + const lastError = retryHistory[retryHistory.length - 1]; + parts.push(`Last error: ${lastError.errorMessage.slice(0, 200)}`); + + parts.push('--- END RETRY CONTEXT ---'); + + return parts.join('\n'); + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + /** + * Calculate backoff delay with jitter + */ + private calculateBackoff( + attempt: number, + config: RetryConfig, + classification: ErrorClassification, + ): number { + // Respect Retry-After header if present + if (classification.retryDelayMs) { + return classification.retryDelayMs; + } + + // Exponential backoff with full jitter + // Full jitter: random value between 0 and exponential delay + const exponentialDelay = Math.min( + config.initialDelayMs * Math.pow(2, attempt - 1), + config.maxDelayMs, + ); + + // Apply jitter (random factor of the delay) + const jitter = Math.random() * config.jitterFactor * exponentialDelay; + const jitteredDelay = Math.random() * exponentialDelay; // Full jitter + + return Math.round(Math.min(jitteredDelay + jitter, config.maxDelayMs)); + } + + /** + * Generate suggested adjustments based on error patterns + */ + private generateSuggestedAdjustments( + retryHistory: RetryAttemptInfo[], + ): string[] { + const suggestions: string[] = []; + const errorTypes = retryHistory.map((r) => r.errorType); + const errorMessages = retryHistory.map((r) => r.errorMessage); + + // Analyze error patterns and suggest adjustments + if (errorTypes.includes('TimeoutError') || errorMessages.some((m) => /timeout/i.test(m))) { + suggestions.push('Consider breaking the task into smaller sub-tasks'); + suggestions.push('Reduce complexity of the operation'); + } + + if (errorMessages.some((m) => /rate limit/i.test(m) || /429/.test(m))) { + suggestions.push('Reduce API call frequency'); + suggestions.push('Batch operations where possible'); + } + + if (errorMessages.some((m) => /network/i.test(m) || /connection/i.test(m))) { + suggestions.push('Previous attempts encountered network issues'); + suggestions.push('Consider simpler approaches that require fewer external calls'); + } + + if (errorMessages.some((m) => /element not found/i.test(m) || /selector/i.test(m))) { + suggestions.push('UI elements may have changed - try alternative selectors'); + suggestions.push('Wait for page to fully load before interacting'); + } + + // Add summary of previous errors for AI understanding + if (retryHistory.length > 0) { + const uniqueErrors = [...new Set(errorMessages)].slice(0, 3); + suggestions.push(`Previous error(s): ${uniqueErrors.join('; ')}`); + } + + return suggestions; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/scheduler.service.ts b/packages/bytebot-workflow-orchestrator/src/services/scheduler.service.ts new file mode 100644 index 000000000..9def06e83 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/scheduler.service.ts @@ -0,0 +1,360 @@ +/** + * Scheduler Service + * v1.0.5: Added poll-based dispatch mode - skips push-based NodeExecutor when enabled + * v1.0.4: Fixed snake_case/camelCase column mapping for raw SQL queries + * + * This service implements the scheduling loop that: + * 1. Finds nodes ready for execution using FOR UPDATE SKIP LOCKED + * 2. Dispatches nodes to the NodeExecutorService (push-based, legacy) + * 3. Handles node completion and dependency resolution + * + * v1.0.5: When TASK_DISPATCH_ENABLED=true, the scheduler skips push-based + * dispatch because TaskDispatchService handles execution via events. + * + * Uses Kubernetes Lease-based leader election to ensure only one replica + * runs the scheduler at a time. + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { WorkflowService, NodeStatus, WorkflowStatus } from './workflow.service'; +import { NodeExecutorService } from './node-executor.service'; +import { WorkspaceService } from './workspace.service'; +import { + LeaderElectionService, + LEADER_ELECTED_EVENT, + LEADER_LOST_EVENT, +} from './leader-election.service'; + +@Injectable() +export class SchedulerService implements OnModuleInit { + private readonly logger = new Logger(SchedulerService.name); + private readonly enabled: boolean; + private readonly batchSize: number; + // v1.0.5: When true, skip push-based dispatch (TaskDispatchService handles via events) + private readonly pollBasedDispatch: boolean; + private isProcessing: boolean = false; + + constructor( + private configService: ConfigService, + private prisma: PrismaService, + private workflowService: WorkflowService, + private nodeExecutor: NodeExecutorService, + private workspaceService: WorkspaceService, + private eventEmitter: EventEmitter2, + private leaderElection: LeaderElectionService, + ) { + this.enabled = this.configService.get('SCHEDULER_ENABLED', 'true') === 'true'; + this.batchSize = parseInt( + this.configService.get('SCHEDULER_BATCH_SIZE', '10'), + 10, + ); + // v1.0.5: Poll-based dispatch - skip push-based NodeExecutor calls + this.pollBasedDispatch = this.configService.get('TASK_DISPATCH_ENABLED', 'true') === 'true'; + } + + async onModuleInit() { + if (this.enabled) { + this.logger.log(`Scheduler enabled with batch size ${this.batchSize}`); + if (this.pollBasedDispatch) { + this.logger.log('Poll-based dispatch enabled - push-based NodeExecutor calls will be skipped'); + } + this.logger.log('Scheduler will start processing when this instance becomes leader'); + } else { + this.logger.warn('Scheduler is disabled'); + } + } + + /** + * Handle becoming the leader + */ + @OnEvent(LEADER_ELECTED_EVENT) + handleLeaderElected(payload: { identity: string }): void { + this.logger.log(`Became leader with identity: ${payload.identity} - scheduler will now process`); + } + + /** + * Handle losing leadership + */ + @OnEvent(LEADER_LOST_EVENT) + handleLeaderLost(payload: { identity: string }): void { + this.logger.warn(`Lost leadership: ${payload.identity} - scheduler paused`); + this.isProcessing = false; + } + + /** + * Main scheduling loop - runs every 5 seconds + * Only executes if this instance is the leader + */ + @Cron(CronExpression.EVERY_5_SECONDS) + async runSchedulingLoop(): Promise { + // Guard: Scheduler must be enabled + if (!this.enabled) { + return; + } + + // Guard: Only leader processes workflows + if (!this.leaderElection.isLeader) { + this.logger.debug('Skipping workflow processing - not the leader'); + return; + } + + // Guard: Prevent overlapping executions + if (this.isProcessing) { + this.logger.warn('Previous processing still running, skipping this cycle'); + return; + } + + this.isProcessing = true; + const startTime = Date.now(); + + try { + await this.scheduleReadyNodes(); + await this.checkCompletedNodes(); + await this.checkWorkflowCompletion(); + + const duration = Date.now() - startTime; + if (duration > 1000) { + this.logger.log(`Scheduling loop completed in ${duration}ms`); + } + } catch (error: any) { + this.logger.error(`Scheduling loop error: ${error.message}`); + } finally { + this.isProcessing = false; + } + } + + /** + * Get scheduler status for monitoring + */ + getSchedulerStatus(): { + enabled: boolean; + isLeader: boolean; + isProcessing: boolean; + batchSize: number; + } { + return { + enabled: this.enabled, + isLeader: this.leaderElection.isLeader, + isProcessing: this.isProcessing, + batchSize: this.batchSize, + }; + } + + /** + * Find and dispatch ready nodes using FOR UPDATE SKIP LOCKED + */ + private async scheduleReadyNodes(): Promise { + // Use raw query for FOR UPDATE SKIP LOCKED support + // Note: Uses schema-qualified table names (workflow_orchestrator.workflow_nodes) + const readyNodes = await this.prisma.$queryRaw` + SELECT wn.* + FROM workflow_orchestrator.workflow_nodes wn + JOIN workflow_orchestrator.workflow_runs wr ON wn.workflow_run_id = wr.id + WHERE wn.status = 'READY' + AND wr.status = 'RUNNING' + ORDER BY wn."order" ASC + LIMIT ${this.batchSize} + FOR UPDATE OF wn SKIP LOCKED + `; + + if (readyNodes.length === 0) { + return; + } + + this.logger.debug(`Found ${readyNodes.length} ready nodes to schedule`); + + // Process each node + for (const node of readyNodes) { + await this.dispatchNode(node); + } + } + + /** + * Dispatch a node for execution + * v1.0.5: When poll-based dispatch is enabled, skip push-based NodeExecutor calls + * (TaskDispatchService handles execution via workflow.node-ready events) + */ + private async dispatchNode(node: any): Promise { + // v1.0.5: Skip push-based dispatch when poll-based is enabled + // TaskDispatchService listens to workflow.node-ready events and creates tasks + // in the agent system. Agents then poll and claim those tasks. + if (this.pollBasedDispatch) { + this.logger.debug(`Skipping push-based dispatch for node ${node.id} (poll-based dispatch enabled)`); + // Node stays in READY status - TaskDispatchService will handle it + // via the workflow.node-ready event emitted by orchestrator-loop + return; + } + + try { + // Mark node as RUNNING + await this.prisma.workflowNode.update({ + where: { id: node.id }, + data: { + status: NodeStatus.RUNNING, + startedAt: new Date(), + }, + }); + + // Get workflow for workspace info + // v1.0.4: Use snake_case column name from raw SQL query result + const workflowRunId = node.workflow_run_id; + const workflow = await this.prisma.workflowRun.findUnique({ + where: { id: workflowRunId }, + include: { workspace: true }, + }); + + if (!workflow) { + throw new Error(`Workflow not found: ${workflowRunId}`); + } + + // Acquire workspace lock + const lockAcquired = await this.workspaceService.acquireLock( + workflow.workspaceId, + node.id, + 300000, // 5 minute lock + ); + + if (!lockAcquired) { + this.logger.warn(`Could not acquire lock for workspace ${workflow.workspaceId}`); + // Revert to READY status + await this.prisma.workflowNode.update({ + where: { id: node.id }, + data: { status: NodeStatus.READY, startedAt: null }, + }); + return; + } + + // Execute the node (async) - push-based to registered agents + this.nodeExecutor.executeNode(node, workflow).catch((error) => { + this.logger.error(`Node ${node.id} execution error: ${error.message}`); + }); + + this.eventEmitter.emit('node.dispatched', { + nodeId: node.id, + workflowId: workflowRunId, + }); + } catch (error: any) { + this.logger.error(`Failed to dispatch node ${node.id}: ${error.message}`); + + // Mark node as failed + await this.prisma.workflowNode.update({ + where: { id: node.id }, + data: { + status: NodeStatus.FAILED, + error: error.message, + completedAt: new Date(), + }, + }); + } + } + + /** + * Check for completed nodes and resolve dependencies + */ + private async checkCompletedNodes(): Promise { + // Find nodes that have completed and may unblock others + const completedNodes = await this.prisma.workflowNode.findMany({ + where: { + status: { in: [NodeStatus.COMPLETED, NodeStatus.FAILED] }, + dependencyResolved: false, + }, + include: { + workflowRun: true, + }, + }); + + for (const node of completedNodes) { + await this.resolveDependencies(node); + } + } + + /** + * Resolve dependencies after a node completes + */ + private async resolveDependencies(completedNode: any): Promise { + // Find nodes that depend on this one + const dependentNodes = await this.prisma.workflowNode.findMany({ + where: { + workflowRunId: completedNode.workflowRunId, + status: NodeStatus.PENDING, + dependencies: { has: completedNode.id }, + }, + }); + + for (const dependentNode of dependentNodes) { + // Check if all dependencies are satisfied + const allDependencies = dependentNode.dependencies as string[]; + const completedDeps = await this.prisma.workflowNode.count({ + where: { + id: { in: allDependencies }, + status: NodeStatus.COMPLETED, + }, + }); + + if (completedDeps === allDependencies.length) { + // All dependencies satisfied - mark as READY + await this.prisma.workflowNode.update({ + where: { id: dependentNode.id }, + data: { status: NodeStatus.READY }, + }); + + this.logger.debug(`Node ${dependentNode.id} is now READY`); + } + } + + // Mark this node's dependencies as resolved + await this.prisma.workflowNode.update({ + where: { id: completedNode.id }, + data: { dependencyResolved: true }, + }); + } + + /** + * Check if any workflows have completed + */ + private async checkWorkflowCompletion(): Promise { + // Find running workflows with all nodes completed + const runningWorkflows = await this.prisma.workflowRun.findMany({ + where: { status: WorkflowStatus.RUNNING }, + include: { + nodes: true, + }, + }); + + for (const workflow of runningWorkflows) { + const allNodesCompleted = workflow.nodes.every( + (n) => + n.status === NodeStatus.COMPLETED || + n.status === NodeStatus.SKIPPED || + n.status === NodeStatus.FAILED, + ); + + if (!allNodesCompleted) { + continue; + } + + // Determine final status + const hasFailedNodes = workflow.nodes.some( + (n) => n.status === NodeStatus.FAILED, + ); + + const finalStatus = hasFailedNodes + ? WorkflowStatus.FAILED + : WorkflowStatus.COMPLETED; + + const failedNode = workflow.nodes.find( + (n) => n.status === NodeStatus.FAILED, + ); + + await this.workflowService.completeWorkflow( + workflow.id, + finalStatus, + hasFailedNodes ? `Node ${failedNode?.name} failed: ${failedNode?.error}` : undefined, + ); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/slack-bridge.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/slack-bridge.service.spec.ts new file mode 100644 index 000000000..aba59e5a3 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/slack-bridge.service.spec.ts @@ -0,0 +1,123 @@ +import { SlackBridgeService } from './slack-bridge.service'; +import { createHmac } from 'crypto'; + +describe('SlackBridgeService', () => { + function makeService(options: { + signingSecret: string; + botToken?: string; + enabled?: string; + interactivityTimeoutMs?: string; + userPromptResolutionService?: { resolvePrompt: jest.Mock }; + }): SlackBridgeService { + const configService = { + get: jest.fn((key: string) => { + switch (key) { + case 'SLACK_SIGNING_SECRET': + return options.signingSecret; + case 'SLACK_BOT_TOKEN': + return options.botToken || ''; + case 'SLACK_REPLY_IN_SLACK_ENABLED': + return options.enabled ?? 'true'; + case 'SLACK_INTERACTIVITY_TIMEOUT_MS': + return options.interactivityTimeoutMs ?? ''; + default: + return ''; + } + }), + } as any; + + const userPromptResolutionService = (options.userPromptResolutionService ?? + ({ + resolvePrompt: jest.fn(), + } as any)) as any; + + return new SlackBridgeService(configService, userPromptResolutionService); + } + + it('is disabled unless enabled flag + signing secret + bot token are configured', () => { + const serviceMissingToken = makeService({ signingSecret: 'secret', enabled: 'true', botToken: '' }); + expect(serviceMissingToken.isEnabled()).toBe(false); + + const serviceMissingSecret = makeService({ signingSecret: '', enabled: 'true', botToken: 'xoxb-test' }); + expect(serviceMissingSecret.isEnabled()).toBe(false); + + const serviceDisabled = makeService({ signingSecret: 'secret', enabled: 'false', botToken: 'xoxb-test' }); + expect(serviceDisabled.isEnabled()).toBe(false); + + const serviceEnabled = makeService({ signingSecret: 'secret', enabled: 'true', botToken: 'xoxb-test' }); + expect(serviceEnabled.isEnabled()).toBe(true); + }); + + it('verifies Slack signatures for valid requests', () => { + const signingSecret = 'test_signing_secret'; + const service = makeService({ signingSecret, botToken: 'xoxb-test' }); + + const rawBody = 'payload=%7B%22hello%22%3A%22world%22%7D'; + const timestamp = '1700000000'; + const base = `v0:${timestamp}:${rawBody}`; + const digest = createHmac('sha256', signingSecret).update(base, 'utf8').digest('hex'); + const signature = `v0=${digest}`; + + expect( + service.verifySlackRequest({ + rawBody, + signature, + timestamp, + nowMs: parseInt(timestamp, 10) * 1000, + }), + ).toBe(true); + }); + + it('rejects stale Slack timestamps (replay protection)', () => { + const signingSecret = 'test_signing_secret'; + const service = makeService({ signingSecret, botToken: 'xoxb-test' }); + + const rawBody = 'payload=test'; + const timestamp = '1700000000'; + const base = `v0:${timestamp}:${rawBody}`; + const digest = createHmac('sha256', signingSecret).update(base, 'utf8').digest('hex'); + const signature = `v0=${digest}`; + + expect( + service.verifySlackRequest({ + rawBody, + signature, + timestamp, + nowMs: (parseInt(timestamp, 10) + 60 * 10) * 1000, + }), + ).toBe(false); + }); + + it('clears the view submission even if prompt resolution exceeds the handler timeout', async () => { + const resolvePrompt = jest.fn( + () => + new Promise((_resolve) => { + // Never resolves (simulate slow DB / downstream) + }), + ); + + const service = makeService({ + signingSecret: 'secret', + botToken: 'xoxb-test', + enabled: 'true', + interactivityTimeoutMs: '5', + userPromptResolutionService: { resolvePrompt }, + }); + + const startedAt = Date.now(); + const result = await service.handleInteractivity({ + type: 'view_submission', + user: { id: 'U1' }, + team: { id: 'T1' }, + view: { + id: 'V1', + private_metadata: JSON.stringify({ promptId: 'p1', tenantId: 't1', kind: 'TEXT_CLARIFICATION' }), + state: { values: { answer_block: { answer: { type: 'plain_text_input', value: 'hello' } } } }, + }, + } as any); + + expect(resolvePrompt).toHaveBeenCalled(); + expect(result).toEqual({ response_action: 'clear' }); + expect(Date.now() - startedAt).toBeLessThan(200); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/slack-bridge.service.ts b/packages/bytebot-workflow-orchestrator/src/services/slack-bridge.service.ts new file mode 100644 index 000000000..932d1476b --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/slack-bridge.service.ts @@ -0,0 +1,334 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { createHmac, timingSafeEqual } from 'crypto'; +import { UserPromptResolutionService } from './user-prompt-resolution.service'; +import { ActorType } from '@prisma/client'; + +type SlackInteractivePayload = + | { + type: 'block_actions'; + user?: { id?: string; username?: string }; + team?: { id?: string; domain?: string }; + trigger_id?: string; + actions?: Array<{ action_id?: string; value?: string }>; + } + | { + type: 'view_submission'; + user?: { id?: string; username?: string }; + team?: { id?: string; domain?: string }; + view?: { + id?: string; + private_metadata?: string; + state?: { + values?: Record>; + }; + }; + } + | { type: string; [key: string]: any }; + +type SlackPromptMetadata = { + promptId: string; + tenantId: string; + kind?: string; + goalRunId?: string; +}; + +@Injectable() +export class SlackBridgeService { + private readonly logger = new Logger(SlackBridgeService.name); + private readonly signingSecret: string; + private readonly botToken: string; + private readonly enabled: boolean; + private readonly interactivityTimeoutMs: number; + + constructor( + private readonly configService: ConfigService, + private readonly userPromptResolutionService: UserPromptResolutionService, + ) { + this.signingSecret = (this.configService.get('SLACK_SIGNING_SECRET') || '').trim(); + this.botToken = (this.configService.get('SLACK_BOT_TOKEN') || '').trim(); + this.enabled = + (this.configService.get('SLACK_REPLY_IN_SLACK_ENABLED') || '').trim().toLowerCase() === + 'true'; + this.interactivityTimeoutMs = this.parsePositiveInt( + this.configService.get('SLACK_INTERACTIVITY_TIMEOUT_MS') ?? '', + 2500, + ); + } + + isEnabled(): boolean { + // Reply-in-Slack requires both signature verification and an app token for modals. + return this.enabled && this.signingSecret.length > 0 && this.botToken.length > 0; + } + + verifySlackRequest(args: { + rawBody: Buffer | string | undefined; + signature: string | undefined; + timestamp: string | undefined; + nowMs?: number; + }): boolean { + if (!this.signingSecret) return false; + if (!args.rawBody) return false; + if (!args.signature) return false; + if (!args.timestamp) return false; + + const timestamp = parseInt(args.timestamp, 10); + if (!Number.isFinite(timestamp)) return false; + + const nowMs = args.nowMs ?? Date.now(); + const nowSec = Math.floor(nowMs / 1000); + const ageSec = Math.abs(nowSec - timestamp); + if (ageSec > 60 * 5) { + // Replay protection (Slack recommends 5 minutes). + return false; + } + + const bodyString = Buffer.isBuffer(args.rawBody) ? args.rawBody.toString('utf8') : String(args.rawBody); + const base = `v0:${timestamp}:${bodyString}`; + const digest = createHmac('sha256', this.signingSecret).update(base, 'utf8').digest('hex'); + const expected = `v0=${digest}`; + + const sigBuf = Buffer.from(args.signature, 'utf8'); + const expBuf = Buffer.from(expected, 'utf8'); + if (sigBuf.length !== expBuf.length) return false; + return timingSafeEqual(sigBuf, expBuf); + } + + async handleInteractivity(payload: SlackInteractivePayload, headers?: Record): Promise { + if (!this.isEnabled()) { + return { + text: 'Reply-in-Slack is not enabled. Use the "Open Prompt" button instead.', + response_type: 'ephemeral', + replace_original: false, + }; + } + + if (payload?.type === 'block_actions') { + return await this.handleBlockActions( + payload as Extract, + ); + } + + if (payload?.type === 'view_submission') { + return await this.handleViewSubmission( + payload as Extract, + headers, + ); + } + + return { text: 'Unsupported Slack payload type', response_type: 'ephemeral', replace_original: false }; + } + + private async handleBlockActions(payload: Extract): Promise { + const action = Array.isArray(payload.actions) ? payload.actions[0] : undefined; + const actionId = typeof action?.action_id === 'string' ? action.action_id : ''; + const value = typeof action?.value === 'string' ? action.value : ''; + + if (actionId !== 'bytebot_user_prompt_reply') { + return { text: 'Unsupported action', response_type: 'ephemeral', replace_original: false }; + } + + const triggerId = typeof payload.trigger_id === 'string' ? payload.trigger_id : ''; + const metadata = this.parseMetadata(value); + + if (!triggerId || !metadata) { + return { + text: 'Missing prompt metadata. Use the "Open Prompt" button instead.', + response_type: 'ephemeral', + replace_original: false, + }; + } + + const ok = await this.openResolveModal(triggerId, metadata); + if (!ok) { + return { + text: 'Failed to open Slack modal. Use the "Open Prompt" button instead.', + response_type: 'ephemeral', + replace_original: false, + }; + } + + // Empty string response acknowledges the action (Slack will replace original only if asked). + return ''; + } + + private async handleViewSubmission( + payload: Extract, + headers?: Record, + ): Promise { + const metaRaw = typeof payload.view?.private_metadata === 'string' ? payload.view.private_metadata : ''; + const metadata = this.parseMetadata(metaRaw); + if (!metadata) { + return { response_action: 'errors', errors: { answer: 'Missing prompt metadata.' } }; + } + + const answer = this.extractAnswer(payload.view?.state?.values); + if (!answer) { + return { response_action: 'errors', errors: { answer: 'Answer is required.' } }; + } + + const slackUserId = typeof payload.user?.id === 'string' ? payload.user.id : undefined; + const slackTeamId = typeof payload.team?.id === 'string' ? payload.team.id : undefined; + + const viewId = typeof payload.view?.id === 'string' ? payload.view.id : 'unknown'; + const idempotencyKey = `slack:view_submission:${viewId}`; + + const resolvePromise = this.userPromptResolutionService.resolvePrompt({ + promptId: metadata.promptId, + tenantId: metadata.tenantId, + actor: { + type: ActorType.HUMAN, + id: slackUserId, + authContext: { + source: 'slack', + slackTeamId, + }, + }, + answers: { text: answer }, + idempotencyKey, + requestId: typeof headers?.['x-request-id'] === 'string' ? headers['x-request-id'] : undefined, + clientRequestId: undefined, + ipAddress: undefined, + userAgent: undefined, + }); + + // Slack expects handlers to respond quickly (commonly ~3 seconds). If resolution takes too long, + // clear the modal and allow the resolution to finish asynchronously. Idempotency prevents duplicates. + resolvePromise.catch((err: any) => { + this.logger.warn( + `Slack prompt resolve failed (will surface via prompt status): ${err?.message || String(err)}`, + ); + }); + + try { + await this.withTimeout(resolvePromise, this.interactivityTimeoutMs); + } catch (err: any) { + const msg = err?.message || String(err); + if (msg === 'timeout') { + this.logger.warn( + `Slack view_submission handler exceeded ${this.interactivityTimeoutMs}ms budget; continuing resolve asynchronously`, + ); + return { response_action: 'clear' }; + } + + return { response_action: 'errors', errors: { answer: 'Failed to resolve prompt. Use "Open Prompt".' } }; + } + + return { response_action: 'clear' }; + } + + private parseMetadata(value: string): SlackPromptMetadata | null { + if (!value) return null; + try { + const parsed = JSON.parse(value); + if (!parsed || typeof parsed !== 'object') return null; + + const promptId = typeof (parsed as any).promptId === 'string' ? (parsed as any).promptId : ''; + const tenantId = typeof (parsed as any).tenantId === 'string' ? (parsed as any).tenantId : ''; + if (!promptId || !tenantId) return null; + + return { + promptId, + tenantId, + kind: typeof (parsed as any).kind === 'string' ? (parsed as any).kind : undefined, + goalRunId: typeof (parsed as any).goalRunId === 'string' ? (parsed as any).goalRunId : undefined, + }; + } catch { + return null; + } + } + + private extractAnswer( + values: Record> | undefined, + ): string | null { + if (!values || typeof values !== 'object') return null; + for (const blockId of Object.keys(values)) { + const block = values[blockId]; + if (!block || typeof block !== 'object') continue; + for (const actionId of Object.keys(block)) { + const entry = block[actionId]; + if (!entry || typeof entry !== 'object') continue; + if (typeof entry.value === 'string' && entry.value.trim() !== '') { + return entry.value.trim(); + } + } + } + return null; + } + + private async openResolveModal(triggerId: string, metadata: SlackPromptMetadata): Promise { + const view = { + type: 'modal', + callback_id: 'bytebot_user_prompt_resolve', + private_metadata: JSON.stringify(metadata), + title: { type: 'plain_text', text: 'ByteBot Prompt' }, + submit: { type: 'plain_text', text: 'Resolve' }, + close: { type: 'plain_text', text: 'Cancel' }, + blocks: [ + { + type: 'section', + text: { + type: 'mrkdwn', + text: + `*Prompt:* \`${metadata.promptId}\`` + + (metadata.goalRunId ? `\n*Goal Run:* \`${metadata.goalRunId}\`` : '') + + (metadata.kind ? `\n*Kind:* ${metadata.kind}` : ''), + }, + }, + { + type: 'input', + block_id: 'answer_block', + label: { type: 'plain_text', text: 'Answer' }, + element: { + type: 'plain_text_input', + action_id: 'answer', + multiline: true, + }, + }, + ], + }; + + const controller = new AbortController(); + const t = setTimeout(() => controller.abort(), this.interactivityTimeoutMs); + let response: Response; + try { + response = await fetch('https://slack.com/api/views.open', { + method: 'POST', + headers: { + Authorization: `Bearer ${this.botToken}`, + 'Content-Type': 'application/json; charset=utf-8', + }, + body: JSON.stringify({ trigger_id: triggerId, view }), + signal: controller.signal, + }); + } catch (err: any) { + const msg = err?.name === 'AbortError' ? 'timeout' : err?.message || String(err); + this.logger.warn(`Slack views.open failed: ${msg}`); + clearTimeout(t); + return false; + } finally { + clearTimeout(t); + } + + const json = await response.json().catch(() => null); + if (!response.ok || !json || json.ok !== true) { + this.logger.warn(`Slack views.open failed: ${response.status} ${JSON.stringify(json)}`); + return false; + } + + return true; + } + + private parsePositiveInt(raw: string, fallback: number): number { + const val = parseInt(String(raw || '').trim(), 10); + if (!Number.isFinite(val) || val <= 0) return fallback; + return val; + } + + private async withTimeout(p: Promise, timeoutMs: number): Promise { + return await Promise.race([ + p, + new Promise((_resolve, reject) => setTimeout(() => reject(new Error('timeout')), timeoutMs)), + ]); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/slack-notification.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/slack-notification.service.spec.ts new file mode 100644 index 000000000..2cdb2f8f9 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/slack-notification.service.spec.ts @@ -0,0 +1,215 @@ +import { SlackNotificationService, SlackEventType } from './slack-notification.service'; + +type PrismaMock = { + notificationChannel: { findMany: jest.Mock }; + notificationDelivery: { create: jest.Mock }; +}; + +describe('SlackNotificationService', () => { + const originalFetch = global.fetch; + + beforeEach(() => { + jest.resetAllMocks(); + }); + + afterAll(() => { + global.fetch = originalFetch; + }); + + function makeService(options: { + botToken?: string; + channels: Array<{ id: string; config: any }>; + }): { service: SlackNotificationService; prisma: PrismaMock } { + const prisma: PrismaMock = { + notificationChannel: { + findMany: jest.fn(async () => + options.channels.map((c) => ({ + id: c.id, + tenantId: 't1', + type: 'SLACK', + enabled: true, + events: [SlackEventType.GOAL_STARTED], + config: c.config, + })), + ), + }, + notificationDelivery: { + create: jest.fn(async () => ({})), + }, + }; + + const configService = { + get: jest.fn((key: string, fallback?: any) => { + switch (key) { + case 'APP_BASE_URL': + return 'https://app.bytebot.ai'; + case 'SLACK_TIMEOUT_MS': + return '1000'; + case 'SLACK_BOT_TOKEN': + return options.botToken ?? ''; + case 'SLACK_CHANNEL_LOOKUP_TTL_MS': + return ''; + case 'SLACK_CHANNEL_LOOKUP_MAX_PAGES': + return ''; + case 'SLACK_REPLY_IN_SLACK_ENABLED': + return 'false'; + case 'SLACK_SIGNING_SECRET': + return ''; + default: + return fallback ?? ''; + } + }), + } as any; + + return { + service: new SlackNotificationService(prisma as any, configService), + prisma, + }; + } + + it('delivers via webhook when webhookUrl is configured', async () => { + const webhookUrl = 'https://hooks.slack.test/services/T00000000/B00000000/XXXX'; + const fetchMock = jest.fn(async () => ({ ok: true, status: 200, text: async () => 'ok' })) as any; + global.fetch = fetchMock; + + const { service } = makeService({ + channels: [ + { + id: 'c1', + config: { webhookUrl }, + }, + ], + }); + + const results = await service.sendGoalNotification(SlackEventType.GOAL_STARTED, { + goalRunId: 'gr1', + tenantId: 't1', + goal: 'test', + status: 'STARTED', + }); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(fetchMock).toHaveBeenCalledWith( + webhookUrl, + expect.objectContaining({ method: 'POST', headers: expect.any(Object) }), + ); + }); + + it('delivers via bot token when webhookUrl is not configured and channelId is provided', async () => { + const fetchMock = jest.fn(async (_url: string, _init: any) => ({ + ok: true, + status: 200, + json: async () => ({ ok: true }), + headers: new Map(), + })) as any; + global.fetch = fetchMock; + + const { service } = makeService({ + botToken: 'xoxb-test', + channels: [ + { + id: 'c1', + config: { channelId: 'C1234567890' }, + }, + ], + }); + + const results = await service.sendGoalNotification(SlackEventType.GOAL_STARTED, { + goalRunId: 'gr1', + tenantId: 't1', + goal: 'test', + status: 'STARTED', + }); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + + const [url, init] = fetchMock.mock.calls[0]; + expect(url).toBe('https://slack.com/api/chat.postMessage'); + expect(init.headers.Authorization).toBe('Bearer xoxb-test'); + const body = JSON.parse(init.body); + expect(body.channel).toBe('C1234567890'); + }); + + it('resolves #channel names via conversations.list and caches results', async () => { + const fetchMock = jest.fn(async (url: string, init: any) => { + if (url.startsWith('https://slack.com/api/conversations.list')) { + return { + ok: true, + status: 200, + json: async () => ({ + ok: true, + channels: [{ id: 'C999', name: 'butler-vantage' }], + response_metadata: { next_cursor: '' }, + }), + }; + } + if (url === 'https://slack.com/api/chat.postMessage') { + return { + ok: true, + status: 200, + json: async () => ({ ok: true }), + headers: new Map(), + }; + } + throw new Error(`unexpected fetch url: ${url}`); + }) as any; + global.fetch = fetchMock; + + const { service } = makeService({ + botToken: 'xoxb-test', + channels: [ + { + id: 'c1', + config: { channel: '#butler-vantage' }, + }, + ], + }); + + await service.sendGoalNotification(SlackEventType.GOAL_STARTED, { + goalRunId: 'gr1', + tenantId: 't1', + goal: 'test', + status: 'STARTED', + }); + await service.sendGoalNotification(SlackEventType.GOAL_STARTED, { + goalRunId: 'gr2', + tenantId: 't1', + goal: 'test2', + status: 'STARTED', + }); + + // First send does lookup + post; second send should reuse cache and only post. + const calls = fetchMock.mock.calls as unknown as Array<[string, any]>; + expect(calls.filter((call) => String(call[0]).includes('conversations.list'))).toHaveLength(1); + expect(calls.filter((call) => call[0] === 'https://slack.com/api/chat.postMessage')).toHaveLength(2); + }); + + it('fails bot-based delivery when SLACK_BOT_TOKEN is missing', async () => { + const fetchMock = jest.fn(); + global.fetch = fetchMock as any; + + const { service } = makeService({ + botToken: '', + channels: [ + { + id: 'c1', + config: { channelId: 'C1234567890' }, + }, + ], + }); + + const results = await service.sendGoalNotification(SlackEventType.GOAL_STARTED, { + goalRunId: 'gr1', + tenantId: 't1', + goal: 'test', + status: 'STARTED', + }); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].error).toContain('Missing SLACK_BOT_TOKEN'); + expect(fetchMock).not.toHaveBeenCalled(); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/slack-notification.service.ts b/packages/bytebot-workflow-orchestrator/src/services/slack-notification.service.ts new file mode 100644 index 000000000..f6de66bf4 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/slack-notification.service.ts @@ -0,0 +1,1216 @@ +/** + * Slack Notification Service + * Phase 8: External Integrations + * + * Sends notifications to Slack via Incoming Webhooks. + * Uses Block Kit for rich message formatting. + * + * Features: + * - Rich Block Kit message formatting + * - Goal run notifications (started, completed, failed) + * - Batch progress notifications + * - Approval request notifications with action buttons + * - Configurable per-channel settings + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import * as crypto from 'crypto'; + +// Slack configuration interface +export interface SlackConfig { + webhookUrl?: string; + /** + * Slack channel identifier for bot-based delivery (e.g., C0123456789). + * If omitted, ByteBot may resolve `channel` (name) via Slack Web API when + * `SLACK_BOT_TOKEN` has conversations:read. + */ + channelId?: string; + channel?: string; + username?: string; + iconEmoji?: string; + iconUrl?: string; +} + +// Slack Block Kit block types +interface SlackBlock { + type: string; + text?: { + type: string; + text: string; + emoji?: boolean; + }; + fields?: Array<{ + type: string; + text: string; + }>; + accessory?: any; + elements?: any[]; + block_id?: string; +} + +// Slack message payload +interface SlackMessage { + text: string; // Fallback text for notifications + blocks?: SlackBlock[]; + attachments?: Array<{ + color?: string; + blocks?: SlackBlock[]; + }>; + channel?: string; + username?: string; + icon_emoji?: string; + icon_url?: string; +} + +// Event types supported by Slack notifications +export enum SlackEventType { + GOAL_STARTED = 'goal.started', + GOAL_COMPLETED = 'goal.completed', + GOAL_FAILED = 'goal.failed', + GOAL_CANCELLED = 'goal.cancelled', + BATCH_STARTED = 'batch.started', + BATCH_PROGRESS = 'batch.progress', + BATCH_COMPLETED = 'batch.completed', + BATCH_FAILED = 'batch.failed', + APPROVAL_REQUESTED = 'approval.requested', + APPROVAL_APPROVED = 'approval.approved', + APPROVAL_REJECTED = 'approval.rejected', + APPROVAL_EXPIRED = 'approval.expired', + USER_PROMPT_CREATED = 'user_prompt.created', + USER_PROMPT_RESOLVED = 'user_prompt.resolved', + USER_PROMPT_CANCELLED = 'user_prompt.cancelled', +} + +// Event data interfaces +export interface GoalEventData { + goalRunId: string; + tenantId: string; + goal: string; + status: string; + phase?: string; + templateName?: string; + duration?: number; // milliseconds + error?: string; + stepsCompleted?: number; + totalSteps?: number; + links?: { + goalRun?: string; + }; +} + +export interface BatchEventData { + batchId: string; + tenantId: string; + name: string; + status: string; + totalGoals: number; + completedGoals: number; + failedGoals: number; + progress: number; + links?: { + batch?: string; + }; +} + +export interface ApprovalEventData { + approvalId: string; + tenantId: string; + toolName: string; + riskLevel: string; + summary: string; + decision?: { + status: string; + reviewerId?: string; + reason?: string; + }; + links?: { + approval?: string; + }; +} + +export interface UserPromptEventData { + promptId: string; + tenantId: string; + goalRunId: string; + checklistItemId: string | null; + kind: string; + stepDescription?: string | null; + links?: { + goalRun?: string; + prompt?: string; + desktopTakeover?: string | null; + }; +} + +// Delivery result +export interface SlackDeliveryResult { + success: boolean; + channelId: string; + eventId: string; + statusCode?: number; + error?: string; + attempts: number; + deliveredAt?: Date; +} + +// Retry configuration +const RETRY_CONFIG = { + maxAttempts: 3, + baseDelayMs: 1000, + maxDelayMs: 30000, + backoffMultiplier: 2, +}; + +@Injectable() +export class SlackNotificationService { + private readonly logger = new Logger(SlackNotificationService.name); + private readonly baseUrl: string; + private readonly timeoutMs: number; + private readonly botToken: string; + private readonly channelLookupTtlMs: number; + private readonly channelLookupMaxPages: number; + private readonly channelIdCache = new Map(); + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + ) { + this.baseUrl = this.configService.get( + 'APP_BASE_URL', + 'https://app.bytebot.ai', + ); + this.timeoutMs = parseInt( + this.configService.get('SLACK_TIMEOUT_MS', '10000'), + 10, + ); + this.botToken = (this.configService.get('SLACK_BOT_TOKEN') || '').trim(); + this.channelLookupTtlMs = this.parsePositiveInt( + this.configService.get('SLACK_CHANNEL_LOOKUP_TTL_MS') ?? '', + 10 * 60 * 1000, + ); + this.channelLookupMaxPages = this.parsePositiveInt( + this.configService.get('SLACK_CHANNEL_LOOKUP_MAX_PAGES') ?? '', + 5, + ); + this.logger.log('SlackNotificationService initialized'); + } + + /** + * Send a goal event notification + */ + async sendGoalNotification( + eventType: SlackEventType, + data: GoalEventData, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Slack channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const message = this.buildGoalMessage(eventType, data); + return this.deliverToChannels(channels, eventType, message); + } + + /** + * Send a batch event notification + */ + async sendBatchNotification( + eventType: SlackEventType, + data: BatchEventData, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Slack channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const message = this.buildBatchMessage(eventType, data); + return this.deliverToChannels(channels, eventType, message); + } + + /** + * Send an approval event notification + */ + async sendApprovalNotification( + eventType: SlackEventType, + data: ApprovalEventData, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Slack channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const message = this.buildApprovalMessage(eventType, data); + return this.deliverToChannels(channels, eventType, message); + } + + /** + * Send a user prompt notification (durable WAIT surface) + */ + async sendUserPromptNotification( + eventType: SlackEventType, + data: UserPromptEventData, + options?: { eventId?: string }, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Slack channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const message = this.buildUserPromptMessage(eventType, data); + return this.deliverToChannels(channels, eventType, message, options); + } + + /** + * Build Slack message for goal events + */ + private buildGoalMessage(eventType: SlackEventType, data: GoalEventData): SlackMessage { + const { emoji, color, title } = this.getGoalEventStyle(eventType); + + const blocks: SlackBlock[] = [ + { + type: 'header', + text: { + type: 'plain_text', + text: `${emoji} ${title}`, + emoji: true, + }, + }, + { + type: 'section', + text: { + type: 'mrkdwn', + text: `*Goal:* ${this.truncateText(data.goal, 200)}`, + }, + }, + ]; + + // Add details fields + const fields: Array<{ type: string; text: string }> = [ + { type: 'mrkdwn', text: `*Status:* ${data.status}` }, + ]; + + if (data.phase) { + fields.push({ type: 'mrkdwn', text: `*Phase:* ${data.phase}` }); + } + + if (data.templateName) { + fields.push({ type: 'mrkdwn', text: `*Template:* ${data.templateName}` }); + } + + if (data.duration) { + fields.push({ + type: 'mrkdwn', + text: `*Duration:* ${this.formatDuration(data.duration)}`, + }); + } + + if (data.stepsCompleted !== undefined) { + fields.push({ + type: 'mrkdwn', + text: `*Steps:* ${data.stepsCompleted}/${data.totalSteps || '?'}`, + }); + } + + blocks.push({ + type: 'section', + fields, + }); + + // Add error message for failed goals + if (data.error && eventType === SlackEventType.GOAL_FAILED) { + blocks.push({ + type: 'section', + text: { + type: 'mrkdwn', + text: `*Error:*\n\`\`\`${this.truncateText(data.error, 500)}\`\`\``, + }, + }); + } + + // Add action button + if (data.links?.goalRun) { + blocks.push({ + type: 'actions', + elements: [ + { + type: 'button', + text: { + type: 'plain_text', + text: 'View Goal Run', + emoji: true, + }, + url: data.links.goalRun, + style: 'primary', + }, + ], + }); + } + + // Add timestamp + blocks.push({ + type: 'context', + elements: [ + { + type: 'mrkdwn', + text: `Goal Run ID: \`${data.goalRunId}\` | ${new Date().toISOString()}`, + }, + ], + }); + + return { + text: `${emoji} ${title}: ${this.truncateText(data.goal, 100)}`, + attachments: [ + { + color, + blocks, + }, + ], + }; + } + + /** + * Build Slack message for batch events + */ + private buildBatchMessage(eventType: SlackEventType, data: BatchEventData): SlackMessage { + const { emoji, color, title } = this.getBatchEventStyle(eventType); + + const progressBar = this.buildProgressBar(data.progress); + + const blocks: SlackBlock[] = [ + { + type: 'header', + text: { + type: 'plain_text', + text: `${emoji} ${title}`, + emoji: true, + }, + }, + { + type: 'section', + text: { + type: 'mrkdwn', + text: `*Batch:* ${data.name}`, + }, + }, + { + type: 'section', + fields: [ + { type: 'mrkdwn', text: `*Status:* ${data.status}` }, + { type: 'mrkdwn', text: `*Progress:* ${data.progress}%` }, + { type: 'mrkdwn', text: `*Completed:* ${data.completedGoals}/${data.totalGoals}` }, + { type: 'mrkdwn', text: `*Failed:* ${data.failedGoals}` }, + ], + }, + { + type: 'section', + text: { + type: 'mrkdwn', + text: progressBar, + }, + }, + ]; + + if (data.links?.batch) { + blocks.push({ + type: 'actions', + elements: [ + { + type: 'button', + text: { + type: 'plain_text', + text: 'View Batch', + emoji: true, + }, + url: data.links.batch, + style: 'primary', + }, + ], + }); + } + + blocks.push({ + type: 'context', + elements: [ + { + type: 'mrkdwn', + text: `Batch ID: \`${data.batchId}\` | ${new Date().toISOString()}`, + }, + ], + }); + + return { + text: `${emoji} ${title}: ${data.name} (${data.progress}%)`, + attachments: [ + { + color, + blocks, + }, + ], + }; + } + + /** + * Build Slack message for approval events + */ + private buildApprovalMessage( + eventType: SlackEventType, + data: ApprovalEventData, + ): SlackMessage { + const { emoji, color, title } = this.getApprovalEventStyle(eventType); + + const blocks: SlackBlock[] = [ + { + type: 'header', + text: { + type: 'plain_text', + text: `${emoji} ${title}`, + emoji: true, + }, + }, + { + type: 'section', + text: { + type: 'mrkdwn', + text: `*Action:* ${data.summary}`, + }, + }, + { + type: 'section', + fields: [ + { type: 'mrkdwn', text: `*Tool:* ${data.toolName}` }, + { type: 'mrkdwn', text: `*Risk Level:* ${this.getRiskEmoji(data.riskLevel)} ${data.riskLevel}` }, + ], + }, + ]; + + // Add decision info for resolved approvals + if (data.decision) { + blocks.push({ + type: 'section', + fields: [ + { type: 'mrkdwn', text: `*Decision:* ${data.decision.status}` }, + ...(data.decision.reviewerId + ? [{ type: 'mrkdwn', text: `*Reviewer:* ${data.decision.reviewerId}` }] + : []), + ...(data.decision.reason + ? [{ type: 'mrkdwn', text: `*Reason:* ${data.decision.reason}` }] + : []), + ], + }); + } + + if (data.links?.approval) { + const buttonStyle = eventType === SlackEventType.APPROVAL_REQUESTED ? 'danger' : 'primary'; + blocks.push({ + type: 'actions', + elements: [ + { + type: 'button', + text: { + type: 'plain_text', + text: eventType === SlackEventType.APPROVAL_REQUESTED ? 'Review & Approve' : 'View Details', + emoji: true, + }, + url: data.links.approval, + style: buttonStyle, + }, + ], + }); + } + + blocks.push({ + type: 'context', + elements: [ + { + type: 'mrkdwn', + text: `Approval ID: \`${data.approvalId}\` | ${new Date().toISOString()}`, + }, + ], + }); + + return { + text: `${emoji} ${title}: ${data.summary}`, + attachments: [ + { + color, + blocks, + }, + ], + }; + } + + /** + * Get active Slack channels for a tenant and event type + */ + private async getActiveChannels( + tenantId: string, + eventType: SlackEventType, + ): Promise> { + try { + const channels = await this.prisma.notificationChannel.findMany({ + where: { + tenantId, + type: 'SLACK', + enabled: true, + events: { + has: eventType, + }, + }, + }); + + return channels.map((c) => ({ + id: c.id, + config: c.config as unknown as SlackConfig, + })); + } catch (error: any) { + if (error.code === 'P2021' || error.message?.includes('does not exist')) { + return []; + } + throw error; + } + } + + /** + * Deliver message to multiple channels + */ + private async deliverToChannels( + channels: Array<{ id: string; config: SlackConfig }>, + eventType: SlackEventType, + message: SlackMessage, + options?: { eventId?: string }, + ): Promise { + const eventId = options?.eventId ?? this.generateEventId(); + const results: SlackDeliveryResult[] = []; + + for (const channel of channels) { + const result = await this.deliverToChannel(channel, eventType, eventId, message); + results.push(result); + await this.recordDelivery(channel.id, eventId, eventType, result, message); + } + + return results; + } + + private buildUserPromptMessage(eventType: SlackEventType, data: UserPromptEventData): SlackMessage { + const { emoji, color, title } = this.getUserPromptEventStyle(eventType); + + const goalRunLink = data.links?.goalRun || `${this.baseUrl}/goals/${data.goalRunId}`; + const promptLink = data.links?.prompt || `${this.baseUrl}/prompts/${data.promptId}`; + const desktopTakeoverLink = data.links?.desktopTakeover || null; + const checklistLabel = data.checklistItemId ? `Checklist Item: \`${data.checklistItemId}\`` : 'Checklist Item: (none)'; + + const blocks: SlackBlock[] = [ + { + type: 'header', + text: { + type: 'plain_text', + text: `${emoji} ${title}`, + emoji: true, + }, + }, + { + type: 'section', + text: { + type: 'mrkdwn', + text: `*Step:* ${this.truncateText(data.stepDescription || '(no description)', 200)}`, + }, + }, + { + type: 'section', + fields: [ + { type: 'mrkdwn', text: `*Prompt Kind:* ${data.kind}` }, + { type: 'mrkdwn', text: `*Goal Run:* \`${data.goalRunId}\`` }, + ], + }, + ]; + + const actionElements: any[] = []; + if (promptLink) { + actionElements.push({ + type: 'button', + text: { + type: 'plain_text', + text: 'Open Prompt', + emoji: true, + }, + url: promptLink, + style: eventType === SlackEventType.USER_PROMPT_CREATED ? 'primary' : 'default', + }); + } + + // Reply-in-Slack (v2): requires a Slack app with interactive components + server-side signature verification. + if ( + eventType === SlackEventType.USER_PROMPT_CREATED && + data.kind === 'TEXT_CLARIFICATION' && + this.isReplyInSlackEnabled() + ) { + actionElements.push({ + type: 'button', + action_id: 'bytebot_user_prompt_reply', + text: { + type: 'plain_text', + text: 'Reply in Slack', + emoji: true, + }, + value: JSON.stringify({ + promptId: data.promptId, + tenantId: data.tenantId, + goalRunId: data.goalRunId, + kind: data.kind, + }), + }); + } + + if (goalRunLink) { + actionElements.push({ + type: 'button', + text: { + type: 'plain_text', + text: 'Open Goal Run', + emoji: true, + }, + url: goalRunLink, + style: eventType === SlackEventType.USER_PROMPT_CREATED ? 'primary' : 'default', + }); + } + if (desktopTakeoverLink) { + actionElements.push({ + type: 'button', + text: { + type: 'plain_text', + text: 'Take Over Desktop', + emoji: true, + }, + url: desktopTakeoverLink, + style: 'primary', + }); + } + + if (actionElements.length > 0) { + blocks.push({ + type: 'actions', + elements: actionElements, + }); + } + + blocks.push({ + type: 'context', + elements: [ + { + type: 'mrkdwn', + text: `Prompt ID: \`${data.promptId}\` | ${checklistLabel} | ${new Date().toISOString()}`, + }, + ], + }); + + return { + text: `${title}: ${this.truncateText(data.stepDescription || 'User input required', 120)}`, + attachments: [ + { + color, + blocks, + }, + ], + }; + } + + private isReplyInSlackEnabled(): boolean { + const enabledFlag = + (this.configService.get('SLACK_REPLY_IN_SLACK_ENABLED') || '').trim().toLowerCase() === 'true'; + if (!enabledFlag) return false; + + const signingSecret = (this.configService.get('SLACK_SIGNING_SECRET') || '').trim(); + const botToken = (this.configService.get('SLACK_BOT_TOKEN') || '').trim(); + + return signingSecret.length > 0 && botToken.length > 0; + } + + /** + * Deliver message to a single channel with retry + */ + private async deliverToChannel( + channel: { id: string; config: SlackConfig }, + eventType: SlackEventType, + eventId: string, + message: SlackMessage, + ): Promise { + let lastError: string | undefined; + let lastStatusCode: number | undefined; + let attempts = 0; + + const isWebhookMode = + typeof channel.config.webhookUrl === 'string' && channel.config.webhookUrl.trim().length > 0; + + // Apply channel-specific settings + const finalMessage: SlackMessage = { + ...message, + channel: channel.config.channel || message.channel, + username: channel.config.username || message.username || 'ByteBot', + icon_emoji: channel.config.iconEmoji || message.icon_emoji || ':robot_face:', + icon_url: channel.config.iconUrl, + }; + + for (let attempt = 1; attempt <= RETRY_CONFIG.maxAttempts; attempt++) { + attempts = attempt; + + try { + const result = isWebhookMode + ? await this.sendToSlackWebhook(channel.config.webhookUrl as string, finalMessage) + : await this.sendToSlackBot(channel.config, finalMessage); + + if (result.success) { + this.logger.log(`Slack notification delivered: ${channel.id} (attempt ${attempt})`); + return { + success: true, + channelId: channel.id, + eventId, + statusCode: result.statusCode, + attempts, + deliveredAt: new Date(), + }; + } + + lastStatusCode = result.statusCode; + lastError = result.error; + + // Don't retry on 4xx errors + if (result.statusCode && result.statusCode >= 400 && result.statusCode < 500) { + break; + } + } catch (error: any) { + lastError = error.message; + this.logger.warn(`Slack delivery failed (attempt ${attempt}): ${error.message}`); + } + + // Exponential backoff + if (attempt < RETRY_CONFIG.maxAttempts) { + const delay = Math.min( + RETRY_CONFIG.baseDelayMs * Math.pow(RETRY_CONFIG.backoffMultiplier, attempt - 1), + RETRY_CONFIG.maxDelayMs, + ); + await this.sleep(delay); + } + } + + this.logger.error(`Slack delivery failed after ${attempts} attempts: ${lastError}`); + return { + success: false, + channelId: channel.id, + eventId, + statusCode: lastStatusCode, + error: lastError, + attempts, + }; + } + + /** + * Send HTTP request to Slack webhook + */ + private async sendToSlack( + webhookUrl: string, + message: SlackMessage, + ): Promise<{ success: boolean; statusCode?: number; error?: string }> { + return this.sendToSlackWebhook(webhookUrl, message); + } + + private async sendToSlackWebhook( + webhookUrl: string, + message: SlackMessage, + ): Promise<{ success: boolean; statusCode?: number; error?: string }> { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs); + + try { + const response = await fetch(webhookUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(message), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (response.ok) { + return { success: true, statusCode: response.status }; + } + + const responseText = await response.text().catch(() => ''); + return { + success: false, + statusCode: response.status, + error: `HTTP ${response.status}: ${responseText.substring(0, 200)}`, + }; + } catch (error: any) { + clearTimeout(timeoutId); + + if (error.name === 'AbortError') { + return { success: false, error: `Timeout after ${this.timeoutMs}ms` }; + } + + return { success: false, error: error.message }; + } + } + + private async sendToSlackBot( + config: SlackConfig, + message: SlackMessage, + ): Promise<{ success: boolean; statusCode?: number; error?: string }> { + if (!this.botToken) { + return { success: false, error: 'Missing SLACK_BOT_TOKEN for bot-based Slack delivery' }; + } + + const resolvedChannelId = await this.resolveChannelId(config); + if (!resolvedChannelId) { + return { + success: false, + error: + 'Slack channelId resolution failed. Provide config.channelId or grant conversations:read to SLACK_BOT_TOKEN.', + }; + } + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs); + + const payload: Record = { + channel: resolvedChannelId, + text: message.text, + ...(message.blocks ? { blocks: message.blocks } : {}), + ...(message.attachments ? { attachments: message.attachments } : {}), + }; + + try { + const response = await fetch('https://slack.com/api/chat.postMessage', { + method: 'POST', + headers: { + Authorization: `Bearer ${this.botToken}`, + 'Content-Type': 'application/json; charset=utf-8', + }, + body: JSON.stringify(payload), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + // Slack uses 200 with ok=false for most errors; also uses 429 for rate limits. + if (response.status === 429) { + const retryAfter = response.headers.get('retry-after'); + return { + success: false, + statusCode: response.status, + error: `Rate limited${retryAfter ? ` (retry-after ${retryAfter}s)` : ''}`, + }; + } + + const json = (await response.json().catch(() => null)) as any; + if (response.ok && json?.ok === true) { + return { success: true, statusCode: response.status }; + } + + const err = typeof json?.error === 'string' ? json.error : 'unknown_error'; + return { success: false, statusCode: response.status, error: `Slack API error: ${err}` }; + } catch (error: any) { + clearTimeout(timeoutId); + + if (error.name === 'AbortError') { + return { success: false, error: `Timeout after ${this.timeoutMs}ms` }; + } + + return { success: false, error: error.message }; + } + } + + private async resolveChannelId(config: SlackConfig): Promise { + const explicit = typeof config.channelId === 'string' ? config.channelId.trim() : ''; + if (explicit) return explicit; + + const channel = typeof config.channel === 'string' ? config.channel.trim() : ''; + if (!channel) return null; + + // Treat values that already look like Slack conversation IDs as IDs. + if (/^[CDG][A-Z0-9]{8,}$/.test(channel)) return channel; + + const normalized = channel.startsWith('#') ? channel.slice(1) : channel; + if (!normalized) return null; + + const cached = this.channelIdCache.get(normalized); + const nowMs = Date.now(); + if (cached && cached.expiresAtMs > nowMs) return cached.channelId; + + const found = await this.lookupChannelIdByName(normalized); + if (found) { + this.channelIdCache.set(normalized, { channelId: found, expiresAtMs: nowMs + this.channelLookupTtlMs }); + } + return found; + } + + private async lookupChannelIdByName(channelName: string): Promise { + if (!this.botToken) return null; + + let cursor: string | undefined; + for (let page = 0; page < this.channelLookupMaxPages; page++) { + const qs = new URLSearchParams({ + limit: '200', + types: 'public_channel,private_channel', + exclude_archived: 'true', + }); + if (cursor) qs.set('cursor', cursor); + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs); + + try { + const response = await fetch(`https://slack.com/api/conversations.list?${qs.toString()}`, { + method: 'GET', + headers: { + Authorization: `Bearer ${this.botToken}`, + 'Content-Type': 'application/x-www-form-urlencoded', + }, + signal: controller.signal, + }); + clearTimeout(timeoutId); + + const json = (await response.json().catch(() => null)) as any; + if (!response.ok || !json || json.ok !== true) { + const err = typeof json?.error === 'string' ? json.error : 'unknown_error'; + this.logger.warn(`Slack channel lookup failed: ${err}`); + return null; + } + + const channels = Array.isArray(json.channels) ? json.channels : []; + const match = channels.find((c: any) => typeof c?.name === 'string' && c.name === channelName); + if (match && typeof match?.id === 'string') return match.id; + + cursor = + typeof json?.response_metadata?.next_cursor === 'string' ? json.response_metadata.next_cursor : ''; + if (!cursor) return null; + } catch (err: any) { + clearTimeout(timeoutId); + if (err?.name === 'AbortError') { + this.logger.warn(`Slack channel lookup timeout after ${this.timeoutMs}ms`); + return null; + } + this.logger.warn(`Slack channel lookup error: ${err?.message || String(err)}`); + return null; + } + } + + this.logger.warn( + `Slack channel lookup exceeded max pages (${this.channelLookupMaxPages}) for channel: ${channelName}`, + ); + return null; + } + + private parsePositiveInt(raw: string, defaultValue: number): number { + const n = parseInt(raw, 10); + if (!Number.isFinite(n)) return defaultValue; + if (n <= 0) return defaultValue; + return n; + } + + /** + * Record delivery attempt + */ + private async recordDelivery( + channelId: string, + eventId: string, + eventType: SlackEventType, + result: SlackDeliveryResult, + payload: SlackMessage, + ): Promise { + try { + await this.prisma.notificationDelivery.create({ + data: { + channelId, + eventId, + eventType, + success: result.success, + statusCode: result.statusCode, + error: result.error, + attempts: result.attempts, + payload: payload as any, + deliveredAt: result.deliveredAt, + }, + }); + } catch (error: any) { + this.logger.warn(`Failed to record delivery: ${error.message}`); + } + } + + /** + * Test a Slack channel configuration + */ + async testChannel(channelId: string): Promise { + const channel = await this.prisma.notificationChannel.findUnique({ + where: { id: channelId }, + }); + + if (!channel || channel.type !== 'SLACK') { + throw new Error('Slack channel not found'); + } + + const config = channel.config as unknown as SlackConfig; + const eventId = this.generateEventId(); + + const testMessage: SlackMessage = { + text: 'ByteBot Test Notification', + blocks: [ + { + type: 'header', + text: { + type: 'plain_text', + text: ':white_check_mark: ByteBot Connection Test', + emoji: true, + }, + }, + { + type: 'section', + text: { + type: 'mrkdwn', + text: 'This is a test notification from ByteBot. If you see this message, your Slack integration is working correctly!', + }, + }, + { + type: 'context', + elements: [ + { + type: 'mrkdwn', + text: `Channel: \`${channel.name}\` | Tenant: \`${channel.tenantId}\` | ${new Date().toISOString()}`, + }, + ], + }, + ], + }; + + const result = await this.deliverToChannel( + { id: channelId, config }, + SlackEventType.GOAL_STARTED, // Use a dummy event type + eventId, + testMessage, + ); + + // Update verified status + if (result.success) { + await this.prisma.notificationChannel.update({ + where: { id: channelId }, + data: { verified: true }, + }); + } + + return result; + } + + // Helper methods + + private getGoalEventStyle(eventType: SlackEventType): { emoji: string; color: string; title: string } { + switch (eventType) { + case SlackEventType.GOAL_STARTED: + return { emoji: ':rocket:', color: '#2196F3', title: 'Goal Started' }; + case SlackEventType.GOAL_COMPLETED: + return { emoji: ':white_check_mark:', color: '#4CAF50', title: 'Goal Completed' }; + case SlackEventType.GOAL_FAILED: + return { emoji: ':x:', color: '#F44336', title: 'Goal Failed' }; + case SlackEventType.GOAL_CANCELLED: + return { emoji: ':no_entry_sign:', color: '#9E9E9E', title: 'Goal Cancelled' }; + default: + return { emoji: ':information_source:', color: '#607D8B', title: 'Goal Update' }; + } + } + + private getBatchEventStyle(eventType: SlackEventType): { emoji: string; color: string; title: string } { + switch (eventType) { + case SlackEventType.BATCH_STARTED: + return { emoji: ':package:', color: '#2196F3', title: 'Batch Started' }; + case SlackEventType.BATCH_PROGRESS: + return { emoji: ':hourglass_flowing_sand:', color: '#FF9800', title: 'Batch Progress' }; + case SlackEventType.BATCH_COMPLETED: + return { emoji: ':tada:', color: '#4CAF50', title: 'Batch Completed' }; + case SlackEventType.BATCH_FAILED: + return { emoji: ':warning:', color: '#F44336', title: 'Batch Failed' }; + default: + return { emoji: ':package:', color: '#607D8B', title: 'Batch Update' }; + } + } + + private getApprovalEventStyle(eventType: SlackEventType): { emoji: string; color: string; title: string } { + switch (eventType) { + case SlackEventType.APPROVAL_REQUESTED: + return { emoji: ':raised_hand:', color: '#FF9800', title: 'Approval Required' }; + case SlackEventType.APPROVAL_APPROVED: + return { emoji: ':thumbsup:', color: '#4CAF50', title: 'Approval Granted' }; + case SlackEventType.APPROVAL_REJECTED: + return { emoji: ':thumbsdown:', color: '#F44336', title: 'Approval Rejected' }; + case SlackEventType.APPROVAL_EXPIRED: + return { emoji: ':hourglass:', color: '#9E9E9E', title: 'Approval Expired' }; + default: + return { emoji: ':clipboard:', color: '#607D8B', title: 'Approval Update' }; + } + } + + private getUserPromptEventStyle(eventType: SlackEventType): { emoji: string; color: string; title: string } { + switch (eventType) { + case SlackEventType.USER_PROMPT_CREATED: + return { emoji: ':speech_balloon:', color: '#FF9800', title: 'User Input Required' }; + case SlackEventType.USER_PROMPT_RESOLVED: + return { emoji: ':white_check_mark:', color: '#4CAF50', title: 'User Input Resolved' }; + case SlackEventType.USER_PROMPT_CANCELLED: + return { emoji: ':no_entry_sign:', color: '#9E9E9E', title: 'User Prompt Cancelled' }; + default: + return { emoji: ':speech_balloon:', color: '#607D8B', title: 'User Prompt Update' }; + } + } + + private getRiskEmoji(riskLevel: string): string { + switch (riskLevel?.toUpperCase()) { + case 'CRITICAL': + return ':rotating_light:'; + case 'HIGH': + return ':warning:'; + case 'MEDIUM': + return ':large_orange_diamond:'; + case 'LOW': + return ':large_blue_diamond:'; + default: + return ':grey_question:'; + } + } + + private buildProgressBar(progress: number): string { + const filled = Math.round(progress / 10); + const empty = 10 - filled; + return `\`[${'█'.repeat(filled)}${'░'.repeat(empty)}]\` ${progress}%`; + } + + private formatDuration(ms: number): string { + if (ms < 1000) return `${ms}ms`; + if (ms < 60000) return `${(ms / 1000).toFixed(1)}s`; + if (ms < 3600000) return `${Math.floor(ms / 60000)}m ${Math.floor((ms % 60000) / 1000)}s`; + return `${Math.floor(ms / 3600000)}h ${Math.floor((ms % 3600000) / 60000)}m`; + } + + private truncateText(text: string, maxLength: number): string { + if (text.length <= maxLength) return text; + return text.substring(0, maxLength - 3) + '...'; + } + + private generateEventId(): string { + const timestamp = Date.now().toString(36); + const random = crypto.randomBytes(8).toString('hex'); + return `slack_${timestamp}_${random}`; + } + + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/sso.service.ts b/packages/bytebot-workflow-orchestrator/src/services/sso.service.ts new file mode 100644 index 000000000..152fcf041 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/sso.service.ts @@ -0,0 +1,529 @@ +/** + * SSO (Single Sign-On) Service + * Phase 10 (v5.5.0): Enterprise Features - SAML/SSO Integration + * + * Provides SAML 2.0 SSO capabilities: + * - Service Provider (SP) metadata generation + * - SAML assertion validation + * - Just-in-time user provisioning + * - Attribute mapping + * - Session management + */ + +import { Injectable, Logger, NotFoundException, BadRequestException, UnauthorizedException } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import * as crypto from 'crypto'; + +// ============================================================================ +// Types and Interfaces +// ============================================================================ + +export enum SSOProvider { + SAML = 'saml', + OIDC = 'oidc', +} + +export interface SAMLConfig { + entityId: string; + ssoUrl: string; + sloUrl?: string; + certificate: string; + signatureAlgorithm?: 'sha256' | 'sha512'; +} + +export interface AttributeMapping { + email: string; + firstName?: string; + lastName?: string; + displayName?: string; + groups?: string; + role?: string; +} + +export interface SSOConfigInput { + provider: SSOProvider; + saml?: SAMLConfig; + attributeMapping?: AttributeMapping; + jitProvisioning?: boolean; + defaultRole?: string; + enforcedDomains?: string[]; + allowBypassSSO?: boolean; +} + +export interface SAMLAssertion { + nameId: string; + nameIdFormat?: string; + sessionIndex?: string; + attributes: Record; + issuer: string; + audience: string; + notBefore?: Date; + notOnOrAfter?: Date; +} + +export interface SSOUser { + email: string; + firstName?: string; + lastName?: string; + displayName?: string; + groups?: string[]; + role?: string; + tenantId: string; + ssoSessionId: string; + expiresAt: Date; +} + +export interface SPMetadata { + entityId: string; + acsUrl: string; + sloUrl: string; + certificate?: string; + nameIdFormat: string; + wantAssertionsSigned: boolean; +} + +// Default attribute mapping for common IdPs +const DEFAULT_ATTRIBUTE_MAPPINGS: Record = { + default: { + email: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress', + firstName: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname', + lastName: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname', + displayName: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name', + groups: 'http://schemas.microsoft.com/ws/2008/06/identity/claims/groups', + }, + okta: { + email: 'email', + firstName: 'firstName', + lastName: 'lastName', + displayName: 'displayName', + groups: 'groups', + }, + azure_ad: { + email: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress', + firstName: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname', + lastName: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname', + displayName: 'http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name', + groups: 'http://schemas.microsoft.com/ws/2008/06/identity/claims/groups', + }, + google: { + email: 'email', + firstName: 'first_name', + lastName: 'last_name', + displayName: 'name', + }, +}; + +@Injectable() +export class SSOService { + private readonly logger = new Logger(SSOService.name); + private readonly baseUrl: string; + private readonly spEntityId: string; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.baseUrl = this.configService.get('BASE_URL', 'http://localhost:8080'); + this.spEntityId = this.configService.get('SAML_SP_ENTITY_ID', `${this.baseUrl}/saml/metadata`); + this.logger.log('SSOService initialized'); + } + + // ========================================================================== + // SSO Configuration Management + // ========================================================================== + + /** + * Configure SSO for a tenant + */ + async configureSSOv(tenantId: string, input: SSOConfigInput): Promise { + // Validate tenant exists + const tenant = await this.prisma.tenant.findUnique({ + where: { id: tenantId }, + }); + + if (!tenant) { + throw new NotFoundException(`Tenant ${tenantId} not found`); + } + + // Validate SAML config if provider is SAML + if (input.provider === SSOProvider.SAML && input.saml) { + this.validateSAMLConfig(input.saml); + } + + // Create or update SSO config + const config = await this.prisma.sSOConfiguration.upsert({ + where: { tenantId }, + create: { + tenantId, + provider: input.provider, + entityId: input.saml?.entityId, + ssoUrl: input.saml?.ssoUrl, + sloUrl: input.saml?.sloUrl, + certificate: input.saml?.certificate, + signatureAlgorithm: input.saml?.signatureAlgorithm || 'sha256', + attributeMapping: (input.attributeMapping || DEFAULT_ATTRIBUTE_MAPPINGS.default) as object, + jitProvisioning: input.jitProvisioning ?? true, + defaultRole: input.defaultRole || 'member', + enforcedDomains: input.enforcedDomains || [], + allowBypassSSO: input.allowBypassSSO ?? false, + enabled: false, // Requires verification to enable + verified: false, + }, + update: { + provider: input.provider, + entityId: input.saml?.entityId, + ssoUrl: input.saml?.ssoUrl, + sloUrl: input.saml?.sloUrl, + certificate: input.saml?.certificate, + signatureAlgorithm: input.saml?.signatureAlgorithm, + attributeMapping: input.attributeMapping as object | undefined, + jitProvisioning: input.jitProvisioning, + defaultRole: input.defaultRole, + enforcedDomains: input.enforcedDomains, + allowBypassSSO: input.allowBypassSSO, + }, + }); + + this.logger.log(`SSO configured for tenant ${tenantId}`); + this.eventEmitter.emit('sso.configured', { tenantId, provider: input.provider }); + + return config; + } + + /** + * Get SSO configuration for a tenant + */ + async getSSOConfig(tenantId: string): Promise { + const config = await this.prisma.sSOConfiguration.findUnique({ + where: { tenantId }, + }); + + if (!config) { + throw new NotFoundException(`SSO not configured for tenant ${tenantId}`); + } + + // Don't return sensitive data + return { + ...config, + certificate: config.certificate ? '***CONFIGURED***' : null, + }; + } + + /** + * Enable SSO for a tenant (requires prior verification) + */ + async enableSSO(tenantId: string): Promise { + const config = await this.prisma.sSOConfiguration.findUnique({ + where: { tenantId }, + }); + + if (!config) { + throw new NotFoundException(`SSO not configured for tenant ${tenantId}`); + } + + if (!config.verified) { + throw new BadRequestException('SSO configuration must be verified before enabling'); + } + + const updated = await this.prisma.sSOConfiguration.update({ + where: { tenantId }, + data: { enabled: true }, + }); + + this.logger.log(`SSO enabled for tenant ${tenantId}`); + this.eventEmitter.emit('sso.enabled', { tenantId }); + + return updated; + } + + /** + * Disable SSO for a tenant + */ + async disableSSO(tenantId: string): Promise { + const updated = await this.prisma.sSOConfiguration.update({ + where: { tenantId }, + data: { enabled: false }, + }); + + this.logger.log(`SSO disabled for tenant ${tenantId}`); + this.eventEmitter.emit('sso.disabled', { tenantId }); + + return updated; + } + + /** + * Verify SSO configuration with a test assertion + */ + async verifySSOConfig(tenantId: string): Promise<{ verified: boolean; message: string }> { + const config = await this.prisma.sSOConfiguration.findUnique({ + where: { tenantId }, + }); + + if (!config) { + throw new NotFoundException(`SSO not configured for tenant ${tenantId}`); + } + + // Validate configuration + const validationErrors: string[] = []; + + if (!config.entityId) { + validationErrors.push('Entity ID is required'); + } + + if (!config.ssoUrl) { + validationErrors.push('SSO URL is required'); + } + + if (!config.certificate) { + validationErrors.push('IdP certificate is required'); + } + + if (validationErrors.length > 0) { + return { + verified: false, + message: `Configuration incomplete: ${validationErrors.join(', ')}`, + }; + } + + // Verify certificate format + try { + this.parseCertificate(config.certificate!); + } catch (error: any) { + return { + verified: false, + message: `Invalid certificate: ${error.message}`, + }; + } + + // Mark as verified + await this.prisma.sSOConfiguration.update({ + where: { tenantId }, + data: { verified: true }, + }); + + this.logger.log(`SSO configuration verified for tenant ${tenantId}`); + + return { + verified: true, + message: 'SSO configuration is valid and verified', + }; + } + + // ========================================================================== + // SAML Operations + // ========================================================================== + + /** + * Generate SP metadata for a tenant + */ + async generateSPMetadata(tenantId: string): Promise { + const acsUrl = `${this.baseUrl}/api/v1/sso/acs/${tenantId}`; + const sloUrl = `${this.baseUrl}/api/v1/sso/slo/${tenantId}`; + + return { + entityId: `${this.spEntityId}/${tenantId}`, + acsUrl, + sloUrl, + nameIdFormat: 'urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress', + wantAssertionsSigned: true, + }; + } + + /** + * Generate SP metadata XML + */ + async generateSPMetadataXML(tenantId: string): Promise { + const metadata = await this.generateSPMetadata(tenantId); + + return ` + + + ${metadata.nameIdFormat} + + + +`; + } + + /** + * Process SAML assertion and create user session + */ + async processAssertion(tenantId: string, assertion: SAMLAssertion): Promise { + const config = await this.prisma.sSOConfiguration.findUnique({ + where: { tenantId }, + }); + + if (!config || !config.enabled) { + throw new UnauthorizedException('SSO is not enabled for this tenant'); + } + + // Validate assertion + this.validateAssertion(assertion, config); + + // Extract user attributes + const attributeMapping = config.attributeMapping as unknown as AttributeMapping; + const user = this.extractUserFromAssertion(assertion, attributeMapping); + + // Validate domain if enforced + if (config.enforcedDomains && config.enforcedDomains.length > 0) { + const emailDomain = user.email.split('@')[1]; + if (!config.enforcedDomains.includes(emailDomain)) { + throw new UnauthorizedException(`Email domain ${emailDomain} is not allowed`); + } + } + + // Create SSO session + const sessionId = this.generateSessionId(); + const expiresAt = assertion.notOnOrAfter || new Date(Date.now() + 8 * 60 * 60 * 1000); // 8 hours default + + const ssoUser: SSOUser = { + ...user, + tenantId, + ssoSessionId: sessionId, + expiresAt, + }; + + this.logger.log(`SSO login successful for ${user.email} (tenant: ${tenantId})`); + this.eventEmitter.emit('sso.login', { tenantId, email: user.email, sessionId }); + + return ssoUser; + } + + /** + * Process Single Logout request + */ + async processSLO(tenantId: string, sessionId: string): Promise { + this.logger.log(`SSO logout for session ${sessionId} (tenant: ${tenantId})`); + this.eventEmitter.emit('sso.logout', { tenantId, sessionId }); + } + + // ========================================================================== + // Attribute Mapping + // ========================================================================== + + /** + * Get default attribute mappings for common IdPs + */ + getDefaultAttributeMappings(): Record { + return DEFAULT_ATTRIBUTE_MAPPINGS; + } + + /** + * Test attribute mapping with sample assertion + */ + testAttributeMapping( + mapping: AttributeMapping, + sampleAttributes: Record, + ): Record { + const result: Record = {}; + + for (const [field, attributeName] of Object.entries(mapping)) { + if (attributeName && sampleAttributes[attributeName]) { + result[field] = sampleAttributes[attributeName]; + } else { + result[field] = null; + } + } + + return result; + } + + // ========================================================================== + // Helper Methods + // ========================================================================== + + private validateSAMLConfig(config: SAMLConfig): void { + if (!config.entityId) { + throw new BadRequestException('Entity ID is required'); + } + + if (!config.ssoUrl) { + throw new BadRequestException('SSO URL is required'); + } + + try { + new URL(config.ssoUrl); + } catch { + throw new BadRequestException('Invalid SSO URL format'); + } + + if (config.sloUrl) { + try { + new URL(config.sloUrl); + } catch { + throw new BadRequestException('Invalid SLO URL format'); + } + } + + if (!config.certificate) { + throw new BadRequestException('IdP certificate is required'); + } + } + + private validateAssertion(assertion: SAMLAssertion, config: any): void { + const now = new Date(); + + // Check time validity + if (assertion.notBefore && assertion.notBefore > now) { + throw new UnauthorizedException('Assertion is not yet valid'); + } + + if (assertion.notOnOrAfter && assertion.notOnOrAfter < now) { + throw new UnauthorizedException('Assertion has expired'); + } + + // Check issuer matches configured IdP + if (assertion.issuer !== config.entityId) { + throw new UnauthorizedException('Assertion issuer does not match configured IdP'); + } + } + + private extractUserFromAssertion( + assertion: SAMLAssertion, + mapping: AttributeMapping, + ): Omit { + const getAttribute = (key: string): string | undefined => { + const value = assertion.attributes[key]; + return Array.isArray(value) ? value[0] : value; + }; + + const email = getAttribute(mapping.email) || assertion.nameId; + + if (!email) { + throw new UnauthorizedException('Email attribute not found in assertion'); + } + + return { + email, + firstName: mapping.firstName ? getAttribute(mapping.firstName) : undefined, + lastName: mapping.lastName ? getAttribute(mapping.lastName) : undefined, + displayName: mapping.displayName ? getAttribute(mapping.displayName) : undefined, + groups: mapping.groups + ? (assertion.attributes[mapping.groups] as string[] | undefined) + : undefined, + role: mapping.role ? getAttribute(mapping.role) : undefined, + }; + } + + private parseCertificate(certPem: string): void { + // Basic validation of PEM format + const certRegex = /-----BEGIN CERTIFICATE-----[\s\S]+-----END CERTIFICATE-----/; + if (!certRegex.test(certPem)) { + throw new Error('Certificate must be in PEM format'); + } + } + + private generateSessionId(): string { + return crypto.randomBytes(32).toString('hex'); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.needs-help-idempotency.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.needs-help-idempotency.spec.ts new file mode 100644 index 000000000..4bfead900 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.needs-help-idempotency.spec.ts @@ -0,0 +1,837 @@ +import { TaskDispatchService } from './task-dispatch.service'; +import { GoalRunExecutionEngine, GoalRunPhase, UserPromptKind } from '@prisma/client'; + +describe('TaskDispatchService NEEDS_HELP idempotency', () => { + it('emits a single needs-help activity and then stays quiet', async () => { + const prisma = { + checklistItem: { + updateMany: jest.fn(), + }, + userPrompt: { + findUnique: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + buildDedupeKey: jest.fn( + (goalRunId: string, stepId: string, kind: string) => `prompt:${goalRunId}:${stepId}:${kind}`, + ), + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + // Avoid real HTTP calls + (service as any).taskControllerClient = { delete: jest.fn() }; + // Avoid deep DB activity plumbing; focus on idempotency behavior + (service as any).emitActivityEvent = jest.fn(); + + prisma.checklistItem.updateMany.mockResolvedValueOnce({ count: 1 }).mockResolvedValueOnce({ count: 0 }); + prisma.userPrompt.findUnique.mockResolvedValue(null); + prisma.goalRun.findUnique.mockResolvedValue({ phase: GoalRunPhase.EXECUTING, tenantId: 't-1' }); + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }).mockResolvedValueOnce({ count: 0 }); + + userPromptService.ensureOpenPromptForStep.mockResolvedValue({ + id: 'p-1', + kind: UserPromptKind.TEXT_CLARIFICATION, + dedupeKey: 'prompt:gr-1:ci-1:TEXT_CLARIFICATION', + }); + + const record: any = { + idempotencyKey: 'gr-1:ci-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'Need clarification', + result: { errorCode: 'DISPATCHED_USER_PROMPT_STEP', message: 'Which account should I use?' }, + error: null, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + await (service as any).handleTaskNeedsHelp(record, task); + + expect(record.status).toBe('WAITING_USER'); + expect((service as any).taskControllerClient.delete).toHaveBeenCalledTimes(1); + expect((service as any).emitActivityEvent).toHaveBeenCalledTimes(2); + expect((service as any).emitActivityEvent).toHaveBeenCalledWith( + record.goalRunId, + 'USER_PROMPT_CREATED', + expect.any(String), + expect.objectContaining({ promptId: 'p-1' }), + ); + expect((service as any).emitActivityEvent).toHaveBeenCalledWith( + record.goalRunId, + 'STEP_NEEDS_HELP', + expect.any(String), + expect.objectContaining({ checklistItemId: record.checklistItemId }), + ); + expect(outboxService.enqueueOnce).toHaveBeenCalledTimes(1); + expect(eventEmitter.emit).toHaveBeenCalledWith('goal-run.phase-changed', expect.anything()); + }); + + it('preserves desktop for DESKTOP_TAKEOVER and extends timeout once', async () => { + const prisma = { + checklistItem: { + updateMany: jest.fn(), + }, + userPrompt: { + findUnique: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + buildDedupeKey: jest.fn( + (goalRunId: string, stepId: string, kind: string) => `prompt:${goalRunId}:${stepId}:${kind}`, + ), + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + // Avoid real HTTP calls + (service as any).taskControllerClient = { delete: jest.fn(), post: jest.fn().mockResolvedValue({}) }; + // Avoid deep DB activity plumbing; focus on idempotency behavior + (service as any).emitActivityEvent = jest.fn(); + + prisma.checklistItem.updateMany.mockResolvedValueOnce({ count: 1 }).mockResolvedValueOnce({ count: 0 }); + prisma.userPrompt.findUnique.mockResolvedValue(null); + prisma.goalRun.findUnique.mockResolvedValue({ phase: GoalRunPhase.EXECUTING, tenantId: 't-1' }); + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }).mockResolvedValueOnce({ count: 0 }); + + userPromptService.ensureOpenPromptForStep.mockResolvedValue({ + id: 'p-1', + kind: UserPromptKind.DESKTOP_TAKEOVER, + dedupeKey: 'prompt:gr-1:ci-1:DESKTOP_TAKEOVER', + }); + + const record: any = { + idempotencyKey: 'gr-1:ci-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'User takeover required', + result: { errorCode: 'UI_BLOCKED_SIGNIN', message: 'MFA prompt in browser' }, + error: null, + requiresDesktop: true, + workspaceId: null, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + await (service as any).handleTaskNeedsHelp(record, task); + + expect(record.status).toBe('WAITING_USER'); + expect(userPromptService.ensureOpenPromptForStep).toHaveBeenCalledWith( + expect.objectContaining({ kind: UserPromptKind.DESKTOP_TAKEOVER }), + ); + expect((service as any).taskControllerClient.delete).not.toHaveBeenCalled(); + expect((service as any).taskControllerClient.post).toHaveBeenCalledTimes(1); + expect((service as any).taskControllerClient.post).toHaveBeenCalledWith( + `/api/v1/tasks/${record.taskId}/extend`, + expect.objectContaining({ additionalMinutes: 60 }), + ); + expect((service as any).emitActivityEvent).toHaveBeenCalledTimes(2); + expect((service as any).emitActivityEvent).toHaveBeenCalledWith( + record.goalRunId, + 'USER_PROMPT_CREATED', + expect.any(String), + expect.objectContaining({ promptId: 'p-1' }), + ); + expect((service as any).emitActivityEvent).toHaveBeenCalledWith( + record.goalRunId, + 'STEP_NEEDS_HELP', + expect.any(String), + expect.objectContaining({ checklistItemId: record.checklistItemId }), + ); + expect(outboxService.enqueueOnce).toHaveBeenCalledTimes(1); + expect(eventEmitter.emit).toHaveBeenCalledWith('goal-run.phase-changed', expect.anything()); + }); + + it('creates a step prompt without ChecklistItem FK for TEMPORAL_WORKFLOW runs', async () => { + const prisma = { + checklistItem: { + updateMany: jest.fn(), + }, + userPrompt: { + findUnique: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + buildDedupeKey: jest.fn( + (goalRunId: string, stepId: string, kind: string) => `prompt:${goalRunId}:${stepId}:${kind}`, + ), + ensureOpenPromptForStep: jest.fn(), + ensureOpenPromptForStepKey: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + // Avoid real HTTP calls + (service as any).taskControllerClient = { delete: jest.fn() }; + // Avoid deep DB activity plumbing; focus on idempotency + Temporal FK safety + (service as any).emitActivityEvent = jest.fn(); + + prisma.goalRun.findUnique.mockResolvedValue({ + phase: GoalRunPhase.EXECUTING, + tenantId: 't-1', + executionEngine: GoalRunExecutionEngine.TEMPORAL_WORKFLOW, + }); + prisma.userPrompt.findUnique.mockResolvedValue(null); + prisma.goalRun.updateMany.mockResolvedValueOnce({ count: 1 }).mockResolvedValueOnce({ count: 0 }); + + userPromptService.ensureOpenPromptForStepKey.mockResolvedValue({ + id: 'p-1', + kind: UserPromptKind.TEXT_CLARIFICATION, + dedupeKey: 'prompt:gr-1:step-1:TEXT_CLARIFICATION', + }); + + const record: any = { + idempotencyKey: 'gr-1:gr-1-step-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'gr-1-step-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'Need clarification', + result: { errorCode: 'DISPATCHED_USER_PROMPT_STEP', message: 'Which account should I use?' }, + error: null, + requiresDesktop: false, + workspaceId: null, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + await (service as any).handleTaskNeedsHelp(record, task); + + expect(record.status).toBe('WAITING_USER'); + expect(userPromptService.ensureOpenPromptForStep).not.toHaveBeenCalled(); + expect(userPromptService.ensureOpenPromptForStepKey).toHaveBeenCalledWith( + expect.objectContaining({ goalRunId: 'gr-1', stepKey: 'step-1', kind: UserPromptKind.TEXT_CLARIFICATION }), + ); + expect(prisma.checklistItem.updateMany).not.toHaveBeenCalled(); + expect(outboxService.enqueueOnce).toHaveBeenCalledWith( + expect.objectContaining({ + payload: expect.objectContaining({ + checklistItemId: null, + stepKey: 'step-1', + }), + }), + ); + }); + + it('is restart-safe: if an OPEN prompt already exists, it does not re-emit NEEDS_HELP side effects', async () => { + const prisma = { + checklistItem: { + updateMany: jest.fn(), + findUnique: jest.fn(), + }, + userPrompt: { + findUnique: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + buildDedupeKey: jest.fn( + (goalRunId: string, stepId: string, kind: string) => `prompt:${goalRunId}:${stepId}:${kind}`, + ), + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + (service as any).taskControllerClient = { delete: jest.fn(), post: jest.fn() }; + (service as any).emitActivityEvent = jest.fn(); + + prisma.goalRun.findUnique + .mockResolvedValueOnce({ tenantId: 't-1', executionEngine: GoalRunExecutionEngine.LEGACY_DB_LOOP }) + .mockResolvedValueOnce({ phase: GoalRunPhase.WAITING_USER_INPUT }); + prisma.goalRun.updateMany.mockResolvedValue({ count: 0 }); + + prisma.userPrompt.findUnique.mockResolvedValue({ + id: 'p-1', + status: 'OPEN', + kind: UserPromptKind.TEXT_CLARIFICATION, + dedupeKey: 'prompt:gr-1:ci-1:TEXT_CLARIFICATION', + }); + + prisma.checklistItem.updateMany.mockResolvedValue({ count: 0 }); + prisma.checklistItem.findUnique.mockResolvedValue({ + status: 'BLOCKED', + blockedByPromptId: 'p-1', + }); + + const record: any = { + idempotencyKey: 'gr-1:ci-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'Need clarification', + result: { errorCode: 'DISPATCHED_USER_PROMPT_STEP', message: 'Which account should I use?' }, + error: null, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + + expect(record.status).toBe('WAITING_USER'); + expect(userPromptService.ensureOpenPromptForStep).not.toHaveBeenCalled(); + expect(outboxService.enqueueOnce).toHaveBeenCalledTimes(1); + expect((service as any).emitActivityEvent).not.toHaveBeenCalled(); + expect((service as any).taskControllerClient.delete).not.toHaveBeenCalled(); + expect((service as any).taskControllerClient.post).not.toHaveBeenCalled(); + }); + + it('does not create a user prompt for internal desktop safety interrupts', async () => { + const prisma = { + checklistItem: { + update: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + // Avoid deep DB activity plumbing; focus on classification behavior + (service as any).emitActivityEvent = jest.fn(); + + const record: any = { + idempotencyKey: 'gr-1:ci-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'Desktop loop detected', + requiresDesktop: true, + result: { + errorCode: 'LOOP_DETECTED_NO_PROGRESS', + message: 'Desktop automation appears stuck.', + details: { repeatThreshold: 5 }, + }, + error: null, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + + expect(record.status).toBe('FAILED'); + expect(userPromptService.ensureOpenPromptForStep).not.toHaveBeenCalled(); + expect(outboxService.enqueueOnce).not.toHaveBeenCalled(); + expect((service as any).emitActivityEvent).toHaveBeenCalledWith( + record.goalRunId, + 'ERROR', + expect.any(String), + expect.objectContaining({ checklistItemId: record.checklistItemId }), + ); + expect(prisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: record.checklistItemId }, + data: expect.objectContaining({ status: 'FAILED' }), + }), + ); + }); + + it('does not create a user prompt for AGENT_REQUESTED_HELP (strategy-only)', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + update: jest.fn(), + }, + checklistItem: { + findUnique: jest.fn(), + update: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + buildDedupeKey: jest.fn( + (goalRunId: string, stepId: string, kind: string) => `prompt:${goalRunId}:${stepId}:${kind}`, + ), + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + (service as any).handleTaskFailed = jest.fn(); + (service as any).markAsInfrastructureFailure = jest.fn(); + (service as any).emitActivityEvent = jest.fn(); + + prisma.goalRun.findUnique.mockResolvedValue({ + goal: 'Search for flights and find the cheapest option', + constraints: {}, + }); + prisma.goalRun.update.mockResolvedValue({}); + prisma.checklistItem.findUnique.mockResolvedValue({ + description: 'Search for flights to Paris next week', + }); + prisma.checklistItem.update.mockResolvedValue({}); + + const record: any = { + idempotencyKey: 'gr-1:ci-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'Clarification requested', + requiresDesktop: false, + result: { + errorCode: 'AGENT_REQUESTED_HELP', + message: 'Which site should I use?', + }, + error: null, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + + expect((service as any).handleTaskFailed).not.toHaveBeenCalled(); + expect(userPromptService.ensureOpenPromptForStep).not.toHaveBeenCalled(); + expect((service as any).markAsInfrastructureFailure).toHaveBeenCalled(); + expect(prisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: record.checklistItemId }, + data: expect.objectContaining({ + description: expect.stringContaining('[ByteBot Policy] Strategy default'), + }), + }), + ); + expect(prisma.goalRun.update).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: record.goalRunId }, + data: expect.objectContaining({ + constraints: expect.objectContaining({ + strategyDefaults: expect.objectContaining({ + flightSite: expect.objectContaining({ + name: 'Google Flights', + }), + }), + }), + }), + }), + ); + }); + + it('falls back to semantic failure if strategy default already injected (avoids infinite retry)', async () => { + const prisma = { + goalRun: { + findUnique: jest.fn(), + update: jest.fn(), + }, + checklistItem: { + findUnique: jest.fn(), + update: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + buildDedupeKey: jest.fn(), + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + (service as any).handleTaskFailed = jest.fn(); + (service as any).markAsInfrastructureFailure = jest.fn(); + (service as any).emitActivityEvent = jest.fn(); + + prisma.goalRun.findUnique.mockResolvedValue({ + goal: 'Search for flights and find the cheapest option', + constraints: {}, + }); + prisma.checklistItem.findUnique.mockResolvedValue({ + description: + 'Search for flights to Paris.\n\n[ByteBot Policy] Strategy default: Use Google Flights (https://www.google.com/travel/flights).', + }); + + const record: any = { + idempotencyKey: 'gr-1:ci-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'Clarification requested', + requiresDesktop: false, + result: { + errorCode: 'AGENT_REQUESTED_HELP', + message: 'Which site should I use?', + }, + error: null, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + + expect((service as any).markAsInfrastructureFailure).not.toHaveBeenCalled(); + expect((service as any).handleTaskFailed).toHaveBeenCalled(); + expect(userPromptService.ensureOpenPromptForStep).not.toHaveBeenCalled(); + }); + + it('auto-upgrades DESKTOP_NOT_ALLOWED to DESKTOP and schedules a retry', async () => { + const prisma = { + checklistItem: { + updateMany: jest.fn(), + update: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + } as any; + + const dbTransientService = { + isInBackoff: jest.fn(() => false), + getBackoffRemainingMs: jest.fn(() => 0), + withTransientGuard: jest.fn(async (fn: any) => fn()), + } as any; + + const configService = { + get: jest.fn((_key: string, fallback: string) => fallback), + } as any; + + const eventEmitter = { + emit: jest.fn(), + } as any; + + const userPromptService = { + buildDedupeKey: jest.fn(), + ensureOpenPromptForStep: jest.fn(), + } as any; + + const outboxService = { + enqueueOnce: jest.fn(), + } as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + // Avoid real HTTP calls and activity plumbing + (service as any).taskControllerClient = { delete: jest.fn(), post: jest.fn() }; + (service as any).emitActivityEvent = jest.fn(); + + prisma.checklistItem.updateMany.mockResolvedValueOnce({ count: 1 }); + prisma.checklistItem.update.mockResolvedValueOnce({}); + prisma.goalRun.findUnique.mockResolvedValue({ tenantId: 't-1', executionEngine: 'LEGACY_DB_LOOP' }); + + const record: any = { + idempotencyKey: 'gr-1:ci-1:1', + taskId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + status: 'RUNNING', + createdAt: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + + const task: any = { + id: 't-1', + status: 'NEEDS_HELP', + title: 'Desktop tools requested', + result: { errorCode: 'DESKTOP_NOT_ALLOWED', message: 'Misrouted as TEXT_ONLY' }, + error: null, + requiresDesktop: false, + }; + + await (service as any).handleTaskNeedsHelp(record, task); + + expect(prisma.checklistItem.updateMany).toHaveBeenCalledWith( + expect.objectContaining({ + where: expect.objectContaining({ id: 'ci-1', requiresDesktop: false }), + }), + ); + + // markAsInfrastructureFailure uses updateChecklistItemStatus which updates the item to FAILED with [INFRA] marker. + expect(prisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: 'ci-1' }, + data: expect.objectContaining({ + status: 'FAILED', + actualOutcome: expect.stringContaining('[INFRA]'), + }), + }), + ); + + expect(record.status).toBe('INFRA_FAILED'); + expect(outboxService.enqueueOnce).not.toHaveBeenCalled(); + expect(userPromptService.ensureOpenPromptForStep).not.toHaveBeenCalled(); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.service.ts b/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.service.ts new file mode 100644 index 000000000..b0ed33837 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.service.ts @@ -0,0 +1,2230 @@ +/** + * Task Dispatch Service + * v1.1.0: DB transient error resilience for polling + * v1.0.0: Poll-based task execution adapter + * + * Purpose: Bridge between orchestrator's checklist/workflow model and + * bytebot-agent's poll-based task system. + * + * Architecture Decision: Poll-based (not Push-based) + * - Creates tasks via bytebot-agent's API (POST /tasks) + * - Agent polls and claims tasks using claimNextTask() + * - Orchestrator polls for task completion + * - Updates checklist items based on results + * + * Key Features: + * - Idempotent task creation using goalRunId:checklistItemId:attempt key + * - Task completion detection via polling + * - Graceful error handling and retry support + * - Activity event emission for UI updates + * - v1.1.0: DB transient error handling with backoff + * + * @see /docs/ORCHESTRATOR_FIXES_DEC_2025.md + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2, OnEvent } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { DbTransientService } from './db-transient.service'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import axios, { AxiosInstance } from 'axios'; +import { + ChecklistItemStatus, + ExecutionSurface, + GoalRunExecutionEngine, + GoalRunPhase, + GoalRunWaitReason, + UserPromptKind, + UserPromptStatus, +} from '@prisma/client'; +import { UserPromptService } from './user-prompt.service'; +import { OutboxService } from './outbox.service'; +import { inferGoalFeasibility } from '../contracts/goal-feasibility'; +import { hasDesktopExecutionTool } from '../contracts/planner-tools'; + +// Phase 12: Task-based model routing configuration +// Phase 14.1: Updated browser model from fara-7b to gpt-oss-120b (2026-01-07) +// - fara-7b was scaled down (0/0 replicas) causing LLM connection errors +// - gpt-oss-120b is running (1/1 replicas) with 131K context window +// - 120B parameter model provides superior reasoning for browser automation +// +// Routes tasks to appropriate in-house/external models based on execution surface: +// - Browser tasks (requiresDesktop: false) → gpt-oss-120b (high-capability reasoning) +// - Desktop tasks (requiresDesktop: true) → claude-sonnet-4-5 (vision-capable model) +// - Fallback → claude-sonnet-4-5 (external API for complex reasoning) +// +// Model configuration for bytebot-agent's task schema +const BROWSER_TASK_MODEL = { + name: 'openai/gpt-oss-120b', + title: 'gpt-oss-120b', + provider: 'proxy', + contextWindow: 131072, +}; + +const DESKTOP_TASK_MODEL = { + // Phase 15: Switch desktop execution to Sonnet 4.5 for reliability. + // Note: Proxy layer treats claude-sonnet-4-5 as a desktop-vision model for endpoint ordering + no-cache. + name: 'claude-sonnet-4-5', + title: 'claude-sonnet-4-5', + provider: 'proxy', + contextWindow: 128000, +}; + +// Fallback model for tasks that don't specify execution surface +// or when in-house models fail +const FALLBACK_TASK_MODEL = { + name: 'claude-sonnet-4-5', + title: 'claude-sonnet-4-5', + provider: 'proxy', + contextWindow: 128000, +}; + +// Legacy constant for backward compatibility +const DEFAULT_TASK_MODEL = FALLBACK_TASK_MODEL; + +// Phase 2.1: Transient error handling configuration +// Only treat status check failures as problematic after exceeding threshold +const STATUS_CHECK_FAILURE_THRESHOLD = 6; // After 6 failures (~30 seconds at 5s interval) +const STATUS_CHECK_FAILURE_WINDOW_MS = 120000; // 2 minutes of consecutive failures = stale + +// v1.1.0: 404 tolerance configuration +// When a task is not found (404), it may have completed and been GC'd +// Enter grace window to allow fallback lookups before declaring failure +const NOTFOUND_GRACE_DEFAULT_MS = 60000; // 60 seconds to match TTL + +// v1.2.0 Phase C: Heartbeat-based timeout configuration +// Agent sends heartbeat every 15 seconds. Orchestrator polls every 5 seconds. +// v5.8.0: Option C Industry Standard - Increased tolerance for browser automation +// Browser automation can block for 30-60+ seconds during: +// - Heavy page loads with large JS bundles +// - Complex form submissions and file uploads +// - PDF rendering and screenshot captures +// Best practice (per industry research): 90-120 second tolerance +// At 5s polling: 18 checks = 90 second tolerance before timeout +// Combined with new heartbeat retry budget (5 retries via FailureClassificationService), +// total tolerance before REPLAN is: 90s × 5 retries = 450 seconds (~7.5 minutes) +const HEARTBEAT_UNHEALTHY_THRESHOLD = 18; // Consecutive unhealthy checks before timeout (was 9) + +// Failure type classification for replan semantics +type FailureType = 'SEMANTIC' | 'INFRASTRUCTURE' | 'UNKNOWN'; + +// v2.4.1: Context summarization configuration +// Prevents excessively long context from overwhelming the agent +const CONTEXT_SUMMARIZATION_CONFIG = { + maxPreviousStepsDetailed: 5, // Show full detail for last N steps + maxContextChars: 4000, // Max chars for previous step results + summaryFormat: 'numbered', // 'numbered' or 'bullet' +}; + +// Task status in bytebot-agent system +type AgentTaskStatus = 'PENDING' | 'RUNNING' | 'COMPLETED' | 'FAILED' | 'CANCELLED' | 'NEEDS_HELP' | 'NEEDS_REVIEW'; + +// Task dispatch request +interface TaskDispatchRequest { + goalRunId: string; + checklistItemId: string; + planVersionId?: string; + workspaceId?: string; + // Task details + title: string; + description: string; + expectedOutcome?: string; + // Execution config + allowedTools?: string[]; + highRiskTools?: string[]; + requiresDesktop?: boolean; + executionSurface?: ExecutionSurface; + // Retry tracking + attempt?: number; + // v2.4.0: Context propagation for autonomous operation + // Helps agent understand broader context and proceed without asking for clarification + goalContext?: string; + previousStepResults?: string; +} + +// Task created in agent system +interface AgentTask { + id: string; + description: string; + title?: string; + status: AgentTaskStatus; + workspaceId?: string; + nodeRunId?: string; + requiresDesktop?: boolean; + control?: 'USER' | 'ASSISTANT'; + result?: any; + error?: string; + createdAt: string; + completedAt?: string; +} + +type NeedsHelpResult = { + errorCode?: string; + message?: string; + details?: Record; +}; + +const CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP = 'CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP'; +const CONTRACT_VIOLATION_STRATEGY_AS_HELP = 'CONTRACT_VIOLATION_STRATEGY_AS_HELP'; + +const STRATEGY_DEFAULT_MARKER = '[ByteBot Policy] Strategy default'; + +type StrategyDefaultDecision = { + key: string; + name: string; + url: string; +}; + +// Task dispatch result +interface TaskDispatchResult { + success: boolean; + taskId?: string; + error?: string; +} + +// v1.2.0 Phase C: Heartbeat health response from task controller +interface HeartbeatHealthResponse { + taskId: string; + phase: string; + agentHeartbeat: string | null; + heartbeatMissedCount: number; + isHeartbeatHealthy: boolean; + lastActivityAt: string | null; + timeSinceHeartbeat: number | null; +} + +// Dispatch record for tracking +interface DispatchRecord { + idempotencyKey: string; + taskId: string; + goalRunId: string; + checklistItemId: string; + status: 'DISPATCHED' | 'RUNNING' | 'WAITING_USER' | 'COMPLETED' | 'FAILED' | 'INFRA_FAILED'; + createdAt: Date; + completedAt?: Date; + // Phase 2.1: Track status check health for transient error handling + lastSuccessfulCheck?: Date; + consecutiveCheckFailures: number; + // v1.1.0: 404 tolerance tracking + notFoundGraceStartedAt?: Date; // When 404 grace window started + notFoundCount: number; // Number of 404s seen during grace window + // v1.1.0: Failure type classification + failureType?: FailureType; // SEMANTIC = replan, INFRASTRUCTURE = retry + // v1.2.0 Phase C: Heartbeat health tracking + lastHeartbeatCheck?: Date; // When we last checked heartbeat health + lastHeartbeatTime?: Date; // Agent's last heartbeat timestamp + isHeartbeatHealthy: boolean; // Current heartbeat health status + consecutiveHeartbeatUnhealthy: number; // Consecutive unhealthy checks +} + +@Injectable() +export class TaskDispatchService implements OnModuleInit { + private readonly logger = new Logger(TaskDispatchService.name); + private readonly httpClient: AxiosInstance; + private readonly agentApiUrl: string; + private readonly pollIntervalMs: number; + private readonly dispatchEnabled: boolean; + // v1.1.0: 404 tolerance grace window configuration + private readonly notFoundGraceMs: number; + // v1.2.0 Phase C: Task controller client for heartbeat health + private readonly taskControllerClient: AxiosInstance; + private readonly taskControllerUrl: string; + // v1.2.1: Configurable heartbeat unhealthy threshold for browser automation tolerance + private readonly heartbeatUnhealthyThreshold: number; + + // In-memory dispatch tracking (for MVP, will move to DB) + private dispatchRecords: Map = new Map(); + + constructor( + private readonly configService: ConfigService, + private readonly prisma: PrismaService, + private readonly dbTransientService: DbTransientService, + private readonly eventEmitter: EventEmitter2, + private readonly userPromptService: UserPromptService, + private readonly outboxService: OutboxService, + ) { + // Configure agent API connection (bytebot-agent service runs on port 9991) + this.agentApiUrl = this.configService.get('BYTEBOT_AGENT_API_URL', 'http://bytebot-agent:9991'); + this.pollIntervalMs = parseInt( + this.configService.get('TASK_POLL_INTERVAL_MS', '5000'), + 10, + ); + this.dispatchEnabled = this.configService.get('TASK_DISPATCH_ENABLED', 'true') === 'true'; + // v1.1.0: 404 tolerance grace window (matches task-controller TTL) + this.notFoundGraceMs = parseInt( + this.configService.get('TASK_STATUS_NOTFOUND_GRACE_MS', String(NOTFOUND_GRACE_DEFAULT_MS)), + 10, + ); + + // Create HTTP client with timeout + this.httpClient = axios.create({ + baseURL: this.agentApiUrl, + timeout: 30000, + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.configService.get('INTERNAL_SERVICE_TOKEN', ''), + }, + }); + + // v1.2.0 Phase C: Task controller client for heartbeat health checks + this.taskControllerUrl = this.configService.get( + 'TASK_CONTROLLER_URL', + 'http://bytebot-task-controller:3000', + ); + this.taskControllerClient = axios.create({ + baseURL: this.taskControllerUrl, + timeout: 5000, // Short timeout for health checks + headers: { + 'Content-Type': 'application/json', + }, + }); + + // v1.2.1: Configurable heartbeat unhealthy threshold + // Default 9 = 45 seconds at 5s polling (browser automation tolerance) + // Old default was 3 = 15 seconds (too aggressive for browser automation) + this.heartbeatUnhealthyThreshold = parseInt( + this.configService.get('HEARTBEAT_UNHEALTHY_THRESHOLD', String(HEARTBEAT_UNHEALTHY_THRESHOLD)), + 10, + ); + this.logger.log(`Heartbeat unhealthy threshold: ${this.heartbeatUnhealthyThreshold} consecutive checks`); + } + + async onModuleInit(): Promise { + this.logger.log( + `Task Dispatch Service initialized (enabled: ${this.dispatchEnabled}, ` + + `agent: ${this.agentApiUrl}, taskController: ${this.taskControllerUrl})`, + ); + + if (this.dispatchEnabled) { + // v5.11.4: Defer heavy initialization to allow HTTP server to start first + // This is critical for Kubernetes probe success - the HTTP server MUST be + // listening before probes run, otherwise pods will be killed. + // Recovery of 67+ in-progress items can take minutes due to sequential API calls. + const STARTUP_DELAY_MS = 3000; + this.logger.log(`Deferring in-flight dispatch recovery for ${STARTUP_DELAY_MS}ms to allow HTTP server startup`); + + setTimeout(() => { + this.logger.log('HTTP server should be ready, recovering in-flight dispatches'); + this.recoverInFlightDispatches() + .then(() => { + this.logger.log('In-flight dispatch recovery completed'); + }) + .catch((error) => { + this.logger.error(`Failed to recover in-flight dispatches: ${error.message}`, error.stack); + }); + }, STARTUP_DELAY_MS); + } + } + + /** + * Generate idempotency key for task dispatch + * Format: goalRunId:checklistItemId:attempt + */ + private generateIdempotencyKey(goalRunId: string, checklistItemId: string, attempt: number = 1): string { + return `${goalRunId}:${checklistItemId}:${attempt}`; + } + + /** + * Phase 12: Select model based on task execution surface + * + * Routes tasks to appropriate in-house models: + * - Desktop tasks (requiresDesktop: true) → desktop-vision (vision model group with fallbacks) + * - Browser tasks (requiresDesktop: false) → gpt-oss-120b (optimized for web) + * - Fallback when surface unspecified → claude-sonnet-4-5 + */ + private selectModelForTask(requiresDesktop?: boolean): typeof BROWSER_TASK_MODEL { + if (requiresDesktop === true) { + this.logger.debug('Selected DESKTOP_TASK_MODEL (claude-sonnet-4-5) for desktop task'); + return DESKTOP_TASK_MODEL; + } + + if (requiresDesktop === false) { + this.logger.debug('Selected BROWSER_TASK_MODEL (gpt-oss-120b) for browser task'); + return BROWSER_TASK_MODEL; + } + + // Fallback for unspecified execution surface + this.logger.debug('Selected FALLBACK_TASK_MODEL (claude-sonnet-4-5) - execution surface not specified'); + return FALLBACK_TASK_MODEL; + } + + /** + * Dispatch a task to the agent system + * Creates a task in bytebot-agent's database via API + */ + async dispatchTask(request: TaskDispatchRequest): Promise { + if (!this.dispatchEnabled) { + this.logger.warn('Task dispatch disabled, skipping'); + return { success: false, error: 'Task dispatch disabled' }; + } + + // Feasibility gate (defense-in-depth): certain goals (e.g., travel-shopping) must not dispatch as TEXT_ONLY. + // This prevents runs from stalling due to planner/tool drift (missing browser tools). + // Prefer the full goal context when present, but fall back to the step description/title so + // travel-shopping goals cannot silently misroute to TEXT_ONLY when goalContext propagation is missing. + const feasibilityText = request.goalContext ?? request.description ?? request.title; + const goalFeasibility = inferGoalFeasibility(feasibilityText); + const mustUseDesktopSurface = goalFeasibility?.requiredSurface === ExecutionSurface.DESKTOP; + const requestedSurface = + request.executionSurface ?? + ((request.requiresDesktop ?? false) ? ExecutionSurface.DESKTOP : ExecutionSurface.TEXT_ONLY); + + const effectiveAllowedTools = Array.isArray(request.allowedTools) ? [...request.allowedTools] : []; + + let effectiveRequiresDesktop = request.requiresDesktop ?? false; + let effectiveExecutionSurface = requestedSurface; + + if (mustUseDesktopSurface) { + const needsBrowserHint = !hasDesktopExecutionTool(effectiveAllowedTools); + if (needsBrowserHint) { + effectiveAllowedTools.push('browser'); + } + + if (!effectiveRequiresDesktop || effectiveExecutionSurface === ExecutionSurface.TEXT_ONLY) { + this.logger.warn({ + message: 'Feasibility auto-upgrade: goal requires DESKTOP (travel-shopping)', + goalRunId: request.goalRunId, + checklistItemId: request.checklistItemId, + previousRequiresDesktop: effectiveRequiresDesktop, + previousExecutionSurface: effectiveExecutionSurface, + reason: goalFeasibility?.reason, + }); + } + + effectiveRequiresDesktop = true; + effectiveExecutionSurface = ExecutionSurface.DESKTOP; + } + + const attempt = request.attempt || 1; + const idempotencyKey = this.generateIdempotencyKey( + request.goalRunId, + request.checklistItemId, + attempt, + ); + + // Check for existing dispatch (idempotency) + const existing = this.dispatchRecords.get(idempotencyKey); + if (existing) { + this.logger.debug(`Task already dispatched for ${idempotencyKey}, returning existing taskId: ${existing.taskId}`); + return { success: true, taskId: existing.taskId }; + } + + // Phase 12: Select model based on execution surface + const selectedModel = this.selectModelForTask(effectiveRequiresDesktop); + + this.logger.log( + `Dispatching task for checklist item ${request.checklistItemId} ` + + `(attempt ${attempt}, model: ${selectedModel.title}, requiresDesktop: ${effectiveRequiresDesktop}, surface: ${effectiveExecutionSurface})`, + ); + + try { + // Build task payload for bytebot-agent + // v1.0.1: Added required 'model' field for agent task schema + // v1.0.2: Phase 4 - Forward requiresDesktop for execution surface constraints + // v2.4.0: Added goalContext and previousStepResults for autonomous operation + // Phase 12: Task-based model routing + const taskPayload = { + description: request.description, + title: request.title, + type: 'IMMEDIATE', + priority: 'HIGH', + // Phase 12: Model selected based on requiresDesktop + model: selectedModel, + // Workflow integration fields + workspaceId: request.workspaceId, + // Map checklistItemId to nodeRunId for agent tracking + nodeRunId: request.checklistItemId, + // Tool configuration + allowedTools: effectiveAllowedTools, + highRiskTools: request.highRiskTools || [], + gatewayToolsOnly: false, + // Phase 4: Execution surface constraints - forward desktop requirement + requiresDesktop: effectiveRequiresDesktop, + // PR5: ExecutionSurface propagation (end-to-end) + executionSurface: effectiveExecutionSurface, + // v2.4.0: Context propagation for autonomous operation + // Provides goal context and previous step results to help agent proceed without asking + goalContext: request.goalContext, + previousStepResults: request.previousStepResults, + }; + + // Create task via agent API + const response = await this.httpClient.post('/tasks', taskPayload); + + if (response.status === 201 || response.status === 200) { + const task: AgentTask = response.data; + + // Record the dispatch + const record: DispatchRecord = { + idempotencyKey, + taskId: task.id, + goalRunId: request.goalRunId, + checklistItemId: request.checklistItemId, + status: 'DISPATCHED', + createdAt: new Date(), + // Phase 2.1: Initialize status check tracking + lastSuccessfulCheck: new Date(), + consecutiveCheckFailures: 0, + // v1.1.0: Initialize 404 tolerance tracking + notFoundCount: 0, + // v1.2.0 Phase C: Initialize heartbeat health tracking + isHeartbeatHealthy: true, // Assume healthy until first check + consecutiveHeartbeatUnhealthy: 0, + }; + this.dispatchRecords.set(idempotencyKey, record); + + // Emit activity event + await this.emitActivityEvent(request.goalRunId, 'STEP_DISPATCHED', request.title, { + checklistItemId: request.checklistItemId, + taskId: task.id, + attempt, + }); + + this.logger.log(`Task dispatched successfully: ${task.id} for item ${request.checklistItemId}`); + return { success: true, taskId: task.id }; + } + + return { success: false, error: `Unexpected response status: ${response.status}` }; + } catch (error: any) { + const errorMsg = error.response?.data?.message || error.message || 'Unknown error'; + this.logger.error(`Failed to dispatch task for ${request.checklistItemId}: ${errorMsg}`); + + // Emit error activity + await this.emitActivityEvent(request.goalRunId, 'ERROR', `Failed to dispatch step: ${request.title}`, { + checklistItemId: request.checklistItemId, + error: errorMsg, + attempt, + }); + + return { success: false, error: errorMsg }; + } + } + + /** + * Task status result with metadata for 404 handling + * v1.1.0: Extended to support 404 tolerance + */ + private async getTaskStatusWithMeta(taskId: string): Promise<{ + task: AgentTask | null; + notFound: boolean; + error?: string; + }> { + try { + const response = await this.httpClient.get(`/tasks/${taskId}`); + return { task: response.data, notFound: false }; + } catch (error: any) { + // Differentiate 404 from other errors + const status = error.response?.status; + if (status === 404 || status === 410) { + // 404 = Not Found, 410 = Gone (intentionally GC'd) + return { task: null, notFound: true }; + } + // Other errors (network, 500, etc.) + this.logger.error(`Failed to get task status for ${taskId}: ${error.message}`); + return { task: null, notFound: false, error: error.message }; + } + } + + /** + * Check task status in agent system + * @deprecated Use getTaskStatusWithMeta for 404 awareness + */ + async getTaskStatus(taskId: string): Promise { + const result = await this.getTaskStatusWithMeta(taskId); + return result.task; + } + + /** + * v1.1.0: Fallback lookup - query agent API for task by nodeRunId + * Used when agent API returns 404 (task GC'd before we could poll) + * + * Note: We can't access the Task table directly (it's in bytebot-agent DB). + * Instead, we try an alternative agent API endpoint if available. + */ + private async getTaskStatusFromDatabase(taskId: string): Promise<{ + status: AgentTaskStatus | null; + result?: any; + error?: string; + source: 'database'; + }> { + try { + // Try alternative agent endpoint that may still have task info + // The /tasks/by-id endpoint might have cached data or check database directly + const response = await this.httpClient.get(`/tasks/${taskId}/status`, { + timeout: 5000, // Short timeout for fallback + validateStatus: (status) => status === 200 || status === 404, + }); + + if (response.status === 200 && response.data) { + return { + status: response.data.status as AgentTaskStatus, + result: response.data.result, + error: response.data.error || undefined, + source: 'database', + }; + } + + return { status: null, source: 'database' }; + } catch (error: any) { + // Fallback lookup failed - this is expected, not an error + this.logger.debug(`Database fallback lookup failed for ${taskId}: ${error.message}`); + return { status: null, source: 'database' }; + } + } + + /** + * v1.1.0: Fallback lookup - check checklist item for completion + * Last resort when both agent API and database fail + */ + private async getTaskStatusFromChecklistItem(checklistItemId: string): Promise<{ + status: AgentTaskStatus | null; + result?: any; + error?: string; + source: 'checklist'; + }> { + try { + const item = await this.prisma.checklistItem.findUnique({ + where: { id: checklistItemId }, + select: { + status: true, + actualOutcome: true, + }, + }); + + if (item) { + // Map checklist status to agent task status + let mappedStatus: AgentTaskStatus | null = null; + if (item.status === 'COMPLETED') { + mappedStatus = 'COMPLETED'; + } else if (item.status === 'FAILED') { + mappedStatus = 'FAILED'; + } + + if (mappedStatus) { + return { + status: mappedStatus, + result: item.actualOutcome, + source: 'checklist', + }; + } + } + + return { status: null, source: 'checklist' }; + } catch (error: any) { + this.logger.debug(`Checklist fallback lookup failed for ${checklistItemId}: ${error.message}`); + return { status: null, source: 'checklist' }; + } + } + + /** + * Poll for task completion and update checklist items + * Runs every 5 seconds (configurable) + * + * v1.1.1: DB transient error handling with backoff + * v1.1.0: Implements 404 tolerance with grace window and fallback lookups + * + * When a 404 is received: + * 1. Enter grace window (don't fail immediately) + * 2. Attempt fallback lookups (database, checklist item) + * 3. Only after grace expires with no resolution, mark as INFRA_FAILED + * 4. INFRA_FAILED triggers step retry, NOT replan (saves replan attempts) + */ + @Cron(CronExpression.EVERY_5_SECONDS) + async pollTaskCompletions(): Promise { + if (!this.dispatchEnabled) return; + + // v1.1.1: Skip if in DB backoff + if (this.dbTransientService.isInBackoff()) { + this.logger.debug( + `Task polling skipped - DB backoff (${Math.round(this.dbTransientService.getBackoffRemainingMs() / 1000)}s remaining)`, + ); + return; + } + + const inFlightRecords = Array.from(this.dispatchRecords.values()) + .filter(r => r.status === 'DISPATCHED' || r.status === 'RUNNING' || r.status === 'WAITING_USER'); + + if (inFlightRecords.length === 0) return; + + this.logger.debug(`Polling ${inFlightRecords.length} in-flight tasks`); + + for (const record of inFlightRecords) { + try { + // v1.1.0: Use getTaskStatusWithMeta for 404 awareness + const result = await this.getTaskStatusWithMeta(record.taskId); + + // Handle 404/410 - task not found (may have been GC'd) + if (result.notFound) { + await this.handle404WithGraceWindow(record); + continue; + } + + // Handle other errors (network, 500, etc.) + if (!result.task && result.error) { + record.consecutiveCheckFailures++; + this.dispatchRecords.set(record.idempotencyKey, record); + + if (record.consecutiveCheckFailures <= STATUS_CHECK_FAILURE_THRESHOLD) { + this.logger.warn( + `Transient status check failure for task ${record.taskId} ` + + `(${record.consecutiveCheckFailures}/${STATUS_CHECK_FAILURE_THRESHOLD}): ${result.error}`, + ); + continue; + } + + // Extended failure window exceeded + const timeSinceLastSuccess = record.lastSuccessfulCheck + ? Date.now() - record.lastSuccessfulCheck.getTime() + : Date.now() - record.createdAt.getTime(); + + if (timeSinceLastSuccess > STATUS_CHECK_FAILURE_WINDOW_MS) { + this.logger.error( + `Task ${record.taskId} unreachable for ${Math.round(timeSinceLastSuccess / 1000)}s ` + + `(${record.consecutiveCheckFailures} consecutive failures)`, + ); + + // Mark as infrastructure failure (retry, don't replan) + await this.markAsInfrastructureFailure(record, 'Agent unreachable'); + } + continue; + } + + // Task found - process normally + const task = result.task!; + + // Clear 404 grace window tracking (task is reachable again) + if (record.notFoundGraceStartedAt) { + this.logger.log(`Task ${record.taskId} reappeared after 404, clearing grace window`); + record.notFoundGraceStartedAt = undefined; + record.notFoundCount = 0; + } + + // Status check succeeded - reset failure tracking + record.consecutiveCheckFailures = 0; + record.lastSuccessfulCheck = new Date(); + + // v1.2.0 Phase C: Update heartbeat health from task controller + // This runs in parallel with status polling for comprehensive health tracking + await this.updateHeartbeatHealth(record); + + // Update record status based on task status + if (task.status === 'RUNNING' && record.status === 'DISPATCHED') { + record.status = 'RUNNING'; + this.dispatchRecords.set(record.idempotencyKey, record); + + await this.emitActivityEvent(record.goalRunId, 'STEP_STARTED', task.title || 'Step started', { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + }); + } + + // Handle completion + if (task.status === 'COMPLETED') { + await this.handleTaskCompleted(record, task.result, task.title); + } + + // Handle semantic failure (task actually failed, not infra issue) + if (task.status === 'FAILED' || task.status === 'CANCELLED') { + const failureType = this.classifyFailureType(task); + await this.handleTaskFailed(record, task.error, task.title, failureType); + } + + // v2.4.0: Handle NEEDS_HELP status - task paused for user input + // Clean up TaskDesktop resource since task is no longer actively processing + if (task.status === 'NEEDS_HELP') { + // Stark Fix (Atom 3): once a dispatch record is WAITING_USER, do not re-emit/redo NEEDS_HELP handling. + if (record.status !== 'WAITING_USER') { + await this.handleTaskNeedsHelp(record, task); + } + } + } catch (error: any) { + this.logger.error(`Error polling task ${record.taskId}: ${error.message}`); + } + } + } + + /** + * v1.1.0: Handle 404 response with grace window and fallback lookups + * + * Critical design principle: 404 does NOT mean failure. + * The task may have completed and been GC'd before we could poll. + * We must check fallback sources before declaring failure. + */ + private async handle404WithGraceWindow(record: DispatchRecord): Promise { + const now = new Date(); + record.notFoundCount++; + + // Start grace window if not already started + if (!record.notFoundGraceStartedAt) { + record.notFoundGraceStartedAt = now; + this.logger.warn( + `Task ${record.taskId} not found (404), starting grace window (${this.notFoundGraceMs}ms)`, + ); + } + + // Check how long we've been in grace window + const graceElapsedMs = now.getTime() - record.notFoundGraceStartedAt.getTime(); + + // Attempt fallback lookups + this.logger.debug(`Task ${record.taskId} 404 - attempting fallback lookups (grace: ${Math.round(graceElapsedMs / 1000)}s)`); + + // Fallback 1: Check database for task status + const dbResult = await this.getTaskStatusFromDatabase(record.taskId); + if (dbResult.status) { + this.logger.log(`Task ${record.taskId} found in database via fallback: ${dbResult.status}`); + + if (dbResult.status === 'COMPLETED') { + await this.handleTaskCompleted(record, dbResult.result, undefined, 'database'); + return; + } + if (dbResult.status === 'FAILED' || dbResult.status === 'CANCELLED') { + await this.handleTaskFailed(record, dbResult.error, undefined, 'SEMANTIC'); + return; + } + // Task still running in DB but 404 from API - possible race, continue waiting + } + + // Fallback 2: Check checklist item status + const checklistResult = await this.getTaskStatusFromChecklistItem(record.checklistItemId); + if (checklistResult.status) { + this.logger.log(`Task ${record.taskId} found in checklist via fallback: ${checklistResult.status}`); + + if (checklistResult.status === 'COMPLETED') { + // Already marked complete by some other path + record.status = 'COMPLETED'; + record.completedAt = now; + this.dispatchRecords.set(record.idempotencyKey, record); + return; + } + if (checklistResult.status === 'FAILED') { + // Already marked failed by some other path + record.status = 'FAILED'; + record.completedAt = now; + this.dispatchRecords.set(record.idempotencyKey, record); + return; + } + } + + // No resolution found - check if grace window expired + if (graceElapsedMs >= this.notFoundGraceMs) { + this.logger.error( + `Task ${record.taskId} not found after ${Math.round(graceElapsedMs / 1000)}s grace window ` + + `(${record.notFoundCount} 404s), marking as infrastructure failure`, + ); + + // Mark as infrastructure failure - triggers retry, not replan + await this.markAsInfrastructureFailure( + record, + `Task not found after ${Math.round(graceElapsedMs / 1000)}s (404 x${record.notFoundCount})`, + ); + return; + } + + // Still within grace window - update record and wait + this.dispatchRecords.set(record.idempotencyKey, record); + this.logger.debug( + `Task ${record.taskId} grace window: ${Math.round((this.notFoundGraceMs - graceElapsedMs) / 1000)}s remaining`, + ); + } + + /** + * v1.1.0: Handle task completion (from any source) + */ + private async handleTaskCompleted( + record: DispatchRecord, + result?: any, + title?: string, + source: string = 'agent', + ): Promise { + record.status = 'COMPLETED'; + record.completedAt = new Date(); + record.failureType = undefined; + this.dispatchRecords.set(record.idempotencyKey, record); + + await this.updateChecklistItemStatus(record.checklistItemId, 'COMPLETED', result); + + await this.emitActivityEvent(record.goalRunId, 'STEP_COMPLETED', title || 'Step completed', { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + result, + source, + }); + + this.logger.log(`Task ${record.taskId} completed for item ${record.checklistItemId} (source: ${source})`); + } + + /** + * v1.1.0: Handle task failure with failure type classification + * + * Critical: failureType determines replan behavior: + * - SEMANTIC: Task actually failed (wrong output, verification failed) → REPLAN + * - INFRASTRUCTURE: Infra issue (404, timeout, network) → RETRY (don't consume replan) + */ + private async handleTaskFailed( + record: DispatchRecord, + error?: string, + title?: string, + failureType: FailureType = 'UNKNOWN', + ): Promise { + record.status = 'FAILED'; + record.completedAt = new Date(); + record.failureType = failureType; + this.dispatchRecords.set(record.idempotencyKey, record); + + // v1.1.0: Store failure type in actualOutcome for orchestrator to read + const errorWithType = failureType === 'INFRASTRUCTURE' + ? `[INFRA] ${error || 'Infrastructure failure'}` + : error || 'Task failed'; + + await this.updateChecklistItemStatus(record.checklistItemId, 'FAILED', undefined, errorWithType); + + await this.emitActivityEvent(record.goalRunId, 'ERROR', `Step failed: ${title || 'Unknown'}`, { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + error: errorWithType, + failureType, + }); + + this.logger.warn( + `Task ${record.taskId} failed for item ${record.checklistItemId}: ${errorWithType} (type: ${failureType})`, + ); + } + + /** + * v6.0.0: Classify agent task failure type (SEMANTIC vs INFRASTRUCTURE). + * + * Contract: + * - Connection errors, timeouts, provider overload, and gateway/model unavailability are INFRASTRUCTURE. + * - Tool/logic failures are SEMANTIC. + */ + private classifyFailureType(task: AgentTask): FailureType { + // Cancellations are usually intentional; treat as semantic unless explicit infra marker exists. + const error = String(task.error || ''); + const lower = error.toLowerCase(); + + // Explicit machine marker (preferred over pattern matching). + if (lower.includes('[infra]')) return 'INFRASTRUCTURE'; + + const result = (task.result || {}) as any; + const errorCategory = typeof result.errorCategory === 'string' ? result.errorCategory : ''; + const errorCode = typeof result.errorCode === 'string' ? result.errorCode : ''; + + if ( + ['INFRA', 'TRANSIENT_INFRA', 'PROVIDER_UNAVAILABLE', 'CAPACITY_EXHAUSTED'].includes( + String(errorCategory), + ) + ) { + return 'INFRASTRUCTURE'; + } + + const combined = `${lower} ${String(errorCategory).toLowerCase()} ${String(errorCode).toLowerCase()}`; + const infraPatterns = [ + // Network/timeouts + 'econnrefused', + 'etimedout', + 'enotfound', + 'econnreset', + 'socket hang up', + 'network error', + 'fetch failed', + 'connection refused', + 'timeout', + + // Provider/gateway unavailability + 'service unavailable', + 'bad gateway', + 'gateway', + 'litellm', + 'circuit breaker open', + 'overloaded', + 'rate limit', + 'no available', + 'capacity', + 'model group', + + // Common HTTP status signals + ' 502 ', + ' 503 ', + ' 504 ', + ]; + + if (infraPatterns.some((p) => combined.includes(p))) { + return 'INFRASTRUCTURE'; + } + + return 'SEMANTIC'; + } + + /** + * v2.4.0: Handle task transitioned to NEEDS_HELP status + * + * When a task asks for user help (e.g., Claude asks for clarification): + * 1. Notify task controller to clean up the TaskDesktop resource + * 2. The task is paused, not failed - user can provide input via UI + * 3. Emit activity event for visibility + * + * Note: This is distinct from failure - the task can continue after user input. + * The TaskDesktop cleanup prevents resource wastage during the wait. + */ + private async handleTaskNeedsHelp(record: DispatchRecord, task: AgentTask): Promise { + // Stark Fix (Atom 3): NEEDS_HELP must be idempotent (no 5s spam loop). + // Make NEEDS_HELP a durable state transition: + // - dispatch record → WAITING_USER (in-memory hotfix) + // - checklist item → BLOCKED (durable) + // - goal run phase → WAITING_USER_INPUT (durable) + // Side effects (activity, outbox) must happen once only, guarded by the durable transition. + if (record.status === 'WAITING_USER') return; + + const needsHelp = this.normalizeNeedsHelpResult(task); + + // Gold invariant: strategy must never block progress. + // If the agent attempted to ask a strategy question (deprecated), we auto-resolve to a safe default + // and retry the step without consuming replan budget. + if (needsHelp.errorCode === CONTRACT_VIOLATION_STRATEGY_AS_HELP) { + const handled = await this.autoResolveStrategyAndRetry(record, needsHelp); + if (handled) return; + } + + const classification = this.classifyNeedsHelpHandling(needsHelp.errorCode); + + // Feasibility gate (Atom 4): if a TEXT_ONLY task tries to use desktop tools, auto-upgrade the step to DESKTOP + // and rely on the existing infra-retry mechanism to re-dispatch without consuming replan budget. + if ( + needsHelp.errorCode === 'DESKTOP_NOT_ALLOWED' && + typeof record.checklistItemId === 'string' && + record.checklistItemId.startsWith('ci-') + ) { + const upgraded = await this.prisma.checklistItem.updateMany({ + where: { + id: record.checklistItemId, + requiresDesktop: false, + }, + data: { + requiresDesktop: true, + executionSurface: ExecutionSurface.DESKTOP, + }, + }); + + if (upgraded.count > 0) { + await this.emitActivityEvent( + record.goalRunId, + 'EXECUTION_SURFACE_UPGRADED', + 'Auto-upgraded execution surface to DESKTOP', + { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + from: ExecutionSurface.TEXT_ONLY, + to: ExecutionSurface.DESKTOP, + reasonCode: needsHelp.errorCode, + }, + ); + + await this.markAsInfrastructureFailure( + record, + 'Execution surface mismatch: step required desktop tools; upgraded to DESKTOP and will retry', + ); + return; + } + } + + // Gold-Standard policy: NEVER convert internal strategy/runtime failures into a user prompt. + // Prompts are reserved for true external input / human action cases only (typed by errorCode). + if (classification.handling !== 'USER_PROMPT') { + const message = this.formatNeedsHelpFailureMessage(needsHelp, needsHelp.errorCode); + + if (classification.handling === 'INFRA_FAILURE') { + await this.markAsInfrastructureFailure(record, message); + return; + } + + await this.handleTaskFailed(record, message, task.title, 'SEMANTIC'); + return; + } + + this.logger.log( + `Task ${record.taskId} needs user help for item ${record.checklistItemId} (code=${needsHelp.errorCode})`, + ); + + const promptKind = this.getPromptKindForNeedsHelpCode(needsHelp.errorCode, task); + + const appBaseUrl = this.configService.get('APP_BASE_URL', 'https://app.bytebot.ai'); + const desktopTakeoverLink = + promptKind === UserPromptKind.DESKTOP_TAKEOVER + ? `${appBaseUrl}/tasks/${record.taskId}` + : null; + + const goalRunTenant = await this.prisma.goalRun.findUnique({ + where: { id: record.goalRunId }, + select: { tenantId: true, executionEngine: true }, + }); + if (!goalRunTenant?.tenantId) { + throw new Error(`GoalRun ${record.goalRunId} not found (tenantId missing)`); + } + + const isTemporal = goalRunTenant.executionEngine === GoalRunExecutionEngine.TEMPORAL_WORKFLOW; + const stepKey = + isTemporal && record.checklistItemId.startsWith(`${record.goalRunId}-`) + ? record.checklistItemId.slice(record.goalRunId.length + 1) + : record.checklistItemId; + + // Durable idempotency (restart-safe): + // If an OPEN prompt already exists for this run+step+kind, treat NEEDS_HELP as already handled. + // This prevents repeated "needs help" logs/spam after process restarts (no in-memory dependency). + const promptDedupeKey = this.userPromptService.buildDedupeKey( + record.goalRunId, + stepKey, + promptKind, + ); + const existingPrompt = await this.prisma.userPrompt.findUnique({ + where: { dedupeKey: promptDedupeKey }, + select: { id: true, status: true, kind: true, dedupeKey: true }, + }); + + if (existingPrompt?.status === UserPromptStatus.OPEN) { + // Best-effort: ensure legacy checklist item is durably marked BLOCKED and linked to the prompt. + // This enables operators to derive "already handled" entirely from DB state. + if (!isTemporal) { + await this.transitionChecklistItemToBlockedWaitingUser(record, task.title, { + promptId: existingPrompt.id, + promptKind: existingPrompt.kind, + promptDedupeKey: existingPrompt.dedupeKey, + }); + + try { + const existingItem = await this.prisma.checklistItem.findUnique({ + where: { id: record.checklistItemId }, + select: { status: true, blockedByPromptId: true }, + }); + + if (existingItem?.status === ChecklistItemStatus.BLOCKED && !existingItem.blockedByPromptId) { + await this.prisma.checklistItem.updateMany({ + where: { id: record.checklistItemId, blockedByPromptId: null }, + data: { + blockedByPromptId: existingPrompt.id, + blockedReason: 'NEEDS_HELP', + blockedAt: new Date(), + }, + }); + } + } catch { + // ignore repair failures; we still stop spam via record.status + } + } + + await this.transitionGoalRunToWaitingUserInput(record.goalRunId); + + // Best-effort: ensure outbox notification exists (idempotent via dedupe key). + try { + await this.outboxService.enqueueOnce({ + dedupeKey: existingPrompt.dedupeKey, + aggregateId: record.goalRunId, + eventType: 'user_prompt.created', + payload: { + promptId: existingPrompt.id, + goalRunId: record.goalRunId, + tenantId: goalRunTenant.tenantId, + checklistItemId: isTemporal ? null : record.checklistItemId, + stepKey: isTemporal ? stepKey : null, + taskId: record.taskId, + kind: existingPrompt.kind, + stepDescription: task.title || null, + links: { + desktopTakeover: desktopTakeoverLink, + }, + }, + }); + } catch { + // ignore; outbox publisher/reconciler will retry later + } + + // In-memory hotfix: stop per-poll spam immediately. + record.status = 'WAITING_USER'; + this.dispatchRecords.set(record.idempotencyKey, record); + return; + } + + this.logger.log(`Task ${record.taskId} needs user help for item ${record.checklistItemId}`); + + const prompt = isTemporal + ? await this.userPromptService.ensureOpenPromptForStepKey({ + tenantId: goalRunTenant.tenantId, + goalRunId: record.goalRunId, + stepKey, + kind: promptKind, + payload: { + goalRunId: record.goalRunId, + stepKey, + taskId: record.taskId, + workspaceId: task.workspaceId ?? null, + requiresDesktop: task.requiresDesktop ?? false, + title: task.title || null, + result: task.result ?? null, + error: task.error ?? null, + reason: 'Task requires user input to continue', + links: { + desktopTakeover: desktopTakeoverLink, + }, + }, + }) + : await this.userPromptService.ensureOpenPromptForStep({ + tenantId: goalRunTenant.tenantId, + goalRunId: record.goalRunId, + checklistItemId: record.checklistItemId, + kind: promptKind, + payload: { + goalRunId: record.goalRunId, + checklistItemId: record.checklistItemId, + taskId: record.taskId, + workspaceId: task.workspaceId ?? null, + requiresDesktop: task.requiresDesktop ?? false, + title: task.title || null, + result: task.result ?? null, + error: task.error ?? null, + reason: 'Task requires user input to continue', + links: { + desktopTakeover: desktopTakeoverLink, + }, + }, + }); + + const stepTransitioned = isTemporal + ? false + : await this.transitionChecklistItemToBlockedWaitingUser(record, task.title, { + promptId: prompt.id, + promptKind: prompt.kind, + promptDedupeKey: prompt.dedupeKey, + }); + const phaseTransitioned = await this.transitionGoalRunToWaitingUserInput(record.goalRunId); + + await this.outboxService.enqueueOnce({ + dedupeKey: prompt.dedupeKey, + aggregateId: record.goalRunId, + eventType: 'user_prompt.created', + payload: { + promptId: prompt.id, + goalRunId: record.goalRunId, + tenantId: goalRunTenant.tenantId, + checklistItemId: isTemporal ? null : record.checklistItemId, + stepKey: isTemporal ? stepKey : null, + taskId: record.taskId, + kind: prompt.kind, + stepDescription: task.title || null, + links: { + desktopTakeover: desktopTakeoverLink, + }, + }, + }); + + // In-memory durable-ish transition (prevents repeated poll-tick logs/side-effects within this process). + // We only mark WAITING_USER after prompt/outbox succeeded so a transient failure can retry. + record.status = 'WAITING_USER'; + this.dispatchRecords.set(record.idempotencyKey, record); + + // Only emit "needs help" signals on the first durable transition. + if (!stepTransitioned && !phaseTransitioned) return; + + await this.emitActivityEvent( + record.goalRunId, + 'USER_PROMPT_CREATED', + `Waiting for user input: ${task.title || 'Step paused'}`, + { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + promptId: prompt.id, + promptKind: prompt.kind, + dedupeKey: prompt.dedupeKey, + }, + ); + + if (prompt.kind === UserPromptKind.TEXT_CLARIFICATION) { + // Policy: TEXT_CLARIFICATION -> cleanup OK. + // This releases the desktop pod back to the pool while waiting for user input. + try { + await this.taskControllerClient.delete(`/api/v1/tasks/${record.taskId}/desktop`); + this.logger.log(`TaskDesktop cleanup requested for task ${record.taskId}`); + } catch (error: any) { + this.logger.warn(`Failed to cleanup TaskDesktop for task ${record.taskId}: ${error.message}`); + } + } else { + // Policy: DESKTOP_TAKEOVER -> preserve desktop + extend keepalive TTL. + // Best effort: if the desktop is task-scoped, extend timeout while user takes over. + try { + await this.taskControllerClient.post(`/api/v1/tasks/${record.taskId}/extend`, { + additionalMinutes: 60, + }); + this.logger.log(`TaskDesktop timeout extended for task ${record.taskId} (desktop takeover)`); + } catch (error: any) { + this.logger.warn(`Failed to extend TaskDesktop timeout for task ${record.taskId}: ${error.message}`); + } + } + + await this.emitActivityEvent(record.goalRunId, 'STEP_NEEDS_HELP', `Waiting for input: ${task.title || 'Step paused'}`, { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + reason: 'Task requires user input to continue', + needsHelp: { + errorCode: needsHelp.errorCode, + message: needsHelp.message ?? null, + }, + }); + } + + private inferStrategyDefault(goal: string, question: string): StrategyDefaultDecision { + const haystack = `${goal || ''}\n${question || ''}`.toLowerCase(); + + if (haystack.includes('flight') || haystack.includes('airfare') || haystack.includes('airline')) { + return { + key: 'flightSite', + name: 'Google Flights', + url: 'https://www.google.com/travel/flights', + }; + } + + if ( + haystack.includes('hotel') || + haystack.includes('lodging') || + haystack.includes('accommodation') + ) { + return { + key: 'hotelSite', + name: 'Booking.com', + url: 'https://www.booking.com', + }; + } + + if (haystack.includes('car rental') || (haystack.includes('car') && haystack.includes('rental'))) { + return { + key: 'carRentalSite', + name: 'KAYAK', + url: 'https://www.kayak.com', + }; + } + + return { + key: 'searchSite', + name: 'Google', + url: 'https://www.google.com', + }; + } + + private async autoResolveStrategyAndRetry( + record: DispatchRecord, + needsHelp: NeedsHelpResult, + ): Promise { + const question = typeof needsHelp.message === 'string' ? needsHelp.message.trim() : ''; + + const [goalRun, item] = await Promise.all([ + this.prisma.goalRun.findUnique({ + where: { id: record.goalRunId }, + select: { goal: true, constraints: true }, + }), + this.prisma.checklistItem.findUnique({ + where: { id: record.checklistItemId }, + select: { description: true }, + }), + ]); + + const currentDescription = item?.description ?? ''; + if (currentDescription.includes(STRATEGY_DEFAULT_MARKER)) { + // Avoid an infinite retry loop if the agent keeps trying to ask a strategy question. + return false; + } + + const goal = goalRun?.goal ?? ''; + const decision = this.inferStrategyDefault(goal, question); + + const nextDescription = `${currentDescription}\n\n${STRATEGY_DEFAULT_MARKER}: Use ${decision.name} (${decision.url}) as the default for this step. Do not ask the user to choose a site.`; + + await this.prisma.checklistItem.update({ + where: { id: record.checklistItemId }, + data: { description: nextDescription }, + }); + + if (goalRun) { + const constraints = + goalRun.constraints && typeof goalRun.constraints === 'object' + ? { ...(goalRun.constraints as any) } + : {}; + const strategyDefaults = + constraints.strategyDefaults && typeof constraints.strategyDefaults === 'object' + ? { ...(constraints.strategyDefaults as any) } + : {}; + + strategyDefaults[decision.key] = { name: decision.name, url: decision.url }; + constraints.strategyDefaults = strategyDefaults; + + await this.prisma.goalRun.update({ + where: { id: record.goalRunId }, + data: { constraints }, + }); + } + + await this.emitActivityEvent( + record.goalRunId, + 'STRATEGY_AUTO_RESOLVED', + `Strategy auto-resolved: ${decision.name}`, + { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + errorCode: needsHelp.errorCode, + decision, + question: question || null, + }, + ); + + await this.markAsInfrastructureFailure( + record, + `Strategy question auto-resolved (${decision.name}); retrying step`, + ); + + return true; + } + + private normalizeNeedsHelpResult(task: AgentTask): Required> & NeedsHelpResult { + const raw = task.result; + const base: NeedsHelpResult = + raw && typeof raw === 'object' + ? (raw as NeedsHelpResult) + : {}; + + const rawErrorCode = typeof base.errorCode === 'string' ? base.errorCode.trim() : ''; + const rawMessage = typeof base.message === 'string' ? base.message.trim() : ''; + + // Contract: errorCode is required. If missing, treat as contract violation and route to INTERNAL_REPAIR. + let errorCode = rawErrorCode || CONTRACT_VIOLATION_UNTYPED_NEEDS_HELP; + + // Contract: AGENT_REQUESTED_HELP must never create WAITING_USER_INPUT; treat as strategy-as-help violation. + if (errorCode === 'AGENT_REQUESTED_HELP') { + errorCode = CONTRACT_VIOLATION_STRATEGY_AS_HELP; + } + + const message = + rawMessage || + (typeof (raw as any)?.description === 'string' ? String((raw as any).description).trim() : '') || + (typeof (raw as any)?.reason === 'string' ? String((raw as any).reason).trim() : '') || + (typeof (raw as any)?.question === 'string' ? String((raw as any).question).trim() : '') || + ''; + + const details = + base.details && typeof base.details === 'object' + ? base.details + : undefined; + + return { + errorCode, + ...(message ? { message } : {}), + ...(details ? { details } : {}), + }; + } + + private getPromptKindForNeedsHelpCode(errorCode: string, task: AgentTask): UserPromptKind { + // NOTE: This is only used when classifyNeedsHelpHandling(errorCode) === USER_PROMPT. + // Keep mapping explicit; do not infer from natural language. + switch (errorCode) { + case 'UI_BLOCKED_SIGNIN': + case 'UI_BLOCKED_POPUP': + return UserPromptKind.DESKTOP_TAKEOVER; + case 'DISPATCHED_USER_PROMPT_STEP': + return UserPromptKind.TEXT_CLARIFICATION; + default: + // Defensive fallback: if we somehow got here, preserve prior behavior but do not broaden prompt eligibility. + return task.requiresDesktop ? UserPromptKind.DESKTOP_TAKEOVER : UserPromptKind.TEXT_CLARIFICATION; + } + } + + private classifyNeedsHelpHandling(errorCode: string): { handling: 'USER_PROMPT' | 'SEMANTIC_FAILURE' | 'INFRA_FAILURE' } { + // User prompts are reserved for true external input / takeover scenarios. + // Everything else must be handled internally (retry/replan/pause) without spamming prompts. + const userPromptCodes = new Set([ + 'DISPATCHED_USER_PROMPT_STEP', + 'UI_BLOCKED_SIGNIN', + 'UI_BLOCKED_POPUP', + ]); + + if (userPromptCodes.has(errorCode)) { + return { handling: 'USER_PROMPT' }; + } + + const infraCodes = new Set([ + 'WAITING_PROVIDER', + 'LLM_EMPTY_RESPONSE', + 'UI_OBSERVATION_FAILED', + ]); + + if (infraCodes.has(errorCode)) { + return { handling: 'INFRA_FAILURE' }; + } + + // Default: treat as semantic failure and let the orchestrator replan (internal strategy), not prompt the user. + return { handling: 'SEMANTIC_FAILURE' }; + } + + private formatNeedsHelpFailureMessage(needsHelp: NeedsHelpResult | null, errorCode: string): string { + const message = typeof needsHelp?.message === 'string' ? needsHelp.message.trim() : ''; + if (message) { + return `NEEDS_HELP(${errorCode}): ${message}`; + } + return `NEEDS_HELP(${errorCode})`; + } + + /** + * Stark Fix (Atom 3): Atomic, idempotent transition for NEEDS_HELP handling. + * + * Uses a single UPDATE with WHERE status IN (...) so concurrent pollers can't double-transition. + * Side effects are only emitted when the transition succeeds (count > 0). + */ + private async transitionChecklistItemToBlockedWaitingUser( + record: DispatchRecord, + title?: string, + prompt?: { promptId: string; promptKind: string; promptDedupeKey: string }, + ): Promise { + const blockedAt = new Date(); + const outcome = JSON.stringify( + { + blockedReason: 'WAITING_USER_INPUT', + source: 'AGENT_NEEDS_HELP', + taskId: record.taskId, + title: title || null, + ...( + prompt + ? { + promptId: prompt.promptId, + promptKind: prompt.promptKind, + promptDedupeKey: prompt.promptDedupeKey, + } + : {} + ), + }, + null, + 2, + ); + + const result = await this.dbTransientService.withTransientGuard( + async () => { + return await this.prisma.checklistItem.updateMany({ + where: { + id: record.checklistItemId, + status: { + in: [ChecklistItemStatus.IN_PROGRESS, ChecklistItemStatus.PENDING], + }, + }, + data: { + status: ChecklistItemStatus.BLOCKED, + blockedByPromptId: prompt?.promptId ?? null, + blockedReason: 'NEEDS_HELP', + blockedAt, + completedAt: null, + actualOutcome: outcome, + }, + }); + }, + `TaskDispatch.transitionChecklistItemToBlockedWaitingUser.${record.checklistItemId}`, + { + onTransientError: (err, backoffMs) => { + this.logger.warn( + `Transient error blocking checklist item ${record.checklistItemId}, will retry ` + + `(backoff: ${Math.round(backoffMs / 1000)}s): ${err.message}`, + ); + }, + }, + ); + + return (result?.count || 0) > 0; + } + + private async transitionGoalRunToWaitingUserInput(goalRunId: string): Promise { + const goalRun = await this.dbTransientService.withTransientGuard( + async () => { + return await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + select: { phase: true }, + }); + }, + `TaskDispatch.transitionGoalRunToWaitingUserInput.read.${goalRunId}`, + { + onTransientError: (err, backoffMs) => { + this.logger.debug( + `Transient error reading goal run ${goalRunId} before phase transition ` + + `(backoff: ${Math.round(backoffMs / 1000)}s): ${err.message}`, + ); + }, + }, + ); + + if (!goalRun) return false; + + const updated = await this.dbTransientService.withTransientGuard( + async () => { + return await this.prisma.goalRun.updateMany({ + where: { + id: goalRunId, + phase: { + in: [GoalRunPhase.EXECUTING, GoalRunPhase.CONTROLLING_DESKTOP], + }, + }, + data: { + phase: GoalRunPhase.WAITING_USER_INPUT, + waitReason: GoalRunWaitReason.USER_INPUT, + waitStartedAt: new Date(), + waitUntil: null, + waitDetail: { + kind: 'USER_PROMPT', + source: 'TASK_DISPATCH', + } as any, + }, + }); + }, + `TaskDispatch.transitionGoalRunToWaitingUserInput.update.${goalRunId}`, + { + onTransientError: (err, backoffMs) => { + this.logger.debug( + `Transient error updating goal run ${goalRunId} phase ` + + `(backoff: ${Math.round(backoffMs / 1000)}s): ${err.message}`, + ); + }, + }, + ); + + if ((updated?.count || 0) > 0) { + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId, + previousPhase: goalRun.phase, + newPhase: GoalRunPhase.WAITING_USER_INPUT, + }); + return true; + } + + return false; + } + + /** + * v1.1.0: Mark task as infrastructure failure + * + * Infrastructure failures (404, timeout, unreachable) should: + * 1. NOT consume replan attempts + * 2. Trigger step retry with backoff + * 3. Be clearly distinguished from semantic failures + */ + private async markAsInfrastructureFailure(record: DispatchRecord, error: string): Promise { + record.status = 'INFRA_FAILED'; + record.completedAt = new Date(); + record.failureType = 'INFRASTRUCTURE'; + this.dispatchRecords.set(record.idempotencyKey, record); + + // v1.1.0: Mark with [INFRA] prefix so orchestrator knows not to replan + const infraError = `[INFRA] ${error}`; + + await this.updateChecklistItemStatus(record.checklistItemId, 'FAILED', undefined, infraError); + + await this.emitActivityEvent(record.goalRunId, 'INFRA_ERROR', `Infrastructure failure: ${error}`, { + checklistItemId: record.checklistItemId, + taskId: record.taskId, + error: infraError, + failureType: 'INFRASTRUCTURE', + recoverable: true, + }); + + this.logger.error( + `Task ${record.taskId} marked as INFRA_FAILED for item ${record.checklistItemId}: ${error}`, + ); + } + + /** + * Update checklist item status in orchestrator database + * v1.1.1: Wrapped in transient guard for DB resilience + */ + private async updateChecklistItemStatus( + itemId: string, + status: 'COMPLETED' | 'FAILED' | 'SKIPPED', + result?: any, + error?: string, + ): Promise { + await this.dbTransientService.withTransientGuard( + async () => { + await this.prisma.checklistItem.update({ + where: { id: itemId }, + data: { + status, + actualOutcome: result ? JSON.stringify(result) : undefined, + completedAt: new Date(), + // Store error in actualOutcome if failed + ...(status === 'FAILED' && error ? { actualOutcome: `Error: ${error}` } : {}), + }, + }); + }, + `TaskDispatch.updateChecklistItemStatus.${itemId}`, + { + onTransientError: (err, backoffMs) => { + this.logger.warn( + `Transient error updating checklist item ${itemId}, will retry ` + + `(backoff: ${Math.round(backoffMs / 1000)}s): ${err.message}`, + ); + }, + onNonTransientError: (err) => { + this.logger.error(`Failed to update checklist item ${itemId}: ${err.message}`); + }, + }, + ); + } + + /** + * Emit activity event for UI updates + * v1.1.1: Wrapped in transient guard for DB resilience + */ + private async emitActivityEvent( + goalRunId: string, + eventType: string, + title: string, + details?: any, + ): Promise { + const checklistItemId = + typeof details?.checklistItemId === 'string' ? details.checklistItemId : undefined; + const planVersionId = + typeof details?.planVersionId === 'string' ? details.planVersionId : undefined; + const workflowNodeId = + typeof details?.workflowNodeId === 'string' ? details.workflowNodeId : undefined; + + await this.dbTransientService.withTransientGuard( + async () => { + // Check if goal run exists first + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + }); + + if (!goalRun) { + this.logger.warn(`Goal run ${goalRunId} not found, skipping activity event`); + return; + } + + await this.prisma.activityEvent.create({ + data: { + goalRunId, + eventType, + title, + description: details ? JSON.stringify(details) : undefined, + details: details || {}, + checklistItemId, + planVersionId, + workflowNodeId, + }, + }); + + // Emit event for real-time updates + this.eventEmitter.emit('goalrun.activity', { + goalRunId, + eventType, + title, + details, + }); + }, + `TaskDispatch.emitActivityEvent.${goalRunId}`, + { + onTransientError: (err, backoffMs) => { + // Activity events are non-critical, just log + this.logger.debug( + `Transient error emitting activity event for ${goalRunId}, skipped: ${err.message}`, + ); + }, + onNonTransientError: (err) => { + this.logger.error(`Failed to emit activity event: ${err.message}`); + }, + }, + ); + } + + /** + * Recover in-flight dispatches on startup + * v1.1.1: Enhanced recovery with actual task lookup and record creation + * + * This is critical for the restart grace window to work correctly: + * 1. Query all IN_PROGRESS checklist items from DB + * 2. For each, look up the corresponding task in the agent system + * 3. Create dispatch records so polling can resume + * 4. This allows the orchestrator to properly reconcile state after restart + */ + private async recoverInFlightDispatches(): Promise { + // v1.1.1: Wrap in transient guard for DB resilience during startup + const inProgressItems = await this.dbTransientService.withTransientGuard( + async () => { + return await this.prisma.checklistItem.findMany({ + where: { + status: 'IN_PROGRESS', + }, + include: { + planVersion: { + include: { + goalRun: true, + }, + }, + }, + }); + }, + 'TaskDispatch.recoverInFlightDispatches', + { + skipIfInBackoff: false, // Always try on startup + onTransientError: (err, backoffMs) => { + this.logger.warn( + `DB not ready during startup recovery, will retry in next poll cycle ` + + `(backoff: ${Math.round(backoffMs / 1000)}s): ${err.message}`, + ); + }, + }, + ); + + if (!inProgressItems) { + this.logger.warn('Could not recover in-flight dispatches - DB unavailable'); + return; + } + + this.logger.log(`Recovering ${inProgressItems.length} in-progress checklist items`); + + // For each in-progress item, try to find the corresponding task in agent system + for (const item of inProgressItems) { + if (!item.planVersion?.goalRun) continue; + + const goalRunId = item.planVersion.goalRun.id; + const idempotencyKey = this.generateIdempotencyKey(goalRunId, item.id, 1); + + // Skip if we already have a record (shouldn't happen, but safety check) + if (this.dispatchRecords.has(idempotencyKey)) { + this.logger.debug(`Dispatch record already exists for item ${item.id}`); + continue; + } + + try { + // Try to find the task by querying the agent API with nodeRunId + // The agent stores nodeRunId = checklistItemId when we create tasks + const taskResponse = await this.httpClient.get('/tasks', { + params: { nodeRunId: item.id }, + timeout: 5000, + validateStatus: (status) => status === 200 || status === 404, + }); + + if (taskResponse.status === 200 && taskResponse.data?.length > 0) { + const task = taskResponse.data[0]; // Most recent task for this nodeRunId + + // Create a recovery record + const record: DispatchRecord = { + idempotencyKey, + taskId: task.id, + goalRunId, + checklistItemId: item.id, + status: task.status === 'RUNNING' ? 'RUNNING' : 'DISPATCHED', + createdAt: new Date(task.createdAt || Date.now()), + lastSuccessfulCheck: new Date(), + consecutiveCheckFailures: 0, + notFoundCount: 0, + // v1.2.0 Phase C: Initialize heartbeat health tracking + isHeartbeatHealthy: true, + consecutiveHeartbeatUnhealthy: 0, + }; + this.dispatchRecords.set(idempotencyKey, record); + + this.logger.log( + `Recovered dispatch record for item ${item.id} → task ${task.id} (${task.status})`, + ); + } else { + // No task found - create a placeholder record that will trigger re-dispatch + // or be cleaned up by the next poll cycle + this.logger.warn( + `No task found for in-progress item ${item.id} - may need re-dispatch`, + ); + } + } catch (err: any) { + this.logger.warn( + `Failed to recover task for item ${item.id}: ${err.message}`, + ); + } + } + + this.logger.log( + `Recovery complete: ${this.dispatchRecords.size} dispatch records loaded`, + ); + } + + /** + * Handle workflow node ready event + * This integrates with the existing orchestrator-loop executeStepViaWorkflow + */ + @OnEvent('workflow.node-ready') + async handleNodeReady(payload: { + workflowRunId: string; + nodeId: string; + goalRunId: string; + checklistItemId: string; + }): Promise { + this.logger.log(`Node ready event received for item ${payload.checklistItemId}`); + + // Get the checklist item details + const item = await this.prisma.checklistItem.findUnique({ + where: { id: payload.checklistItemId }, + include: { + planVersion: { + include: { + goalRun: true, + }, + }, + }, + }); + + if (!item || !item.planVersion?.goalRun) { + this.logger.error(`Checklist item ${payload.checklistItemId} not found`); + return; + } + + // Get workspace ID from goal run + const goalRun = item.planVersion.goalRun; + const workflowRun = await this.prisma.workflowRun.findUnique({ + where: { id: payload.workflowRunId }, + select: { workspaceId: true }, + }); + const workspaceId = workflowRun?.workspaceId ?? undefined; + + // v2.4.0: Get goal context and previous step results for autonomous operation + const goalContext = goalRun.goal as string; + + // Get completed previous steps from the same plan version + const completedItems = await this.prisma.checklistItem.findMany({ + where: { + planVersionId: item.planVersionId, + status: 'COMPLETED', + order: { lt: item.order }, // Only items before this one + }, + orderBy: { order: 'asc' }, + select: { + description: true, + actualOutcome: true, + order: true, + }, + }); + + // Build previous step results summary with context summarization + // v2.4.1: Implements hierarchical summarization for long histories + let previousStepResults = this.buildPreviousStepResults(completedItems); + + // PR5: Inject resolved user prompt answers for this step into agent context. + // This prevents “answer loss” when an EXECUTE step is unblocked by a prompt resolution. + const resolvedPromptsForItem = await this.prisma.userPrompt.findMany({ + where: { + checklistItemId: item.id, + status: UserPromptStatus.RESOLVED, + }, + orderBy: { resolvedAt: 'desc' }, + take: 3, + select: { + id: true, + kind: true, + answers: true, + resolvedAt: true, + }, + }); + + if (resolvedPromptsForItem.length > 0) { + const formatted = resolvedPromptsForItem + .map((p) => { + const ts = p.resolvedAt ? ` @ ${p.resolvedAt.toISOString()}` : ''; + return `- ${p.kind} (${p.id})${ts}: ${JSON.stringify(p.answers)}`; + }) + .join('\n'); + + previousStepResults = [previousStepResults, `User-provided answers for this step:\n${formatted}`] + .filter((s) => !!s && String(s).trim().length > 0) + .join('\n\n'); + } + + // v2.4.1: Structured logging for context propagation debugging + this.logger.log({ + message: 'Dispatching task with context', + goalRunId: payload.goalRunId, + checklistItemId: payload.checklistItemId, + stepOrder: item.order, + hasGoalContext: !!goalContext, + goalContextLength: goalContext?.length || 0, + previousStepsCount: completedItems.length, + previousStepsWithOutcome: completedItems.filter((ci) => ci.actualOutcome).length, + workspaceId: workspaceId || null, + requiresDesktop: item.requiresDesktop, + executionSurface: item.executionSurface, + }); + + // Dispatch the task + await this.dispatchTask({ + goalRunId: payload.goalRunId, + checklistItemId: payload.checklistItemId, + planVersionId: item.planVersionId, + workspaceId, + title: item.description.slice(0, 100), + description: item.description, + expectedOutcome: item.expectedOutcome || undefined, + allowedTools: item.suggestedTools || [], + requiresDesktop: item.requiresDesktop, + executionSurface: item.executionSurface, + // v2.4.0: Context propagation for autonomous operation + goalContext, + previousStepResults, + }); + } + + /** + * Get dispatch statistics + */ + getStats(): { total: number; dispatched: number; running: number; completed: number; failed: number } { + const records = Array.from(this.dispatchRecords.values()); + return { + total: records.length, + dispatched: records.filter(r => r.status === 'DISPATCHED').length, + running: records.filter(r => r.status === 'RUNNING').length, + completed: records.filter(r => r.status === 'COMPLETED').length, + failed: records.filter(r => r.status === 'FAILED').length, + }; + } + + /** + * Phase 2.1: Get status check health for a checklist item + * Returns info about whether we can reliably check task status + */ + getStatusCheckHealth(checklistItemId: string): { + hasActiveDispatch: boolean; + lastSuccessfulCheck?: Date; + consecutiveFailures: number; + isHealthy: boolean; + } { + // Find dispatch record for this checklist item + const record = Array.from(this.dispatchRecords.values()).find( + r => r.checklistItemId === checklistItemId && (r.status === 'DISPATCHED' || r.status === 'RUNNING'), + ); + + if (!record) { + return { + hasActiveDispatch: false, + consecutiveFailures: 0, + isHealthy: true, + }; + } + + const isHealthy = record.consecutiveCheckFailures < STATUS_CHECK_FAILURE_THRESHOLD; + + return { + hasActiveDispatch: true, + lastSuccessfulCheck: record.lastSuccessfulCheck, + consecutiveFailures: record.consecutiveCheckFailures, + isHealthy, + }; + } + + /** + * Phase 2.2: Get effective "last progress" time for timeout calculations + * Uses lastSuccessfulCheck if available, otherwise falls back to createdAt + */ + getLastProgressTime(checklistItemId: string): Date | null { + const record = Array.from(this.dispatchRecords.values()).find( + r => r.checklistItemId === checklistItemId && (r.status === 'DISPATCHED' || r.status === 'RUNNING'), + ); + + if (!record) return null; + + // Return the more recent of lastSuccessfulCheck or createdAt + return record.lastSuccessfulCheck || record.createdAt; + } + + /** + * v1.2.0 Phase C: Query task controller for heartbeat health + * + * Calls the task controller's /health endpoint to get the agent's heartbeat status. + * This is used to determine if the agent is still alive and processing the task. + */ + private async queryHeartbeatHealth(taskId: string): Promise { + try { + const response = await this.taskControllerClient.get(`/api/v1/tasks/${taskId}/health`); + return response.data as HeartbeatHealthResponse; + } catch (error: any) { + const status = error.response?.status; + if (status === 404) { + // Task not found in task controller - treat as unhealthy + this.logger.debug(`Heartbeat health query 404 for ${taskId}`); + return null; + } + // Other errors - log but don't fail (fallback to status-based health) + this.logger.warn(`Heartbeat health query failed for ${taskId}: ${error.message}`); + return null; + } + } + + /** + * v1.2.0 Phase C: Update heartbeat health for a dispatch record + * + * Called during status polling to check and update heartbeat health. + * This tracks consecutive unhealthy heartbeat checks for timeout decisions. + */ + async updateHeartbeatHealth(record: DispatchRecord): Promise { + const health = await this.queryHeartbeatHealth(record.taskId); + const now = new Date(); + + if (!health) { + // Query failed - increment unhealthy count + record.consecutiveHeartbeatUnhealthy++; + record.isHeartbeatHealthy = false; + record.lastHeartbeatCheck = now; + this.dispatchRecords.set(record.idempotencyKey, record); + + this.logger.debug( + `Heartbeat health unknown for ${record.taskId} ` + + `(${record.consecutiveHeartbeatUnhealthy} consecutive unhealthy)`, + ); + return; + } + + record.lastHeartbeatCheck = now; + + if (health.agentHeartbeat) { + record.lastHeartbeatTime = new Date(health.agentHeartbeat); + } + + if (health.isHeartbeatHealthy) { + // Heartbeat is healthy - reset unhealthy counter + record.isHeartbeatHealthy = true; + record.consecutiveHeartbeatUnhealthy = 0; + } else { + // Heartbeat is unhealthy - increment counter + record.isHeartbeatHealthy = false; + record.consecutiveHeartbeatUnhealthy++; + + this.logger.warn( + `Heartbeat unhealthy for ${record.taskId}: ` + + `${health.timeSinceHeartbeat}s since last heartbeat, phase=${health.phase} ` + + `(${record.consecutiveHeartbeatUnhealthy}/${this.heartbeatUnhealthyThreshold})`, + ); + } + + this.dispatchRecords.set(record.idempotencyKey, record); + } + + /** + * v1.2.0 Phase C: Get heartbeat health for timeout decisions + * + * Returns heartbeat health information that the orchestrator-loop uses + * to make timeout decisions. Replaces static TTL-based timeout with + * dynamic heartbeat-based timeout. + */ + getHeartbeatHealth(checklistItemId: string): { + hasActiveDispatch: boolean; + isHealthy: boolean; + consecutiveUnhealthy: number; + lastHeartbeat?: Date; + shouldTimeout: boolean; + } { + // Find dispatch record for this checklist item + const record = Array.from(this.dispatchRecords.values()).find( + r => r.checklistItemId === checklistItemId && (r.status === 'DISPATCHED' || r.status === 'RUNNING'), + ); + + if (!record) { + return { + hasActiveDispatch: false, + isHealthy: true, + consecutiveUnhealthy: 0, + shouldTimeout: false, + }; + } + + // Timeout if consecutive unhealthy checks exceed threshold + const shouldTimeout = record.consecutiveHeartbeatUnhealthy >= this.heartbeatUnhealthyThreshold; + + return { + hasActiveDispatch: true, + isHealthy: record.isHeartbeatHealthy, + consecutiveUnhealthy: record.consecutiveHeartbeatUnhealthy, + lastHeartbeat: record.lastHeartbeatTime, + shouldTimeout, + }; + } + + /** + * v2.4.1: Build previous step results with context summarization + * + * For long step histories, this method summarizes earlier steps and provides + * full detail only for recent steps. This prevents context window overflow + * while preserving important continuity information. + * + * Strategy: + * - Last N steps (configurable): Full detail with outcome + * - Earlier steps: Brief summary (description only) + * - Total output capped at max chars + */ + private buildPreviousStepResults( + completedItems: Array<{ + description: string; + actualOutcome: string | null; + order: number; + }>, + ): string | undefined { + if (completedItems.length === 0) { + return undefined; + } + + const { maxPreviousStepsDetailed, maxContextChars } = CONTEXT_SUMMARIZATION_CONFIG; + + // Parse outcome from each step + const stepsWithOutcome = completedItems.map((ci, idx) => { + let outcome = ''; + if (ci.actualOutcome) { + try { + const parsed = JSON.parse(ci.actualOutcome); + outcome = parsed.summary || ''; + } catch { + // Fallback: treat as plain string, but skip error messages + outcome = ci.actualOutcome.startsWith('Error:') ? '' : ci.actualOutcome; + } + } + return { + index: idx + 1, + description: ci.description, + outcome, + order: ci.order, + }; + }); + + // Determine which steps get full detail vs summary + const totalSteps = stepsWithOutcome.length; + const detailedStartIdx = Math.max(0, totalSteps - maxPreviousStepsDetailed); + + const parts: string[] = []; + + // Earlier steps (summarized) - just descriptions + if (detailedStartIdx > 0) { + const summarizedSteps = stepsWithOutcome.slice(0, detailedStartIdx); + parts.push(`[Earlier steps 1-${detailedStartIdx} completed successfully]`); + + // Add brief list if not too many + if (summarizedSteps.length <= 3) { + summarizedSteps.forEach((step) => { + parts.push(` ${step.index}. ${step.description.slice(0, 80)}${step.description.length > 80 ? '...' : ''}`); + }); + } + } + + // Recent steps (detailed) - with outcomes + const detailedSteps = stepsWithOutcome.slice(detailedStartIdx); + detailedSteps.forEach((step) => { + const outcomeStr = step.outcome ? `: ${step.outcome}` : ''; + parts.push(`${step.index}. ${step.description}${outcomeStr}`); + }); + + // Join and truncate if needed + let result = parts.join('\n'); + + if (result.length > maxContextChars) { + // Truncate and add indicator + result = result.slice(0, maxContextChars - 50) + '\n[...truncated for brevity]'; + this.logger.warn({ + message: 'Previous step results truncated due to length', + originalLength: parts.join('\n').length, + truncatedTo: result.length, + stepCount: completedItems.length, + }); + } + + return result; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.travel-feasibility-gate.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.travel-feasibility-gate.spec.ts new file mode 100644 index 000000000..d3750ddfe --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/task-dispatch.travel-feasibility-gate.spec.ts @@ -0,0 +1,102 @@ +import { ExecutionSurface } from '@prisma/client'; +import { TaskDispatchService } from './task-dispatch.service'; + +describe('TaskDispatchService travel feasibility gate', () => { + it('auto-upgrades travel-shopping dispatches to DESKTOP with browser tool', async () => { + const configService = { + get: jest.fn((_key: string, fallback: any) => fallback), + } as any; + const prisma = {} as any; + const dbTransientService = {} as any; + const eventEmitter = { emit: jest.fn() } as any; + const userPromptService = {} as any; + const outboxService = {} as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + // Avoid real HTTP and side effects + (service as any).httpClient = { + post: jest.fn().mockResolvedValue({ status: 201, data: { id: 'task-1' } }), + }; + jest.spyOn(service as any, 'emitActivityEvent').mockResolvedValue(undefined); + + await service.dispatchTask({ + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + workspaceId: 'ws-1', + title: 'Search flights', + description: 'Search for flights from DTW to LAS', + allowedTools: [], + requiresDesktop: false, + executionSurface: ExecutionSurface.TEXT_ONLY, + goalContext: 'Please price a trip with flight and hotel from Detroit to Las Vegas Feb 21-28', + previousStepResults: '', + attempt: 1, + }); + + expect((service as any).httpClient.post).toHaveBeenCalledWith( + '/tasks', + expect.objectContaining({ + requiresDesktop: true, + executionSurface: ExecutionSurface.DESKTOP, + allowedTools: expect.arrayContaining(['browser']), + }), + ); + }); + + it('auto-upgrades travel-shopping dispatches even when goalContext is missing', async () => { + const configService = { + get: jest.fn((_key: string, fallback: any) => fallback), + } as any; + const prisma = {} as any; + const dbTransientService = {} as any; + const eventEmitter = { emit: jest.fn() } as any; + const userPromptService = {} as any; + const outboxService = {} as any; + + const service = new TaskDispatchService( + configService, + prisma, + dbTransientService, + eventEmitter, + userPromptService, + outboxService, + ); + + // Avoid real HTTP and side effects + (service as any).httpClient = { + post: jest.fn().mockResolvedValue({ status: 201, data: { id: 'task-2' } }), + }; + jest.spyOn(service as any, 'emitActivityEvent').mockResolvedValue(undefined); + + await service.dispatchTask({ + goalRunId: 'gr-2', + checklistItemId: 'ci-2', + workspaceId: 'ws-2', + title: 'Price a trip', + description: 'Search for round-trip flights from DTW to LAS for Feb 21-28', + allowedTools: [], + requiresDesktop: false, + executionSurface: ExecutionSurface.TEXT_ONLY, + // goalContext intentionally omitted + previousStepResults: '', + attempt: 1, + }); + + expect((service as any).httpClient.post).toHaveBeenCalledWith( + '/tasks', + expect.objectContaining({ + requiresDesktop: true, + executionSurface: ExecutionSurface.DESKTOP, + allowedTools: expect.arrayContaining(['browser']), + }), + ); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/task-recovery.service.ts b/packages/bytebot-workflow-orchestrator/src/services/task-recovery.service.ts new file mode 100644 index 000000000..2f547bfd8 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/task-recovery.service.ts @@ -0,0 +1,756 @@ +/** + * Task Recovery Service + * v1.0.0: Phase 9 Self-Healing & Auto-Recovery + * + * Automatically detects and recovers stale/stuck tasks: + * - Detects tasks stuck in RUNNING state beyond timeout + * - Detects tasks assigned to offline agents + * - Reassigns tasks to healthy agents + * - Manages task timeout and cleanup + * + * Runs on a configurable schedule (default: every 30 seconds). + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { AgentRouterService } from './agent-router.service'; +import { LeaderElectionService } from './leader-election.service'; + +// Stale task reasons +export enum StaleReason { + TIMEOUT = 'TIMEOUT', + AGENT_OFFLINE = 'AGENT_OFFLINE', + HEARTBEAT_MISSING = 'HEARTBEAT_MISSING', + ASSIGNMENT_EXPIRED = 'ASSIGNMENT_EXPIRED', +} + +// Recovery result +export interface RecoveryResult { + nodeRunId: string; + success: boolean; + action: 'REASSIGNED' | 'MOVED_TO_DLQ' | 'SKIPPED'; + newAgentId?: string; + error?: string; +} + +// Recovery summary +export interface RecoverySummary { + detected: number; + recovered: number; + movedToDLQ: number; + failed: number; + duration: number; +} + +@Injectable() +export class TaskRecoveryService implements OnModuleInit { + private readonly logger = new Logger(TaskRecoveryService.name); + private isProcessing = false; + + // Configuration + private readonly taskTimeoutMs: number; + private readonly heartbeatTimeoutMs: number; + private readonly maxRecoveryAttempts: number; + private readonly recoveryEnabled: boolean; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly agentRouter: AgentRouterService, + private readonly leaderElection: LeaderElectionService, + ) { + // Default: 5 minutes for task timeout + // v1.0.1: Use parseInt for numeric env vars (env vars are always strings) + this.taskTimeoutMs = parseInt( + this.configService.get('TASK_TIMEOUT_MS', '300000'), + 10, + ); + // Default: 2 minutes for heartbeat timeout + this.heartbeatTimeoutMs = parseInt( + this.configService.get('HEARTBEAT_TIMEOUT_MS', '120000'), + 10, + ); + // Default: 3 recovery attempts before moving to DLQ + this.maxRecoveryAttempts = parseInt( + this.configService.get('MAX_RECOVERY_ATTEMPTS', '3'), + 10, + ); + // Default: enabled + // v1.0.1: Use string comparison for boolean env vars + this.recoveryEnabled = + this.configService.get('TASK_RECOVERY_ENABLED', 'true') === 'true'; + } + + onModuleInit() { + this.logger.log( + `Task Recovery Service initialized (enabled: ${this.recoveryEnabled}, ` + + `taskTimeout: ${this.taskTimeoutMs}ms, heartbeatTimeout: ${this.heartbeatTimeoutMs}ms)`, + ); + } + + /** + * Scheduled task recovery check (runs every 30 seconds) + * Only runs on leader to prevent duplicate processing + */ + @Cron(CronExpression.EVERY_30_SECONDS) + async checkStaleTasks(): Promise { + // Only run on leader + if (!this.leaderElection.isLeader) { + return; + } + + if (!this.recoveryEnabled) { + return; + } + + if (this.isProcessing) { + this.logger.debug('Skipping recovery check - already processing'); + return; + } + + this.isProcessing = true; + const startTime = Date.now(); + + try { + const summary = await this.runRecoveryCheck(); + + if (summary.detected > 0) { + this.logger.log( + `Recovery check completed: detected=${summary.detected}, ` + + `recovered=${summary.recovered}, dlq=${summary.movedToDLQ}, ` + + `failed=${summary.failed}, duration=${summary.duration}ms`, + ); + + this.eventEmitter.emit('recovery.completed', summary); + } + } catch (error) { + this.logger.error(`Recovery check failed: ${error.message}`, error.stack); + } finally { + this.isProcessing = false; + } + } + + /** + * Run a full recovery check + */ + async runRecoveryCheck(): Promise { + const startTime = Date.now(); + const summary: RecoverySummary = { + detected: 0, + recovered: 0, + movedToDLQ: 0, + failed: 0, + duration: 0, + }; + + // 1. Detect stale tasks by timeout + const timeoutTasks = await this.detectTimeoutTasks(); + summary.detected += timeoutTasks.length; + + // 2. Detect tasks assigned to offline agents + const offlineAgentTasks = await this.detectOfflineAgentTasks(); + summary.detected += offlineAgentTasks.length; + + // 3. Recover detected tasks + for (const task of [...timeoutTasks, ...offlineAgentTasks]) { + const result = await this.recoverTask(task); + if (result.success) { + if (result.action === 'REASSIGNED') { + summary.recovered++; + } else if (result.action === 'MOVED_TO_DLQ') { + summary.movedToDLQ++; + } + } else { + summary.failed++; + } + } + + summary.duration = Date.now() - startTime; + return summary; + } + + /** + * Detect tasks that have timed out + */ + private async detectTimeoutTasks(): Promise> { + const timeoutThreshold = new Date(Date.now() - this.taskTimeoutMs); + + // Find node runs that are RUNNING but haven't completed within timeout + const staleRuns = await this.prisma.workflowNodeRun.findMany({ + where: { + status: 'RUNNING', + startedAt: { + lt: timeoutThreshold, + }, + }, + include: { + node: { + include: { + workflowRun: true, + }, + }, + }, + }); + + const results: Array<{ + nodeRunId: string; + workflowRunId: string; + tenantId: string; + assignmentId: string | null; + agentId: string | null; + reason: StaleReason; + assignedAt: Date | null; + }> = []; + + for (const run of staleRuns) { + // Check if already tracked as stale + const existingStale = await this.prisma.staleTask.findUnique({ + where: { nodeRunId: run.id }, + }); + + if (!existingStale) { + // Get current assignment + const assignment = await this.prisma.taskAssignment.findFirst({ + where: { + nodeRunId: run.id, + status: { in: ['ASSIGNED', 'RUNNING'] }, + }, + orderBy: { createdAt: 'desc' }, + }); + + results.push({ + nodeRunId: run.id, + workflowRunId: run.node.workflowRunId, + tenantId: run.node.workflowRun.tenantId, + assignmentId: assignment?.id ?? null, + agentId: assignment?.agentId ?? null, + reason: StaleReason.TIMEOUT, + assignedAt: assignment?.dispatchedAt ?? null, + }); + + // Record stale task + await this.prisma.staleTask.create({ + data: { + nodeRunId: run.id, + workflowRunId: run.node.workflowRunId, + tenantId: run.node.workflowRun.tenantId, + staleReason: StaleReason.TIMEOUT, + originalAgentId: assignment?.agentId, + assignedAt: assignment?.dispatchedAt, + }, + }); + } + } + + return results; + } + + /** + * Detect tasks assigned to offline agents + */ + private async detectOfflineAgentTasks(): Promise> { + const heartbeatThreshold = new Date(Date.now() - this.heartbeatTimeoutMs); + + // Find assignments to agents that haven't sent heartbeat + const staleAssignments = await this.prisma.taskAssignment.findMany({ + where: { + status: { in: ['ASSIGNED', 'RUNNING'] }, + agent: { + lastHeartbeatAt: { + lt: heartbeatThreshold, + }, + }, + }, + include: { + agent: true, + }, + }); + + const results: Array<{ + nodeRunId: string; + workflowRunId: string; + tenantId: string; + assignmentId: string | null; + agentId: string | null; + reason: StaleReason; + assignedAt: Date | null; + }> = []; + + for (const assignment of staleAssignments) { + // Get the node run details + const nodeRun = await this.prisma.workflowNodeRun.findUnique({ + where: { id: assignment.nodeRunId }, + include: { + node: { + include: { + workflowRun: true, + }, + }, + }, + }); + + if (!nodeRun) continue; + + // Check if already tracked as stale + const existingStale = await this.prisma.staleTask.findUnique({ + where: { nodeRunId: nodeRun.id }, + }); + + if (!existingStale) { + results.push({ + nodeRunId: nodeRun.id, + workflowRunId: nodeRun.node.workflowRunId, + tenantId: nodeRun.node.workflowRun.tenantId, + assignmentId: assignment.id, + agentId: assignment.agentId, + reason: StaleReason.AGENT_OFFLINE, + assignedAt: assignment.dispatchedAt, + }); + + // Record stale task + await this.prisma.staleTask.create({ + data: { + nodeRunId: nodeRun.id, + workflowRunId: nodeRun.node.workflowRunId, + tenantId: nodeRun.node.workflowRun.tenantId, + staleReason: StaleReason.AGENT_OFFLINE, + originalAgentId: assignment.agentId, + assignedAt: assignment.dispatchedAt, + lastHeartbeatAt: assignment.agent.lastHeartbeatAt, + }, + }); + } + } + + return results; + } + + /** + * Recover a stale task + */ + private async recoverTask(task: { + nodeRunId: string; + workflowRunId: string; + tenantId: string; + assignmentId: string | null; + agentId: string | null; + reason: StaleReason; + assignedAt: Date | null; + }): Promise { + try { + // Get stale task record + const staleTask = await this.prisma.staleTask.findUnique({ + where: { nodeRunId: task.nodeRunId }, + }); + + if (!staleTask) { + return { + nodeRunId: task.nodeRunId, + success: false, + action: 'SKIPPED', + error: 'Stale task record not found', + }; + } + + // Check if we've exceeded max recovery attempts + if (staleTask.recoveryAttempts >= this.maxRecoveryAttempts) { + return await this.moveToDeadLetterQueue(task, staleTask); + } + + // Update recovery attempts + await this.prisma.staleTask.update({ + where: { nodeRunId: task.nodeRunId }, + data: { + status: 'RECOVERING', + recoveryAttempts: staleTask.recoveryAttempts + 1, + }, + }); + + // Mark old assignment as failed + if (task.assignmentId) { + await this.prisma.taskAssignment.update({ + where: { id: task.assignmentId }, + data: { + status: 'FAILED', + error: `Task stale: ${task.reason}`, + completedAt: new Date(), + }, + }); + } + + // Reset node run status to allow reassignment + await this.prisma.workflowNodeRun.update({ + where: { id: task.nodeRunId }, + data: { + status: 'READY', + error: `Recovered from stale state: ${task.reason}`, + }, + }); + + // Try to find a new agent (use least loaded for recovery) + const newAgent = await this.agentRouter.routeLeastLoaded(); + + if (!newAgent) { + this.logger.warn( + `No healthy agent available for task ${task.nodeRunId}`, + ); + + await this.prisma.staleTask.update({ + where: { nodeRunId: task.nodeRunId }, + data: { + status: 'FAILED', + errorMessage: 'No healthy agent available', + }, + }); + + return { + nodeRunId: task.nodeRunId, + success: false, + action: 'SKIPPED', + error: 'No healthy agent available', + }; + } + + // Create new assignment + await this.prisma.taskAssignment.create({ + data: { + nodeRunId: task.nodeRunId, + agentId: newAgent.id, + status: 'ASSIGNED', + routingReason: 'recovery_reassignment', + previousAssignmentId: task.assignmentId, + attempt: staleTask.recoveryAttempts + 1, + }, + }); + + // Update stale task as recovered + await this.prisma.staleTask.update({ + where: { nodeRunId: task.nodeRunId }, + data: { + status: 'RECOVERED', + newAgentId: newAgent.id, + recoveredAt: new Date(), + }, + }); + + // Log recovery action + await this.logRecoveryAction( + task.tenantId, + task.nodeRunId, + 'TASK_REASSIGNED', + 'RUNNING', + 'READY', + `Reassigned from ${task.agentId ?? 'unknown'} to ${newAgent.id} due to ${task.reason}`, + true, + ); + + this.logger.log( + `Task ${task.nodeRunId} recovered: reassigned to agent ${newAgent.id}`, + ); + + this.eventEmitter.emit('task.recovered', { + nodeRunId: task.nodeRunId, + oldAgentId: task.agentId, + newAgentId: newAgent.id, + reason: task.reason, + }); + + return { + nodeRunId: task.nodeRunId, + success: true, + action: 'REASSIGNED', + newAgentId: newAgent.id, + }; + } catch (error) { + this.logger.error( + `Failed to recover task ${task.nodeRunId}: ${error.message}`, + error.stack, + ); + + await this.prisma.staleTask.update({ + where: { nodeRunId: task.nodeRunId }, + data: { + status: 'FAILED', + errorMessage: error.message, + }, + }); + + return { + nodeRunId: task.nodeRunId, + success: false, + action: 'SKIPPED', + error: error.message, + }; + } + } + + /** + * Move a task to the dead letter queue + */ + private async moveToDeadLetterQueue( + task: { + nodeRunId: string; + workflowRunId: string; + tenantId: string; + reason: StaleReason; + }, + staleTask: { recoveryAttempts: number }, + ): Promise { + try { + // Get node run details for the DLQ entry + const nodeRun = await this.prisma.workflowNodeRun.findUnique({ + where: { id: task.nodeRunId }, + include: { + node: true, + }, + }); + + if (!nodeRun) { + return { + nodeRunId: task.nodeRunId, + success: false, + action: 'SKIPPED', + error: 'Node run not found', + }; + } + + // Create DLQ entry + await this.prisma.deadLetterEntry.create({ + data: { + tenantId: task.tenantId, + workflowRunId: task.workflowRunId, + nodeRunId: task.nodeRunId, + taskType: 'WORKFLOW_NODE', + originalPayload: { + nodeId: nodeRun.nodeId, + nodeName: nodeRun.node.name, + nodeType: nodeRun.node.type, + attempt: nodeRun.attempt, + input: nodeRun.input, + }, + failureReason: `Max recovery attempts exceeded: ${task.reason}`, + failureCount: staleTask.recoveryAttempts, + lastFailedAt: new Date(), + failureCategory: 'PERMANENT', + severity: 'high', + maxRetries: this.maxRecoveryAttempts, + retryCount: staleTask.recoveryAttempts, + }, + }); + + // Update node run as failed + await this.prisma.workflowNodeRun.update({ + where: { id: task.nodeRunId }, + data: { + status: 'FAILED', + error: `Moved to DLQ after ${staleTask.recoveryAttempts} recovery attempts`, + completedAt: new Date(), + }, + }); + + // Update stale task + await this.prisma.staleTask.update({ + where: { nodeRunId: task.nodeRunId }, + data: { + status: 'FAILED', + errorMessage: 'Moved to dead letter queue', + }, + }); + + // Log recovery action + await this.logRecoveryAction( + task.tenantId, + task.nodeRunId, + 'MOVED_TO_DLQ', + 'RECOVERING', + 'FAILED', + `Exceeded ${this.maxRecoveryAttempts} recovery attempts`, + true, + ); + + this.logger.warn( + `Task ${task.nodeRunId} moved to DLQ after ${staleTask.recoveryAttempts} recovery attempts`, + ); + + this.eventEmitter.emit('task.moved-to-dlq', { + nodeRunId: task.nodeRunId, + workflowRunId: task.workflowRunId, + reason: task.reason, + attempts: staleTask.recoveryAttempts, + }); + + return { + nodeRunId: task.nodeRunId, + success: true, + action: 'MOVED_TO_DLQ', + }; + } catch (error) { + this.logger.error( + `Failed to move task ${task.nodeRunId} to DLQ: ${error.message}`, + error.stack, + ); + + return { + nodeRunId: task.nodeRunId, + success: false, + action: 'SKIPPED', + error: error.message, + }; + } + } + + /** + * Log a recovery action + */ + private async logRecoveryAction( + tenantId: string, + targetId: string, + actionType: string, + previousState: string, + newState: string, + reason: string, + success: boolean, + errorMessage?: string, + ): Promise { + try { + await this.prisma.recoveryLog.create({ + data: { + tenantId, + actionType, + targetType: 'NODE', + targetId, + previousState, + newState, + reason, + actorType: 'SYSTEM', + success, + errorMessage, + }, + }); + } catch (error) { + this.logger.error(`Failed to log recovery action: ${error.message}`); + } + } + + /** + * Get recovery statistics + */ + async getRecoveryStats(): Promise<{ + pendingStaleTasks: number; + recoveringTasks: number; + recoveredLast24h: number; + failedLast24h: number; + dlqEntriesCount: number; + }> { + const oneDayAgo = new Date(Date.now() - 24 * 60 * 60 * 1000); + + const [pending, recovering, recovered, failed, dlq] = await Promise.all([ + this.prisma.staleTask.count({ where: { status: 'DETECTED' } }), + this.prisma.staleTask.count({ where: { status: 'RECOVERING' } }), + this.prisma.staleTask.count({ + where: { + status: 'RECOVERED', + recoveredAt: { gte: oneDayAgo }, + }, + }), + this.prisma.staleTask.count({ + where: { + status: 'FAILED', + updatedAt: { gte: oneDayAgo }, + }, + }), + this.prisma.deadLetterEntry.count({ + where: { status: 'PENDING' }, + }), + ]); + + return { + pendingStaleTasks: pending, + recoveringTasks: recovering, + recoveredLast24h: recovered, + failedLast24h: failed, + dlqEntriesCount: dlq, + }; + } + + /** + * Manually trigger recovery for a specific task + */ + async manualRecover(nodeRunId: string): Promise { + const nodeRun = await this.prisma.workflowNodeRun.findUnique({ + where: { id: nodeRunId }, + include: { + node: { + include: { + workflowRun: true, + }, + }, + }, + }); + + if (!nodeRun) { + return { + nodeRunId, + success: false, + action: 'SKIPPED', + error: 'Node run not found', + }; + } + + // Get or create stale task record + let staleTask = await this.prisma.staleTask.findUnique({ + where: { nodeRunId }, + }); + + if (!staleTask) { + staleTask = await this.prisma.staleTask.create({ + data: { + nodeRunId, + workflowRunId: nodeRun.node.workflowRunId, + tenantId: nodeRun.node.workflowRun.tenantId, + staleReason: 'MANUAL_RECOVERY' as StaleReason, + recoveryAttempts: 0, + }, + }); + } + + // Get current assignment + const assignment = await this.prisma.taskAssignment.findFirst({ + where: { + nodeRunId, + status: { in: ['ASSIGNED', 'RUNNING'] }, + }, + orderBy: { createdAt: 'desc' }, + }); + + return this.recoverTask({ + nodeRunId, + workflowRunId: nodeRun.node.workflowRunId, + tenantId: nodeRun.node.workflowRun.tenantId, + assignmentId: assignment?.id ?? null, + agentId: assignment?.agentId ?? null, + reason: StaleReason.TIMEOUT, + assignedAt: assignment?.dispatchedAt ?? null, + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/teams-notification.service.ts b/packages/bytebot-workflow-orchestrator/src/services/teams-notification.service.ts new file mode 100644 index 000000000..6f89ec61f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/teams-notification.service.ts @@ -0,0 +1,956 @@ +/** + * Microsoft Teams Notification Service + * Phase 8: External Integrations + * + * Sends notifications to Microsoft Teams via Incoming Webhooks. + * Uses Adaptive Cards for rich message formatting. + * + * Features: + * - Rich Adaptive Card message formatting + * - Goal run notifications (started, completed, failed) + * - Batch progress notifications + * - Approval request notifications with action buttons + * - Configurable per-channel settings + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import * as crypto from 'crypto'; + +// Teams configuration interface +export interface TeamsConfig { + webhookUrl: string; +} + +// Adaptive Card interfaces +interface AdaptiveCardElement { + type: string; + text?: string; + size?: string; + weight?: string; + color?: string; + wrap?: boolean; + spacing?: string; + separator?: boolean; + columns?: AdaptiveCardElement[]; + width?: string; + items?: AdaptiveCardElement[]; + facts?: Array<{ title: string; value: string }>; + style?: string; + actions?: AdaptiveCardAction[]; + url?: string; + altText?: string; + horizontalAlignment?: string; + isSubtle?: boolean; +} + +interface AdaptiveCardAction { + type: string; + title: string; + url?: string; + style?: string; +} + +interface AdaptiveCard { + type: 'AdaptiveCard'; + version: string; + body: AdaptiveCardElement[]; + actions?: AdaptiveCardAction[]; + $schema: string; + msteams?: { + width?: string; + }; +} + +interface TeamsMessage { + type: 'message'; + attachments: Array<{ + contentType: string; + contentUrl: string | null; + content: AdaptiveCard; + }>; +} + +// Event types (reuse from Slack) +export enum TeamsEventType { + GOAL_STARTED = 'goal.started', + GOAL_COMPLETED = 'goal.completed', + GOAL_FAILED = 'goal.failed', + GOAL_CANCELLED = 'goal.cancelled', + BATCH_STARTED = 'batch.started', + BATCH_PROGRESS = 'batch.progress', + BATCH_COMPLETED = 'batch.completed', + BATCH_FAILED = 'batch.failed', + APPROVAL_REQUESTED = 'approval.requested', + APPROVAL_APPROVED = 'approval.approved', + APPROVAL_REJECTED = 'approval.rejected', + APPROVAL_EXPIRED = 'approval.expired', + USER_PROMPT_CREATED = 'user_prompt.created', + USER_PROMPT_RESOLVED = 'user_prompt.resolved', + USER_PROMPT_CANCELLED = 'user_prompt.cancelled', +} + +// Event data interfaces (same as Slack) +export interface GoalEventData { + goalRunId: string; + tenantId: string; + goal: string; + status: string; + phase?: string; + templateName?: string; + duration?: number; + error?: string; + stepsCompleted?: number; + totalSteps?: number; + links?: { goalRun?: string }; +} + +export interface BatchEventData { + batchId: string; + tenantId: string; + name: string; + status: string; + totalGoals: number; + completedGoals: number; + failedGoals: number; + progress: number; + links?: { batch?: string }; +} + +export interface ApprovalEventData { + approvalId: string; + tenantId: string; + toolName: string; + riskLevel: string; + summary: string; + decision?: { + status: string; + reviewerId?: string; + reason?: string; + }; + links?: { approval?: string }; +} + +export interface UserPromptEventData { + promptId: string; + tenantId: string; + goalRunId: string; + checklistItemId: string | null; + kind: string; + stepDescription?: string | null; + links?: { goalRun?: string; prompt?: string; desktopTakeover?: string | null }; +} + +export interface TeamsDeliveryResult { + success: boolean; + channelId: string; + eventId: string; + statusCode?: number; + error?: string; + attempts: number; + deliveredAt?: Date; +} + +const RETRY_CONFIG = { + maxAttempts: 3, + baseDelayMs: 1000, + maxDelayMs: 30000, + backoffMultiplier: 2, +}; + +@Injectable() +export class TeamsNotificationService { + private readonly logger = new Logger(TeamsNotificationService.name); + private readonly baseUrl: string; + private readonly timeoutMs: number; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + ) { + this.baseUrl = this.configService.get( + 'APP_BASE_URL', + 'https://app.bytebot.ai', + ); + this.timeoutMs = parseInt( + this.configService.get('TEAMS_TIMEOUT_MS', '10000'), + 10, + ); + this.logger.log('TeamsNotificationService initialized'); + } + + /** + * Send a goal event notification + */ + async sendGoalNotification( + eventType: TeamsEventType, + data: GoalEventData, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Teams channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const card = this.buildGoalCard(eventType, data); + return this.deliverToChannels(channels, eventType, card); + } + + /** + * Send a batch event notification + */ + async sendBatchNotification( + eventType: TeamsEventType, + data: BatchEventData, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Teams channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const card = this.buildBatchCard(eventType, data); + return this.deliverToChannels(channels, eventType, card); + } + + /** + * Send an approval event notification + */ + async sendApprovalNotification( + eventType: TeamsEventType, + data: ApprovalEventData, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Teams channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const card = this.buildApprovalCard(eventType, data); + return this.deliverToChannels(channels, eventType, card); + } + + /** + * Send a user prompt notification (durable WAIT surface) + */ + async sendUserPromptNotification( + eventType: TeamsEventType, + data: UserPromptEventData, + options?: { eventId?: string }, + ): Promise { + const channels = await this.getActiveChannels(data.tenantId, eventType); + + if (channels.length === 0) { + this.logger.debug(`No Teams channels for ${eventType} in tenant ${data.tenantId}`); + return []; + } + + const card = this.buildUserPromptCard(eventType, data); + return this.deliverToChannels(channels, eventType, card, options); + } + + private buildUserPromptCard(eventType: TeamsEventType, data: UserPromptEventData): AdaptiveCard { + const { color, title, icon } = this.getUserPromptEventStyle(eventType); + const goalRunLink = data.links?.goalRun || `${this.baseUrl}/goals/${data.goalRunId}`; + const promptLink = data.links?.prompt || `${this.baseUrl}/prompts/${data.promptId}`; + const desktopTakeoverLink = data.links?.desktopTakeover || null; + + const body: AdaptiveCardElement[] = [ + { + type: 'ColumnSet', + columns: [ + { + type: 'Column', + width: 'auto', + items: [{ type: 'TextBlock', text: icon, size: 'Large' }], + }, + { + type: 'Column', + width: 'stretch', + items: [ + { + type: 'TextBlock', + text: title, + size: 'Large', + weight: 'Bolder', + color, + }, + ], + }, + ], + }, + { + type: 'TextBlock', + text: `Step: ${data.stepDescription || '(no description)'}`, + wrap: true, + spacing: 'Medium', + }, + { + type: 'FactSet', + facts: [ + { title: 'Prompt Kind', value: data.kind }, + { title: 'Goal Run', value: data.goalRunId }, + { title: 'Prompt ID', value: data.promptId }, + ], + }, + ]; + + const actions: AdaptiveCardAction[] = []; + if (promptLink) { + actions.push({ type: 'Action.OpenUrl', title: 'Open Prompt', url: promptLink }); + } + if (goalRunLink) { + actions.push({ type: 'Action.OpenUrl', title: 'Open Goal Run', url: goalRunLink }); + } + if (desktopTakeoverLink) { + actions.push({ type: 'Action.OpenUrl', title: 'Take Over Desktop', url: desktopTakeoverLink }); + } + + return { + type: 'AdaptiveCard', + version: '1.4', + $schema: 'http://adaptivecards.io/schemas/adaptive-card.json', + body, + actions: actions.length > 0 ? actions : undefined, + msteams: { width: 'Full' }, + }; + } + /** + * Build Adaptive Card for goal events + */ + private buildGoalCard(eventType: TeamsEventType, data: GoalEventData): AdaptiveCard { + const { color, title, icon } = this.getGoalEventStyle(eventType); + + const body: AdaptiveCardElement[] = [ + // Header with icon and title + { + type: 'ColumnSet', + columns: [ + { + type: 'Column', + width: 'auto', + items: [ + { + type: 'TextBlock', + text: icon, + size: 'Large', + }, + ], + }, + { + type: 'Column', + width: 'stretch', + items: [ + { + type: 'TextBlock', + text: title, + size: 'Large', + weight: 'Bolder', + color, + }, + ], + }, + ], + }, + // Goal text + { + type: 'TextBlock', + text: this.truncateText(data.goal, 200), + wrap: true, + spacing: 'Medium', + }, + // Facts + { + type: 'FactSet', + facts: [ + { title: 'Status', value: data.status }, + ...(data.phase ? [{ title: 'Phase', value: data.phase }] : []), + ...(data.templateName ? [{ title: 'Template', value: data.templateName }] : []), + ...(data.duration ? [{ title: 'Duration', value: this.formatDuration(data.duration) }] : []), + ...(data.stepsCompleted !== undefined + ? [{ title: 'Steps', value: `${data.stepsCompleted}/${data.totalSteps || '?'}` }] + : []), + ], + spacing: 'Medium', + }, + ]; + + // Add error for failed goals + if (data.error && eventType === TeamsEventType.GOAL_FAILED) { + body.push({ + type: 'TextBlock', + text: `**Error:** ${this.truncateText(data.error, 300)}`, + wrap: true, + color: 'Attention', + spacing: 'Medium', + }); + } + + // Footer with ID and timestamp + body.push({ + type: 'TextBlock', + text: `Goal Run ID: ${data.goalRunId} | ${new Date().toISOString()}`, + size: 'Small', + isSubtle: true, + spacing: 'Medium', + }); + + const actions: AdaptiveCardAction[] = []; + if (data.links?.goalRun) { + actions.push({ + type: 'Action.OpenUrl', + title: 'View Goal Run', + url: data.links.goalRun, + }); + } + + return { + type: 'AdaptiveCard', + version: '1.4', + $schema: 'http://adaptivecards.io/schemas/adaptive-card.json', + msteams: { width: 'Full' }, + body, + actions: actions.length > 0 ? actions : undefined, + }; + } + + /** + * Build Adaptive Card for batch events + */ + private buildBatchCard(eventType: TeamsEventType, data: BatchEventData): AdaptiveCard { + const { color, title, icon } = this.getBatchEventStyle(eventType); + + const progressPercent = Math.min(100, Math.max(0, data.progress)); + + const body: AdaptiveCardElement[] = [ + // Header + { + type: 'ColumnSet', + columns: [ + { + type: 'Column', + width: 'auto', + items: [{ type: 'TextBlock', text: icon, size: 'Large' }], + }, + { + type: 'Column', + width: 'stretch', + items: [ + { + type: 'TextBlock', + text: title, + size: 'Large', + weight: 'Bolder', + color, + }, + ], + }, + ], + }, + // Batch name + { + type: 'TextBlock', + text: `**Batch:** ${data.name}`, + wrap: true, + spacing: 'Medium', + }, + // Progress bar simulation using columns + { + type: 'ColumnSet', + spacing: 'Medium', + columns: [ + { + type: 'Column', + width: `${progressPercent}`, + items: [ + { + type: 'Container', + style: 'emphasis', + items: [{ type: 'TextBlock', text: ' ', size: 'Small' }], + }, + ], + }, + { + type: 'Column', + width: `${100 - progressPercent}`, + items: [{ type: 'TextBlock', text: ' ', size: 'Small' }], + }, + ], + }, + // Facts + { + type: 'FactSet', + facts: [ + { title: 'Status', value: data.status }, + { title: 'Progress', value: `${data.progress}%` }, + { title: 'Completed', value: `${data.completedGoals}/${data.totalGoals}` }, + { title: 'Failed', value: data.failedGoals.toString() }, + ], + spacing: 'Medium', + }, + // Footer + { + type: 'TextBlock', + text: `Batch ID: ${data.batchId} | ${new Date().toISOString()}`, + size: 'Small', + isSubtle: true, + spacing: 'Medium', + }, + ]; + + const actions: AdaptiveCardAction[] = []; + if (data.links?.batch) { + actions.push({ + type: 'Action.OpenUrl', + title: 'View Batch', + url: data.links.batch, + }); + } + + return { + type: 'AdaptiveCard', + version: '1.4', + $schema: 'http://adaptivecards.io/schemas/adaptive-card.json', + msteams: { width: 'Full' }, + body, + actions: actions.length > 0 ? actions : undefined, + }; + } + + /** + * Build Adaptive Card for approval events + */ + private buildApprovalCard(eventType: TeamsEventType, data: ApprovalEventData): AdaptiveCard { + const { color, title, icon } = this.getApprovalEventStyle(eventType); + + const body: AdaptiveCardElement[] = [ + // Header + { + type: 'ColumnSet', + columns: [ + { + type: 'Column', + width: 'auto', + items: [{ type: 'TextBlock', text: icon, size: 'Large' }], + }, + { + type: 'Column', + width: 'stretch', + items: [ + { + type: 'TextBlock', + text: title, + size: 'Large', + weight: 'Bolder', + color, + }, + ], + }, + ], + }, + // Summary + { + type: 'TextBlock', + text: `**Action:** ${data.summary}`, + wrap: true, + spacing: 'Medium', + }, + // Facts + { + type: 'FactSet', + facts: [ + { title: 'Tool', value: data.toolName }, + { title: 'Risk Level', value: `${this.getRiskIcon(data.riskLevel)} ${data.riskLevel}` }, + ...(data.decision + ? [ + { title: 'Decision', value: data.decision.status }, + ...(data.decision.reviewerId + ? [{ title: 'Reviewer', value: data.decision.reviewerId }] + : []), + ...(data.decision.reason + ? [{ title: 'Reason', value: data.decision.reason }] + : []), + ] + : []), + ], + spacing: 'Medium', + }, + // Footer + { + type: 'TextBlock', + text: `Approval ID: ${data.approvalId} | ${new Date().toISOString()}`, + size: 'Small', + isSubtle: true, + spacing: 'Medium', + }, + ]; + + const actions: AdaptiveCardAction[] = []; + if (data.links?.approval) { + actions.push({ + type: 'Action.OpenUrl', + title: eventType === TeamsEventType.APPROVAL_REQUESTED ? 'Review & Approve' : 'View Details', + url: data.links.approval, + style: eventType === TeamsEventType.APPROVAL_REQUESTED ? 'destructive' : 'default', + }); + } + + return { + type: 'AdaptiveCard', + version: '1.4', + $schema: 'http://adaptivecards.io/schemas/adaptive-card.json', + msteams: { width: 'Full' }, + body, + actions: actions.length > 0 ? actions : undefined, + }; + } + + /** + * Get active Teams channels + */ + private async getActiveChannels( + tenantId: string, + eventType: TeamsEventType, + ): Promise> { + try { + const channels = await this.prisma.notificationChannel.findMany({ + where: { + tenantId, + type: 'TEAMS', + enabled: true, + events: { has: eventType }, + }, + }); + + return channels.map((c) => ({ + id: c.id, + config: c.config as unknown as TeamsConfig, + })); + } catch (error: any) { + if (error.code === 'P2021' || error.message?.includes('does not exist')) { + return []; + } + throw error; + } + } + + /** + * Deliver to multiple channels + */ + private async deliverToChannels( + channels: Array<{ id: string; config: TeamsConfig }>, + eventType: TeamsEventType, + card: AdaptiveCard, + options?: { eventId?: string }, + ): Promise { + const eventId = options?.eventId ?? this.generateEventId(); + const results: TeamsDeliveryResult[] = []; + + for (const channel of channels) { + const result = await this.deliverToChannel(channel, eventType, eventId, card); + results.push(result); + await this.recordDelivery(channel.id, eventId, eventType, result, card); + } + + return results; + } + + /** + * Deliver to single channel with retry + */ + private async deliverToChannel( + channel: { id: string; config: TeamsConfig }, + eventType: TeamsEventType, + eventId: string, + card: AdaptiveCard, + ): Promise { + let lastError: string | undefined; + let lastStatusCode: number | undefined; + let attempts = 0; + + const message: TeamsMessage = { + type: 'message', + attachments: [ + { + contentType: 'application/vnd.microsoft.card.adaptive', + contentUrl: null, + content: card, + }, + ], + }; + + for (let attempt = 1; attempt <= RETRY_CONFIG.maxAttempts; attempt++) { + attempts = attempt; + + try { + const result = await this.sendToTeams(channel.config.webhookUrl, message); + + if (result.success) { + this.logger.log(`Teams notification delivered: ${channel.id} (attempt ${attempt})`); + return { + success: true, + channelId: channel.id, + eventId, + statusCode: result.statusCode, + attempts, + deliveredAt: new Date(), + }; + } + + lastStatusCode = result.statusCode; + lastError = result.error; + + if (result.statusCode && result.statusCode >= 400 && result.statusCode < 500) { + break; + } + } catch (error: any) { + lastError = error.message; + this.logger.warn(`Teams delivery failed (attempt ${attempt}): ${error.message}`); + } + + if (attempt < RETRY_CONFIG.maxAttempts) { + const delay = Math.min( + RETRY_CONFIG.baseDelayMs * Math.pow(RETRY_CONFIG.backoffMultiplier, attempt - 1), + RETRY_CONFIG.maxDelayMs, + ); + await this.sleep(delay); + } + } + + this.logger.error(`Teams delivery failed after ${attempts} attempts: ${lastError}`); + return { + success: false, + channelId: channel.id, + eventId, + statusCode: lastStatusCode, + error: lastError, + attempts, + }; + } + + /** + * Send to Teams webhook + */ + private async sendToTeams( + webhookUrl: string, + message: TeamsMessage, + ): Promise<{ success: boolean; statusCode?: number; error?: string }> { + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs); + + try { + const response = await fetch(webhookUrl, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(message), + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (response.ok) { + return { success: true, statusCode: response.status }; + } + + const responseText = await response.text().catch(() => ''); + return { + success: false, + statusCode: response.status, + error: `HTTP ${response.status}: ${responseText.substring(0, 200)}`, + }; + } catch (error: any) { + clearTimeout(timeoutId); + + if (error.name === 'AbortError') { + return { success: false, error: `Timeout after ${this.timeoutMs}ms` }; + } + + return { success: false, error: error.message }; + } + } + + /** + * Record delivery + */ + private async recordDelivery( + channelId: string, + eventId: string, + eventType: TeamsEventType, + result: TeamsDeliveryResult, + payload: AdaptiveCard, + ): Promise { + try { + await this.prisma.notificationDelivery.create({ + data: { + channelId, + eventId, + eventType, + success: result.success, + statusCode: result.statusCode, + error: result.error, + attempts: result.attempts, + payload: payload as any, + deliveredAt: result.deliveredAt, + }, + }); + } catch (error: any) { + this.logger.warn(`Failed to record delivery: ${error.message}`); + } + } + + /** + * Test channel + */ + async testChannel(channelId: string): Promise { + const channel = await this.prisma.notificationChannel.findUnique({ + where: { id: channelId }, + }); + + if (!channel || channel.type !== 'TEAMS') { + throw new Error('Teams channel not found'); + } + + const config = channel.config as unknown as TeamsConfig; + const eventId = this.generateEventId(); + + const testCard: AdaptiveCard = { + type: 'AdaptiveCard', + version: '1.4', + $schema: 'http://adaptivecards.io/schemas/adaptive-card.json', + body: [ + { + type: 'TextBlock', + text: '✅ ByteBot Connection Test', + size: 'Large', + weight: 'Bolder', + color: 'Good', + }, + { + type: 'TextBlock', + text: 'This is a test notification from ByteBot. If you see this message, your Teams integration is working correctly!', + wrap: true, + spacing: 'Medium', + }, + { + type: 'TextBlock', + text: `Channel: ${channel.name} | Tenant: ${channel.tenantId} | ${new Date().toISOString()}`, + size: 'Small', + isSubtle: true, + spacing: 'Medium', + }, + ], + }; + + const result = await this.deliverToChannel( + { id: channelId, config }, + TeamsEventType.GOAL_STARTED, + eventId, + testCard, + ); + + if (result.success) { + await this.prisma.notificationChannel.update({ + where: { id: channelId }, + data: { verified: true }, + }); + } + + return result; + } + + // Helper methods + + private getGoalEventStyle(eventType: TeamsEventType): { color: string; title: string; icon: string } { + switch (eventType) { + case TeamsEventType.GOAL_STARTED: + return { color: 'Accent', title: 'Goal Started', icon: '🚀' }; + case TeamsEventType.GOAL_COMPLETED: + return { color: 'Good', title: 'Goal Completed', icon: '✅' }; + case TeamsEventType.GOAL_FAILED: + return { color: 'Attention', title: 'Goal Failed', icon: '❌' }; + case TeamsEventType.GOAL_CANCELLED: + return { color: 'Default', title: 'Goal Cancelled', icon: '🚫' }; + default: + return { color: 'Default', title: 'Goal Update', icon: 'ℹ️' }; + } + } + + private getBatchEventStyle(eventType: TeamsEventType): { color: string; title: string; icon: string } { + switch (eventType) { + case TeamsEventType.BATCH_STARTED: + return { color: 'Accent', title: 'Batch Started', icon: '📦' }; + case TeamsEventType.BATCH_PROGRESS: + return { color: 'Warning', title: 'Batch Progress', icon: '⏳' }; + case TeamsEventType.BATCH_COMPLETED: + return { color: 'Good', title: 'Batch Completed', icon: '🎉' }; + case TeamsEventType.BATCH_FAILED: + return { color: 'Attention', title: 'Batch Failed', icon: '⚠️' }; + default: + return { color: 'Default', title: 'Batch Update', icon: '📦' }; + } + } + + private getApprovalEventStyle(eventType: TeamsEventType): { color: string; title: string; icon: string } { + switch (eventType) { + case TeamsEventType.APPROVAL_REQUESTED: + return { color: 'Warning', title: 'Approval Required', icon: '✋' }; + case TeamsEventType.APPROVAL_APPROVED: + return { color: 'Good', title: 'Approval Granted', icon: '👍' }; + case TeamsEventType.APPROVAL_REJECTED: + return { color: 'Attention', title: 'Approval Rejected', icon: '👎' }; + case TeamsEventType.APPROVAL_EXPIRED: + return { color: 'Default', title: 'Approval Expired', icon: '⏰' }; + default: + return { color: 'Default', title: 'Approval Update', icon: '📋' }; + } + } + + private getUserPromptEventStyle(eventType: TeamsEventType): { color: string; title: string; icon: string } { + switch (eventType) { + case TeamsEventType.USER_PROMPT_CREATED: + return { color: 'Warning', title: 'User Input Required', icon: '💬' }; + case TeamsEventType.USER_PROMPT_RESOLVED: + return { color: 'Good', title: 'User Input Resolved', icon: '✅' }; + case TeamsEventType.USER_PROMPT_CANCELLED: + return { color: 'Default', title: 'User Prompt Cancelled', icon: '⛔' }; + default: + return { color: 'Default', title: 'User Prompt Update', icon: '💬' }; + } + } + + private getRiskIcon(riskLevel: string): string { + switch (riskLevel?.toUpperCase()) { + case 'CRITICAL': return '🚨'; + case 'HIGH': return '⚠️'; + case 'MEDIUM': return '🔶'; + case 'LOW': return '🔷'; + default: return '❓'; + } + } + + private formatDuration(ms: number): string { + if (ms < 1000) return `${ms}ms`; + if (ms < 60000) return `${(ms / 1000).toFixed(1)}s`; + if (ms < 3600000) return `${Math.floor(ms / 60000)}m ${Math.floor((ms % 60000) / 1000)}s`; + return `${Math.floor(ms / 3600000)}h ${Math.floor((ms % 3600000) / 60000)}m`; + } + + private truncateText(text: string, maxLength: number): string { + if (text.length <= maxLength) return text; + return text.substring(0, maxLength - 3) + '...'; + } + + private generateEventId(): string { + const timestamp = Date.now().toString(36); + const random = crypto.randomBytes(8).toString('hex'); + return `teams_${timestamp}_${random}`; + } + + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/template-generation.service.ts b/packages/bytebot-workflow-orchestrator/src/services/template-generation.service.ts new file mode 100644 index 000000000..a9c565a5a --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/template-generation.service.ts @@ -0,0 +1,703 @@ +/** + * Template Generation Service + * Phase 9 (v5.4.0): Advanced AI Features + * + * Responsibilities: + * - Analyze completed goal runs to identify patterns + * - Automatically generate reusable templates from similar goals + * - Extract variables from goal patterns + * - Score and rank template quality + * - Suggest template improvements + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { TemplateVariable } from './goal-template.service'; +import { z } from 'zod'; + +// Zod schemas for LLM output validation +const ExtractedVariableSchema = z.object({ + name: z.string(), + type: z.enum(['string', 'number', 'boolean', 'select']), + required: z.boolean(), + description: z.string().optional(), + exampleValues: z.array(z.union([z.string(), z.number()])).optional(), + options: z.array(z.string()).optional(), +}); + +const GeneratedTemplateSchema = z.object({ + name: z.string(), + description: z.string(), + category: z.string().optional(), + goalPattern: z.string(), + variables: z.array(ExtractedVariableSchema), + checklistTemplate: z.array(z.object({ + order: z.number(), + descriptionTemplate: z.string(), + expectedOutcomeTemplate: z.string().optional(), + })).optional(), + confidence: z.number().min(0).max(1), + sourceGoalIds: z.array(z.string()).optional(), +}); + +type GeneratedTemplate = z.infer; + +// Public interfaces +export interface TemplateGenerationRequest { + tenantId: string; + options?: { + minGoalsForPattern?: number; + similarityThreshold?: number; + maxTemplates?: number; + includeChecklist?: boolean; + }; +} + +export interface TemplateCandidate { + name: string; + description: string; + category?: string; + goalPattern: string; + variables: TemplateVariable[]; + checklistTemplate?: Array<{ + order: number; + descriptionTemplate: string; + expectedOutcomeTemplate?: string; + }>; + confidence: number; + sourceGoalCount: number; + sourceGoalIds: string[]; + estimatedUsage: number; + qualityScore: number; +} + +export interface TemplateGenerationResult { + candidates: TemplateCandidate[]; + analyzedGoalsCount: number; + patternsFound: number; + processingTimeMs: number; +} + +export interface GoalCluster { + representative: string; + goals: Array<{ id: string; goal: string; createdAt: Date }>; + similarity: number; +} + +@Injectable() +export class TemplateGenerationService { + private readonly logger = new Logger(TemplateGenerationService.name); + private readonly llmModel: string; + private readonly llmApiKey: string; + private readonly llmApiUrl: string; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.llmModel = this.configService.get('LLM_MODEL', 'claude-3-5-sonnet-20241022'); + this.llmApiKey = this.configService.get('ANTHROPIC_API_KEY', ''); + this.llmApiUrl = this.configService.get('LLM_API_URL', 'https://api.anthropic.com/v1/messages'); + + this.logger.log('TemplateGenerationService initialized'); + } + + /** + * Analyze completed goals and generate template candidates + */ + async generateTemplatesFromHistory( + request: TemplateGenerationRequest, + ): Promise { + const startTime = Date.now(); + const { tenantId, options } = request; + + const minGoals = options?.minGoalsForPattern || 3; + const similarityThreshold = options?.similarityThreshold || 0.6; + const maxTemplates = options?.maxTemplates || 10; + + this.logger.log(`Generating templates for tenant ${tenantId}`); + + // Fetch completed goal runs + const completedGoals = await this.prisma.goalRun.findMany({ + where: { + tenantId, + status: 'COMPLETED', + }, + select: { + id: true, + goal: true, + createdAt: true, + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + orderBy: { createdAt: 'desc' }, + take: 500, // Analyze last 500 completed goals + }); + + if (completedGoals.length < minGoals) { + return { + candidates: [], + analyzedGoalsCount: completedGoals.length, + patternsFound: 0, + processingTimeMs: Date.now() - startTime, + }; + } + + // Cluster similar goals + const clusters = this.clusterSimilarGoals( + completedGoals.map(g => ({ id: g.id, goal: g.goal, createdAt: g.createdAt })), + similarityThreshold, + ); + + // Filter clusters with enough goals + const significantClusters = clusters.filter(c => c.goals.length >= minGoals); + + this.logger.log(`Found ${significantClusters.length} significant clusters from ${completedGoals.length} goals`); + + // Generate templates from clusters + const candidates: TemplateCandidate[] = []; + + for (const cluster of significantClusters.slice(0, maxTemplates)) { + try { + const template = await this.generateTemplateFromCluster( + cluster, + completedGoals, + options?.includeChecklist !== false, + ); + + if (template) { + candidates.push(template); + } + } catch (error: any) { + this.logger.warn(`Failed to generate template from cluster: ${error.message}`); + } + } + + // Sort by quality score + candidates.sort((a, b) => b.qualityScore - a.qualityScore); + + // Emit event + this.eventEmitter.emit('template-generation.completed', { + tenantId, + candidatesCount: candidates.length, + analyzedGoals: completedGoals.length, + }); + + return { + candidates: candidates.slice(0, maxTemplates), + analyzedGoalsCount: completedGoals.length, + patternsFound: significantClusters.length, + processingTimeMs: Date.now() - startTime, + }; + } + + /** + * Generate a template from a single goal + */ + async generateTemplateFromGoal( + tenantId: string, + goalRunId: string, + ): Promise { + const goalRun = await this.prisma.goalRun.findUnique({ + where: { id: goalRunId }, + include: { + planVersions: { + orderBy: { version: 'desc' }, + take: 1, + include: { + checklistItems: { + orderBy: { order: 'asc' }, + }, + }, + }, + }, + }); + + if (!goalRun) { + return null; + } + + const prompt = this.buildSingleGoalTemplatePrompt( + goalRun.goal, + goalRun.planVersions[0]?.checklistItems || [], + ); + + try { + const response = await this.callLLM(prompt); + const parsed = this.parseLLMResponse(response); + const validated = GeneratedTemplateSchema.safeParse(parsed); + + if (validated.success) { + return this.transformToCandidate(validated.data, [goalRunId]); + } + + return this.generateFallbackTemplate(goalRun.goal, goalRunId); + } catch (error: any) { + this.logger.error(`Single goal template generation failed: ${error.message}`); + return this.generateFallbackTemplate(goalRun.goal, goalRunId); + } + } + + /** + * Analyze a goal and suggest variable extraction + */ + async suggestVariables(goal: string): Promise> { + const suggestions: Array<{ + name: string; + value: string; + type: 'string' | 'number' | 'boolean' | 'select'; + confidence: number; + }> = []; + + // Extract URLs + const urlMatch = goal.match(/https?:\/\/[^\s]+/g); + if (urlMatch) { + suggestions.push({ + name: 'url', + value: urlMatch[0], + type: 'string', + confidence: 0.95, + }); + } + + // Extract email addresses + const emailMatch = goal.match(/[\w.-]+@[\w.-]+\.\w+/g); + if (emailMatch) { + suggestions.push({ + name: 'email', + value: emailMatch[0], + type: 'string', + confidence: 0.95, + }); + } + + // Extract numbers + const numberMatch = goal.match(/\b\d+\b/g); + if (numberMatch) { + numberMatch.forEach((num, idx) => { + suggestions.push({ + name: idx === 0 ? 'count' : `count${idx + 1}`, + value: num, + type: 'number', + confidence: 0.8, + }); + }); + } + + // Extract dates + const datePatterns = [ + /\d{4}-\d{2}-\d{2}/, + /\d{1,2}\/\d{1,2}\/\d{2,4}/, + /(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[a-z]* \d{1,2},? \d{4}/i, + ]; + for (const pattern of datePatterns) { + const match = goal.match(pattern); + if (match) { + suggestions.push({ + name: 'date', + value: match[0], + type: 'string', + confidence: 0.9, + }); + break; + } + } + + // Extract file paths + const pathMatch = goal.match(/(?:\/[\w.-]+)+\/?|(?:[A-Z]:)?\\(?:[\w.-]+\\)+[\w.-]*/g); + if (pathMatch) { + suggestions.push({ + name: 'filePath', + value: pathMatch[0], + type: 'string', + confidence: 0.85, + }); + } + + // Extract quoted strings + const quotedMatch = goal.match(/"([^"]+)"|'([^']+)'/g); + if (quotedMatch) { + quotedMatch.forEach((quoted, idx) => { + const value = quoted.replace(/['"]/g, ''); + suggestions.push({ + name: idx === 0 ? 'text' : `text${idx + 1}`, + value, + type: 'string', + confidence: 0.75, + }); + }); + } + + return suggestions; + } + + /** + * Get template quality score + */ + calculateTemplateQuality(template: Partial): number { + let score = 0; + + // Variable count (2-5 is ideal) + const varCount = template.variables?.length || 0; + if (varCount >= 2 && varCount <= 5) { + score += 0.25; + } else if (varCount === 1 || varCount === 6) { + score += 0.15; + } else if (varCount > 0) { + score += 0.1; + } + + // Has description + if (template.description && template.description.length > 20) { + score += 0.15; + } + + // Has category + if (template.category) { + score += 0.1; + } + + // Goal pattern length (50-200 chars is good) + const patternLen = template.goalPattern?.length || 0; + if (patternLen >= 50 && patternLen <= 200) { + score += 0.2; + } else if (patternLen >= 30) { + score += 0.1; + } + + // Has checklist template + if (template.checklistTemplate && template.checklistTemplate.length > 0) { + score += 0.2; + } + + // Source goal count (more sources = more validated) + const sourceCount = template.sourceGoalCount || 1; + if (sourceCount >= 5) { + score += 0.1; + } else if (sourceCount >= 3) { + score += 0.05; + } + + return Math.round(score * 100) / 100; + } + + // Private methods + + private clusterSimilarGoals( + goals: Array<{ id: string; goal: string; createdAt: Date }>, + threshold: number, + ): GoalCluster[] { + const clusters: GoalCluster[] = []; + const assigned = new Set(); + + for (const goal of goals) { + if (assigned.has(goal.id)) continue; + + const cluster: GoalCluster = { + representative: goal.goal, + goals: [goal], + similarity: 1, + }; + + // Find similar goals + for (const other of goals) { + if (other.id === goal.id || assigned.has(other.id)) continue; + + const similarity = this.calculateSimilarity(goal.goal, other.goal); + if (similarity >= threshold) { + cluster.goals.push(other); + cluster.similarity = Math.min(cluster.similarity, similarity); + assigned.add(other.id); + } + } + + if (cluster.goals.length > 1) { + assigned.add(goal.id); + clusters.push(cluster); + } + } + + // Sort by cluster size + clusters.sort((a, b) => b.goals.length - a.goals.length); + + return clusters; + } + + private calculateSimilarity(goal1: string, goal2: string): number { + const words1 = new Set(goal1.toLowerCase().split(/\s+/).filter(w => w.length > 2)); + const words2 = new Set(goal2.toLowerCase().split(/\s+/).filter(w => w.length > 2)); + + const intersection = [...words1].filter(w => words2.has(w)).length; + const union = new Set([...words1, ...words2]).size; + + if (union === 0) return 0; + + // Jaccard similarity + const jaccard = intersection / union; + + // Also consider structural similarity (same length range) + const lenRatio = Math.min(goal1.length, goal2.length) / Math.max(goal1.length, goal2.length); + + return (jaccard * 0.7 + lenRatio * 0.3); + } + + private async generateTemplateFromCluster( + cluster: GoalCluster, + allGoals: any[], + includeChecklist: boolean, + ): Promise { + const goalTexts = cluster.goals.map(g => g.goal); + const goalIds = cluster.goals.map(g => g.id); + + // Get checklist items for the cluster + const checklistItems: any[] = []; + if (includeChecklist) { + const goalData = allGoals.filter(g => goalIds.includes(g.id)); + for (const g of goalData.slice(0, 3)) { + if (g.planVersions?.[0]?.checklistItems) { + checklistItems.push(...g.planVersions[0].checklistItems); + } + } + } + + const prompt = this.buildClusterTemplatePrompt(goalTexts, checklistItems); + + try { + const response = await this.callLLM(prompt); + const parsed = this.parseLLMResponse(response); + const validated = GeneratedTemplateSchema.safeParse(parsed); + + if (validated.success) { + const candidate = this.transformToCandidate(validated.data, goalIds); + candidate.sourceGoalCount = cluster.goals.length; + return candidate; + } + + return null; + } catch (error: any) { + this.logger.warn(`Cluster template generation failed: ${error.message}`); + return null; + } + } + + private buildClusterTemplatePrompt(goals: string[], checklistItems: any[]): string { + const goalsText = goals.slice(0, 10).map((g, i) => `${i + 1}. ${g}`).join('\n'); + + const checklistText = checklistItems.length > 0 + ? `\nEXAMPLE CHECKLIST ITEMS:\n${checklistItems.slice(0, 5).map(c => `- ${c.description}`).join('\n')}` + : ''; + + return `Analyze these similar goals and generate a reusable template: + +SIMILAR GOALS: +${goalsText} +${checklistText} + +TASK: +1. Identify the common pattern across these goals +2. Extract variable parts that differ between goals +3. Create a template with {{variable}} placeholders +4. Suggest variable types and descriptions + +OUTPUT FORMAT (JSON): +{ + "name": "Short descriptive name for template", + "description": "What this template does", + "category": "category name", + "goalPattern": "Template with {{variable}} placeholders", + "variables": [ + { + "name": "variableName", + "type": "string|number|boolean|select", + "required": true, + "description": "What this variable represents", + "exampleValues": ["example1", "example2"] + } + ], + "checklistTemplate": [ + { + "order": 1, + "descriptionTemplate": "Step with {{variable}}", + "expectedOutcomeTemplate": "Expected result" + } + ], + "confidence": 0.0-1.0 +} + +Generate the template:`; + } + + private buildSingleGoalTemplatePrompt(goal: string, checklistItems: any[]): string { + const checklistText = checklistItems.length > 0 + ? `\nCHECKLIST ITEMS:\n${checklistItems.map(c => `- ${c.description}`).join('\n')}` + : ''; + + return `Convert this specific goal into a reusable template: + +GOAL: "${goal}" +${checklistText} + +TASK: +1. Identify parts that could be parameterized +2. Create a template with {{variable}} placeholders +3. Define variable types + +OUTPUT FORMAT (JSON): +{ + "name": "Short template name", + "description": "Template description", + "category": "category", + "goalPattern": "Template with {{variables}}", + "variables": [ + { + "name": "var", + "type": "string", + "required": true, + "description": "description" + } + ], + "checklistTemplate": [], + "confidence": 0.0-1.0 +} + +Generate the template:`; + } + + private async callLLM(prompt: string): Promise { + if (!this.llmApiKey) { + this.logger.warn('No LLM API key configured, using mock response'); + return this.getMockLLMResponse(prompt); + } + + const response = await fetch(this.llmApiUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-api-key': this.llmApiKey, + 'anthropic-version': '2023-06-01', + }, + body: JSON.stringify({ + model: this.llmModel, + max_tokens: 2000, + messages: [{ role: 'user', content: prompt }], + }), + }); + + if (!response.ok) { + throw new Error(`LLM API error: ${response.status} ${response.statusText}`); + } + + const data = await response.json(); + return data.content[0].text; + } + + private parseLLMResponse(response: string): any { + const jsonMatch = response.match(/```(?:json)?\s*([\s\S]*?)```/); + const jsonStr = jsonMatch ? jsonMatch[1] : response; + + try { + return JSON.parse(jsonStr.trim()); + } catch { + const objectMatch = response.match(/\{[\s\S]*\}/); + if (objectMatch) { + return JSON.parse(objectMatch[0]); + } + throw new Error('Failed to parse LLM response as JSON'); + } + } + + private transformToCandidate( + generated: GeneratedTemplate, + sourceGoalIds: string[], + ): TemplateCandidate { + const candidate: TemplateCandidate = { + name: generated.name, + description: generated.description, + category: generated.category, + goalPattern: generated.goalPattern, + variables: generated.variables.map(v => ({ + name: v.name, + type: v.type, + required: v.required, + description: v.description, + options: v.options, + })), + checklistTemplate: generated.checklistTemplate, + confidence: generated.confidence, + sourceGoalCount: sourceGoalIds.length, + sourceGoalIds, + estimatedUsage: sourceGoalIds.length * 2, // Estimate based on pattern frequency + qualityScore: 0, + }; + + candidate.qualityScore = this.calculateTemplateQuality(candidate); + + return candidate; + } + + private generateFallbackTemplate(goal: string, goalId: string): TemplateCandidate { + // Simple variable extraction + const variables: TemplateVariable[] = []; + let pattern = goal; + + // Extract URLs + const urlMatch = goal.match(/https?:\/\/[^\s]+/); + if (urlMatch) { + variables.push({ name: 'url', type: 'string', required: true, description: 'Target URL' }); + pattern = pattern.replace(urlMatch[0], '{{url}}'); + } + + // Extract numbers + const numMatch = goal.match(/\b\d+\b/); + if (numMatch) { + variables.push({ name: 'count', type: 'number', required: true, description: 'Count value' }); + pattern = pattern.replace(numMatch[0], '{{count}}'); + } + + return { + name: `Template from: ${goal.substring(0, 30)}...`, + description: 'Auto-generated template', + goalPattern: pattern, + variables, + confidence: 0.5, + sourceGoalCount: 1, + sourceGoalIds: [goalId], + estimatedUsage: 1, + qualityScore: 0.3, + }; + } + + private getMockLLMResponse(prompt: string): string { + return JSON.stringify({ + name: 'Sample Generated Template', + description: 'A template generated from similar goals', + category: 'general', + goalPattern: 'Perform {{action}} on {{target}} with {{options}}', + variables: [ + { name: 'action', type: 'string', required: true, description: 'The action to perform' }, + { name: 'target', type: 'string', required: true, description: 'The target of the action' }, + { name: 'options', type: 'string', required: false, description: 'Additional options' }, + ], + checklistTemplate: [ + { order: 1, descriptionTemplate: 'Prepare {{target}}', expectedOutcomeTemplate: 'Target ready' }, + { order: 2, descriptionTemplate: 'Execute {{action}}', expectedOutcomeTemplate: 'Action completed' }, + ], + confidence: 0.75, + }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/template-seed.service.ts b/packages/bytebot-workflow-orchestrator/src/services/template-seed.service.ts new file mode 100644 index 000000000..50b7f7d13 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/template-seed.service.ts @@ -0,0 +1,525 @@ +/** + * Template Seed Service + * Phase 7: Enhanced Features + * + * Seeds built-in goal templates during application startup. + * Built-in templates provide common automation patterns that users can + * customize and use immediately. + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { PrismaService } from './prisma.service'; +import { createId } from '@paralleldrive/cuid2'; + +// Built-in template tenant ID (system-wide templates) +const BUILTIN_TENANT_ID = '__builtin__'; + +interface BuiltInTemplate { + name: string; + description: string; + category: string; + tags: string[]; + icon: string; + goalPattern: string; + variables: Array<{ + name: string; + type: 'string' | 'number' | 'boolean' | 'select'; + required: boolean; + default?: string | number | boolean; + description: string; + options?: string[]; + validation?: { + minLength?: number; + maxLength?: number; + pattern?: string; + }; + }>; + checklistTemplate: Array<{ + order: number; + descriptionTemplate: string; + expectedOutcomeTemplate?: string; + suggestedTools?: string[]; + requiresDesktop?: boolean; + }>; +} + +@Injectable() +export class TemplateSeedService implements OnModuleInit { + private readonly logger = new Logger(TemplateSeedService.name); + + constructor(private prisma: PrismaService) {} + + async onModuleInit() { + await this.seedBuiltInTemplates(); + } + + /** + * Seed built-in templates if they don't exist + */ + async seedBuiltInTemplates(): Promise { + this.logger.log('Checking for built-in templates...'); + + const existingCount = await this.prisma.goalTemplate.count({ + where: { isBuiltIn: true }, + }); + + if (existingCount > 0) { + this.logger.log(`Found ${existingCount} existing built-in templates, skipping seed`); + return; + } + + this.logger.log('Seeding built-in templates...'); + + const templates = this.getBuiltInTemplates(); + let seeded = 0; + + for (const template of templates) { + try { + await this.prisma.goalTemplate.create({ + data: { + id: createId(), + tenantId: BUILTIN_TENANT_ID, + name: template.name, + description: template.description, + category: template.category, + tags: template.tags, + icon: template.icon, + goalPattern: template.goalPattern, + defaultConstraints: {}, + variables: template.variables as any, + checklistTemplate: template.checklistTemplate as any, + version: '1.0.0', + isLatest: true, + isPublished: true, + isBuiltIn: true, + usageCount: 0, + createdBy: 'system', + }, + }); + seeded++; + this.logger.debug(`Seeded template: ${template.name}`); + } catch (error: any) { + this.logger.warn(`Failed to seed template "${template.name}": ${error.message}`); + } + } + + this.logger.log(`Seeded ${seeded} built-in templates`); + } + + /** + * Get list of built-in templates + */ + private getBuiltInTemplates(): BuiltInTemplate[] { + return [ + // Web Research Template + { + name: 'Web Research', + description: 'Research a topic online and compile findings into a structured summary', + category: 'Research', + tags: ['research', 'web', 'summary', 'information'], + icon: 'search', + goalPattern: 'Research {{topic}} and compile a summary including {{aspects}}', + variables: [ + { + name: 'topic', + type: 'string', + required: true, + description: 'The topic or subject to research', + validation: { minLength: 3, maxLength: 200 }, + }, + { + name: 'aspects', + type: 'string', + required: true, + default: 'key facts, recent developments, and notable sources', + description: 'Specific aspects to cover in the research', + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Open a web browser and search for "{{topic}}"', + expectedOutcomeTemplate: 'Search results page displayed with relevant results', + suggestedTools: ['browser'], + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Visit and read the top 3-5 most relevant sources', + expectedOutcomeTemplate: 'Key information gathered from multiple sources', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Compile findings into a structured summary covering {{aspects}}', + expectedOutcomeTemplate: 'Summary document created with organized information', + requiresDesktop: true, + }, + ], + }, + + // Form Submission Template + { + name: 'Form Submission', + description: 'Fill out and submit a web form with provided data', + category: 'Data Entry', + tags: ['form', 'submit', 'data-entry', 'automation'], + icon: 'edit-3', + goalPattern: 'Navigate to {{url}} and fill out the form with the following data: {{formData}}', + variables: [ + { + name: 'url', + type: 'string', + required: true, + description: 'The URL of the form to fill out', + validation: { pattern: '^https?://.+' }, + }, + { + name: 'formData', + type: 'string', + required: true, + description: 'The data to enter (field: value pairs)', + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Navigate to {{url}}', + expectedOutcomeTemplate: 'Form page loaded successfully', + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Locate and fill in form fields with provided data: {{formData}}', + expectedOutcomeTemplate: 'All form fields populated with correct values', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Submit the form', + expectedOutcomeTemplate: 'Form submitted successfully with confirmation', + requiresDesktop: true, + }, + ], + }, + + // Data Extraction Template + { + name: 'Data Extraction', + description: 'Extract specific data from a webpage and save it', + category: 'Data Collection', + tags: ['scraping', 'extraction', 'data', 'web'], + icon: 'download', + goalPattern: 'Extract {{dataType}} from {{url}} and save to {{outputFormat}}', + variables: [ + { + name: 'url', + type: 'string', + required: true, + description: 'The URL to extract data from', + validation: { pattern: '^https?://.+' }, + }, + { + name: 'dataType', + type: 'string', + required: true, + description: 'Type of data to extract (e.g., prices, contact info, product details)', + }, + { + name: 'outputFormat', + type: 'select', + required: true, + default: 'text file', + description: 'Format for the extracted data', + options: ['text file', 'CSV', 'JSON', 'clipboard'], + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Navigate to {{url}}', + expectedOutcomeTemplate: 'Page loaded with target data visible', + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Identify and extract {{dataType}} from the page', + expectedOutcomeTemplate: 'Target data identified and captured', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Format and save extracted data as {{outputFormat}}', + expectedOutcomeTemplate: 'Data saved in requested format', + requiresDesktop: true, + }, + ], + }, + + // Screenshot Capture Template + { + name: 'Screenshot Capture', + description: 'Navigate to a webpage and capture screenshots', + category: 'Documentation', + tags: ['screenshot', 'capture', 'documentation', 'visual'], + icon: 'camera', + goalPattern: 'Take {{screenshotType}} screenshot(s) of {{url}}', + variables: [ + { + name: 'url', + type: 'string', + required: true, + description: 'The URL to screenshot', + validation: { pattern: '^https?://.+' }, + }, + { + name: 'screenshotType', + type: 'select', + required: true, + default: 'full page', + description: 'Type of screenshot to capture', + options: ['full page', 'viewport only', 'specific element'], + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Navigate to {{url}}', + expectedOutcomeTemplate: 'Page fully loaded and rendered', + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Wait for any dynamic content to load', + expectedOutcomeTemplate: 'All content visible and stable', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Capture {{screenshotType}} screenshot', + expectedOutcomeTemplate: 'Screenshot captured and saved', + requiresDesktop: true, + }, + ], + }, + + // Email Composition Template + { + name: 'Email Composition', + description: 'Compose and send an email via webmail', + category: 'Communication', + tags: ['email', 'communication', 'compose', 'send'], + icon: 'mail', + goalPattern: 'Compose and send an email to {{recipient}} about {{subject}}', + variables: [ + { + name: 'recipient', + type: 'string', + required: true, + description: 'Email recipient address', + validation: { pattern: '^[^@]+@[^@]+\\.[^@]+$' }, + }, + { + name: 'subject', + type: 'string', + required: true, + description: 'Email subject line', + validation: { minLength: 1, maxLength: 200 }, + }, + { + name: 'emailBody', + type: 'string', + required: true, + description: 'The content of the email', + }, + { + name: 'webmailProvider', + type: 'select', + required: true, + default: 'Gmail', + description: 'Webmail provider to use', + options: ['Gmail', 'Outlook', 'Yahoo Mail'], + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Open {{webmailProvider}} in the browser', + expectedOutcomeTemplate: 'Webmail interface loaded (login if needed)', + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Click compose/new email button', + expectedOutcomeTemplate: 'New email composition window open', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Enter recipient: {{recipient}}, subject: {{subject}}', + expectedOutcomeTemplate: 'To and Subject fields populated', + requiresDesktop: true, + }, + { + order: 4, + descriptionTemplate: 'Enter email body content', + expectedOutcomeTemplate: 'Email body composed with provided content', + requiresDesktop: true, + }, + { + order: 5, + descriptionTemplate: 'Review and send the email', + expectedOutcomeTemplate: 'Email sent successfully', + requiresDesktop: true, + }, + ], + }, + + // Price Comparison Template + { + name: 'Price Comparison', + description: 'Compare prices for a product across multiple websites', + category: 'Shopping', + tags: ['price', 'comparison', 'shopping', 'research'], + icon: 'dollar-sign', + goalPattern: 'Compare prices for {{product}} across {{sites}}', + variables: [ + { + name: 'product', + type: 'string', + required: true, + description: 'Product name or description to search for', + validation: { minLength: 2, maxLength: 200 }, + }, + { + name: 'sites', + type: 'string', + required: true, + default: 'Amazon, eBay, Walmart', + description: 'Comma-separated list of sites to check', + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Search for "{{product}}" on each site: {{sites}}', + expectedOutcomeTemplate: 'Search results displayed on each site', + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Record the price, shipping cost, and availability from each site', + expectedOutcomeTemplate: 'Price data collected from all sites', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Compile comparison summary with best deal recommendation', + expectedOutcomeTemplate: 'Comparison table created with recommendation', + requiresDesktop: true, + }, + ], + }, + + // Social Media Post Template + { + name: 'Social Media Post', + description: 'Create and publish a post on social media', + category: 'Social Media', + tags: ['social', 'post', 'marketing', 'content'], + icon: 'share-2', + goalPattern: 'Post "{{content}}" to {{platform}}', + variables: [ + { + name: 'platform', + type: 'select', + required: true, + default: 'Twitter/X', + description: 'Social media platform to post on', + options: ['Twitter/X', 'LinkedIn', 'Facebook'], + }, + { + name: 'content', + type: 'string', + required: true, + description: 'The content to post', + validation: { minLength: 1, maxLength: 2000 }, + }, + { + name: 'includeImage', + type: 'boolean', + required: false, + default: false, + description: 'Whether to include an image with the post', + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Navigate to {{platform}} and ensure logged in', + expectedOutcomeTemplate: 'Social media platform loaded and authenticated', + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Create new post with content: {{content}}', + expectedOutcomeTemplate: 'Post content entered in composition area', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Review and publish the post', + expectedOutcomeTemplate: 'Post published successfully and visible on timeline', + requiresDesktop: true, + }, + ], + }, + + // PDF Download Template + { + name: 'PDF Download', + description: 'Download a PDF from a webpage', + category: 'File Operations', + tags: ['download', 'pdf', 'document', 'file'], + icon: 'file-text', + goalPattern: 'Download PDF from {{url}} and save as {{filename}}', + variables: [ + { + name: 'url', + type: 'string', + required: true, + description: 'URL of the page containing the PDF or direct PDF link', + validation: { pattern: '^https?://.+' }, + }, + { + name: 'filename', + type: 'string', + required: false, + default: 'downloaded.pdf', + description: 'Name to save the file as', + }, + ], + checklistTemplate: [ + { + order: 1, + descriptionTemplate: 'Navigate to {{url}}', + expectedOutcomeTemplate: 'Page loaded with PDF link or content visible', + requiresDesktop: true, + }, + { + order: 2, + descriptionTemplate: 'Locate and click the PDF download link', + expectedOutcomeTemplate: 'PDF download initiated', + requiresDesktop: true, + }, + { + order: 3, + descriptionTemplate: 'Save the file as {{filename}}', + expectedOutcomeTemplate: 'PDF saved to downloads folder', + requiresDesktop: true, + }, + ], + }, + ]; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/tenant-admin.service.ts b/packages/bytebot-workflow-orchestrator/src/services/tenant-admin.service.ts new file mode 100644 index 000000000..84dfd44e1 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/tenant-admin.service.ts @@ -0,0 +1,689 @@ +/** + * Tenant Administration Service + * Phase 10 (v5.5.0): Enterprise Features - Multi-Tenant Administration + * + * Provides comprehensive tenant management capabilities: + * - Tenant CRUD operations + * - Settings management + * - Quota management and enforcement + * - Usage tracking + * - Plan/subscription management + */ + +import { Injectable, Logger, NotFoundException, ConflictException, BadRequestException } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { createId } from '@paralleldrive/cuid2'; + +// ============================================================================ +// Types and Interfaces +// ============================================================================ + +export enum TenantPlan { + FREE = 'free', + STARTER = 'starter', + PROFESSIONAL = 'professional', + ENTERPRISE = 'enterprise', +} + +export enum TenantStatus { + ACTIVE = 'active', + SUSPENDED = 'suspended', + PENDING = 'pending', + CANCELLED = 'cancelled', +} + +export interface CreateTenantInput { + name: string; + slug?: string; + adminEmail: string; + adminName?: string; + companyName?: string; + plan?: TenantPlan; + billingEmail?: string; + metadata?: Record; +} + +export interface UpdateTenantInput { + name?: string; + adminEmail?: string; + adminName?: string; + companyName?: string; + billingEmail?: string; + metadata?: Record; +} + +export interface TenantSettingsInput { + timezone?: string; + dateFormat?: string; + defaultWorkspaceMode?: string; + requireMfa?: boolean; + sessionTimeout?: number; + ipAllowlist?: string[]; + allowedDomains?: string[]; + maxConcurrentGoals?: number; + defaultApprovalTimeout?: number; + autoReplanEnabled?: boolean; + maxReplanAttempts?: number; + notificationEmail?: string; + slackWebhookUrl?: string; + teamsWebhookUrl?: string; + auditLogRetentionDays?: number; + goalRunRetentionDays?: number; + features?: Record; +} + +export interface TenantQuotaInput { + monthlyGoalRuns?: number; + monthlyTokens?: number; + storageLimit?: bigint; + maxConcurrentWorkspaces?: number; + maxUsersPerTenant?: number; + maxTemplates?: number; + maxBatchSize?: number; + apiRateLimitPerMinute?: number; +} + +export interface UsageStats { + goalRuns: { used: number; limit: number; percentage: number }; + tokens: { used: number; limit: number; percentage: number }; + storage: { used: bigint; limit: bigint; percentage: number }; + periodStart: Date; + daysRemaining: number; +} + +// Plan limits configuration +const PLAN_LIMITS: Record = { + [TenantPlan.FREE]: { + monthlyGoalRuns: 100, + monthlyTokens: 100000, + storageLimit: BigInt(1073741824), // 1GB + maxConcurrentWorkspaces: 2, + maxUsersPerTenant: 3, + maxTemplates: 10, + maxBatchSize: 5, + apiRateLimitPerMinute: 30, + }, + [TenantPlan.STARTER]: { + monthlyGoalRuns: 500, + monthlyTokens: 500000, + storageLimit: BigInt(5368709120), // 5GB + maxConcurrentWorkspaces: 5, + maxUsersPerTenant: 10, + maxTemplates: 50, + maxBatchSize: 20, + apiRateLimitPerMinute: 60, + }, + [TenantPlan.PROFESSIONAL]: { + monthlyGoalRuns: 2000, + monthlyTokens: 2000000, + storageLimit: BigInt(21474836480), // 20GB + maxConcurrentWorkspaces: 20, + maxUsersPerTenant: 50, + maxTemplates: 200, + maxBatchSize: 50, + apiRateLimitPerMinute: 100, + }, + [TenantPlan.ENTERPRISE]: { + monthlyGoalRuns: 10000, + monthlyTokens: 10000000, + storageLimit: BigInt(107374182400), // 100GB + maxConcurrentWorkspaces: 100, + maxUsersPerTenant: 500, + maxTemplates: 1000, + maxBatchSize: 100, + apiRateLimitPerMinute: 500, + }, +}; + +@Injectable() +export class TenantAdminService { + private readonly logger = new Logger(TenantAdminService.name); + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.logger.log('TenantAdminService initialized'); + } + + // ========================================================================== + // Tenant CRUD Operations + // ========================================================================== + + /** + * Create a new tenant with default settings and quotas + */ + async createTenant(input: CreateTenantInput): Promise { + // Generate slug if not provided + const slug = input.slug || this.generateSlug(input.name); + + // Check if slug already exists + const existing = await this.prisma.tenant.findUnique({ + where: { slug }, + }); + + if (existing) { + throw new ConflictException(`Tenant with slug "${slug}" already exists`); + } + + const plan = input.plan || TenantPlan.FREE; + const planLimits = PLAN_LIMITS[plan]; + + // Create tenant with settings and quotas in a transaction + const tenant = await this.prisma.$transaction(async (tx) => { + // Create tenant + const newTenant = await tx.tenant.create({ + data: { + name: input.name, + slug, + adminEmail: input.adminEmail, + adminName: input.adminName, + companyName: input.companyName, + plan, + billingEmail: input.billingEmail || input.adminEmail, + status: TenantStatus.ACTIVE, + metadata: input.metadata || {}, + }, + }); + + // Create default settings + await tx.tenantSettings.create({ + data: { + tenantId: newTenant.id, + }, + }); + + // Create quotas based on plan + await tx.tenantQuota.create({ + data: { + tenantId: newTenant.id, + monthlyGoalRuns: planLimits.monthlyGoalRuns!, + monthlyTokens: planLimits.monthlyTokens!, + storageLimit: planLimits.storageLimit!, + maxConcurrentWorkspaces: planLimits.maxConcurrentWorkspaces!, + maxUsersPerTenant: planLimits.maxUsersPerTenant!, + maxTemplates: planLimits.maxTemplates!, + maxBatchSize: planLimits.maxBatchSize!, + apiRateLimitPerMinute: planLimits.apiRateLimitPerMinute!, + }, + }); + + return newTenant; + }); + + this.logger.log(`Created tenant: ${tenant.id} (${tenant.slug})`); + this.eventEmitter.emit('tenant.created', { tenantId: tenant.id, plan }); + + return this.getTenant(tenant.id); + } + + /** + * Get a tenant by ID with all related data + */ + async getTenant(tenantId: string): Promise { + const tenant = await this.prisma.tenant.findUnique({ + where: { id: tenantId }, + include: { + settings: true, + quotas: true, + ssoConfig: true, + llmProviders: true, + }, + }); + + if (!tenant) { + throw new NotFoundException(`Tenant ${tenantId} not found`); + } + + return tenant; + } + + /** + * Get a tenant by slug + */ + async getTenantBySlug(slug: string): Promise { + const tenant = await this.prisma.tenant.findUnique({ + where: { slug }, + include: { + settings: true, + quotas: true, + }, + }); + + if (!tenant) { + throw new NotFoundException(`Tenant with slug "${slug}" not found`); + } + + return tenant; + } + + /** + * List all tenants with pagination and filtering + */ + async listTenants(options: { + status?: TenantStatus; + plan?: TenantPlan; + search?: string; + limit?: number; + offset?: number; + }): Promise<{ tenants: any[]; total: number }> { + const where: any = {}; + + if (options.status) { + where.status = options.status; + } + + if (options.plan) { + where.plan = options.plan; + } + + if (options.search) { + where.OR = [ + { name: { contains: options.search, mode: 'insensitive' } }, + { slug: { contains: options.search, mode: 'insensitive' } }, + { adminEmail: { contains: options.search, mode: 'insensitive' } }, + { companyName: { contains: options.search, mode: 'insensitive' } }, + ]; + } + + const [tenants, total] = await Promise.all([ + this.prisma.tenant.findMany({ + where, + include: { + quotas: true, + }, + orderBy: { createdAt: 'desc' }, + take: options.limit || 50, + skip: options.offset || 0, + }), + this.prisma.tenant.count({ where }), + ]); + + return { tenants, total }; + } + + /** + * Update a tenant + */ + async updateTenant(tenantId: string, input: UpdateTenantInput): Promise { + const existing = await this.prisma.tenant.findUnique({ + where: { id: tenantId }, + }); + + if (!existing) { + throw new NotFoundException(`Tenant ${tenantId} not found`); + } + + const updated = await this.prisma.tenant.update({ + where: { id: tenantId }, + data: { + ...input, + }, + include: { + settings: true, + quotas: true, + }, + }); + + this.logger.log(`Updated tenant: ${tenantId}`); + this.eventEmitter.emit('tenant.updated', { tenantId }); + + return updated; + } + + /** + * Delete a tenant (soft delete by setting status to cancelled) + */ + async deleteTenant(tenantId: string, hardDelete = false): Promise { + const existing = await this.prisma.tenant.findUnique({ + where: { id: tenantId }, + }); + + if (!existing) { + throw new NotFoundException(`Tenant ${tenantId} not found`); + } + + if (hardDelete) { + // Hard delete - cascade will remove related records + await this.prisma.tenant.delete({ + where: { id: tenantId }, + }); + this.logger.log(`Hard deleted tenant: ${tenantId}`); + } else { + // Soft delete + await this.prisma.tenant.update({ + where: { id: tenantId }, + data: { status: TenantStatus.CANCELLED }, + }); + this.logger.log(`Soft deleted tenant: ${tenantId}`); + } + + this.eventEmitter.emit('tenant.deleted', { tenantId, hardDelete }); + } + + // ========================================================================== + // Tenant Status Management + // ========================================================================== + + /** + * Suspend a tenant + */ + async suspendTenant(tenantId: string, reason?: string): Promise { + const tenant = await this.prisma.tenant.update({ + where: { id: tenantId }, + data: { + status: TenantStatus.SUSPENDED, + metadata: { + ...(await this.getTenantMetadata(tenantId)), + suspendedAt: new Date().toISOString(), + suspendReason: reason, + }, + }, + }); + + this.logger.log(`Suspended tenant: ${tenantId}, reason: ${reason}`); + this.eventEmitter.emit('tenant.suspended', { tenantId, reason }); + + return tenant; + } + + /** + * Reactivate a suspended tenant + */ + async reactivateTenant(tenantId: string): Promise { + const existing = await this.prisma.tenant.findUnique({ + where: { id: tenantId }, + }); + + if (!existing) { + throw new NotFoundException(`Tenant ${tenantId} not found`); + } + + if (existing.status !== TenantStatus.SUSPENDED) { + throw new BadRequestException('Only suspended tenants can be reactivated'); + } + + const tenant = await this.prisma.tenant.update({ + where: { id: tenantId }, + data: { + status: TenantStatus.ACTIVE, + metadata: { + ...(existing.metadata as any), + reactivatedAt: new Date().toISOString(), + }, + }, + }); + + this.logger.log(`Reactivated tenant: ${tenantId}`); + this.eventEmitter.emit('tenant.reactivated', { tenantId }); + + return tenant; + } + + // ========================================================================== + // Settings Management + // ========================================================================== + + /** + * Get tenant settings + */ + async getSettings(tenantId: string): Promise { + const settings = await this.prisma.tenantSettings.findUnique({ + where: { tenantId }, + }); + + if (!settings) { + throw new NotFoundException(`Settings not found for tenant ${tenantId}`); + } + + return settings; + } + + /** + * Update tenant settings + */ + async updateSettings(tenantId: string, input: TenantSettingsInput): Promise { + // Validate tenant exists + await this.getTenant(tenantId); + + const settings = await this.prisma.tenantSettings.upsert({ + where: { tenantId }, + create: { + tenantId, + ...input, + }, + update: input, + }); + + this.logger.log(`Updated settings for tenant: ${tenantId}`); + this.eventEmitter.emit('tenant.settings.updated', { tenantId }); + + return settings; + } + + // ========================================================================== + // Quota Management + // ========================================================================== + + /** + * Get tenant quotas + */ + async getQuotas(tenantId: string): Promise { + const quotas = await this.prisma.tenantQuota.findUnique({ + where: { tenantId }, + }); + + if (!quotas) { + throw new NotFoundException(`Quotas not found for tenant ${tenantId}`); + } + + return quotas; + } + + /** + * Update tenant quotas + */ + async updateQuotas(tenantId: string, input: TenantQuotaInput): Promise { + // Validate tenant exists + await this.getTenant(tenantId); + + const quotas = await this.prisma.tenantQuota.upsert({ + where: { tenantId }, + create: { + tenantId, + ...input, + }, + update: input, + }); + + this.logger.log(`Updated quotas for tenant: ${tenantId}`); + this.eventEmitter.emit('tenant.quotas.updated', { tenantId }); + + return quotas; + } + + /** + * Get usage statistics for a tenant + */ + async getUsageStats(tenantId: string): Promise { + const quotas = await this.getQuotas(tenantId); + + const periodStart = new Date(quotas.quotaPeriodStart); + const now = new Date(); + const endOfPeriod = new Date(periodStart); + endOfPeriod.setMonth(endOfPeriod.getMonth() + 1); + + const daysRemaining = Math.max(0, Math.ceil((endOfPeriod.getTime() - now.getTime()) / (1000 * 60 * 60 * 24))); + + return { + goalRuns: { + used: quotas.monthlyGoalRunsUsed, + limit: quotas.monthlyGoalRuns, + percentage: (quotas.monthlyGoalRunsUsed / quotas.monthlyGoalRuns) * 100, + }, + tokens: { + used: quotas.monthlyTokensUsed, + limit: quotas.monthlyTokens, + percentage: (quotas.monthlyTokensUsed / quotas.monthlyTokens) * 100, + }, + storage: { + used: quotas.storageUsed, + limit: quotas.storageLimit, + percentage: Number((quotas.storageUsed * BigInt(100)) / quotas.storageLimit), + }, + periodStart, + daysRemaining, + }; + } + + /** + * Check if tenant has quota available + */ + async checkQuota(tenantId: string, resource: 'goalRuns' | 'tokens' | 'storage', amount: number): Promise { + const quotas = await this.getQuotas(tenantId); + + switch (resource) { + case 'goalRuns': + return quotas.monthlyGoalRunsUsed + amount <= quotas.monthlyGoalRuns; + case 'tokens': + return quotas.monthlyTokensUsed + amount <= quotas.monthlyTokens; + case 'storage': + return quotas.storageUsed + BigInt(amount) <= quotas.storageLimit; + default: + return false; + } + } + + /** + * Increment usage for a resource + */ + async incrementUsage(tenantId: string, resource: 'goalRuns' | 'tokens' | 'storage', amount: number): Promise { + const updateData: any = {}; + + switch (resource) { + case 'goalRuns': + updateData.monthlyGoalRunsUsed = { increment: amount }; + break; + case 'tokens': + updateData.monthlyTokensUsed = { increment: amount }; + break; + case 'storage': + updateData.storageUsed = { increment: BigInt(amount) }; + break; + } + + await this.prisma.tenantQuota.update({ + where: { tenantId }, + data: updateData, + }); + } + + /** + * Reset monthly quotas (called by scheduler) + */ + async resetMonthlyQuotas(): Promise { + const now = new Date(); + const oneMonthAgo = new Date(); + oneMonthAgo.setMonth(oneMonthAgo.getMonth() - 1); + + const result = await this.prisma.tenantQuota.updateMany({ + where: { + quotaPeriodStart: { lt: oneMonthAgo }, + }, + data: { + monthlyGoalRunsUsed: 0, + monthlyTokensUsed: 0, + quotaPeriodStart: now, + }, + }); + + if (result.count > 0) { + this.logger.log(`Reset quotas for ${result.count} tenants`); + } + + return result.count; + } + + // ========================================================================== + // Plan Management + // ========================================================================== + + /** + * Upgrade/downgrade tenant plan + */ + async changePlan(tenantId: string, newPlan: TenantPlan): Promise { + const tenant = await this.getTenant(tenantId); + const oldPlan = tenant.plan; + + if (oldPlan === newPlan) { + return tenant; + } + + const planLimits = PLAN_LIMITS[newPlan]; + + // Update plan and quotas + await this.prisma.$transaction([ + this.prisma.tenant.update({ + where: { id: tenantId }, + data: { plan: newPlan }, + }), + this.prisma.tenantQuota.update({ + where: { tenantId }, + data: { + monthlyGoalRuns: planLimits.monthlyGoalRuns!, + monthlyTokens: planLimits.monthlyTokens!, + storageLimit: planLimits.storageLimit!, + maxConcurrentWorkspaces: planLimits.maxConcurrentWorkspaces!, + maxUsersPerTenant: planLimits.maxUsersPerTenant!, + maxTemplates: planLimits.maxTemplates!, + maxBatchSize: planLimits.maxBatchSize!, + apiRateLimitPerMinute: planLimits.apiRateLimitPerMinute!, + }, + }), + ]); + + this.logger.log(`Changed plan for tenant ${tenantId}: ${oldPlan} -> ${newPlan}`); + this.eventEmitter.emit('tenant.plan.changed', { tenantId, oldPlan, newPlan }); + + return this.getTenant(tenantId); + } + + /** + * Get available plans with their limits + */ + getAvailablePlans(): Record { + return PLAN_LIMITS; + } + + // ========================================================================== + // Helper Methods + // ========================================================================== + + /** + * Generate URL-friendly slug from name + */ + private generateSlug(name: string): string { + const baseSlug = name + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-|-$/g, ''); + + // Add random suffix to ensure uniqueness + const suffix = createId().slice(0, 6); + return `${baseSlug}-${suffix}`; + } + + /** + * Get tenant metadata + */ + private async getTenantMetadata(tenantId: string): Promise> { + const tenant = await this.prisma.tenant.findUnique({ + where: { id: tenantId }, + select: { metadata: true }, + }); + return (tenant?.metadata as Record) || {}; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/tenant-knowledge.service.ts b/packages/bytebot-workflow-orchestrator/src/services/tenant-knowledge.service.ts new file mode 100644 index 000000000..2cc6d0b11 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/tenant-knowledge.service.ts @@ -0,0 +1,536 @@ +/** + * Tenant Knowledge Service + * v1.0.0: Multi-Tenant Knowledge Graph Isolation + * + * Implements industry-standard patterns for tenant data isolation: + * - AWS: Row-level security with tenant_id in all queries + * - Salesforce: Namespace-based isolation + * - Neo4j: Graph partitioning per tenant + * + * Key Features: + * 1. Tenant-scoped knowledge graphs + * 2. Automatic tenant context injection + * 3. Cross-tenant analytics (aggregated, anonymized) + * 4. Tenant-specific fact and entity storage + * 5. Quota management per tenant + * + * @see /documentation/2026-01-03-ADVANCED_ENHANCEMENTS_V2.md + */ + +import { Injectable, Logger, Scope, Inject } from '@nestjs/common'; +import { REQUEST } from '@nestjs/core'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { + KnowledgeExtractionService, + ExtractedFact, + ExtractedEntity, + KnowledgeGraph, +} from './knowledge-extraction.service'; + +// Tenant Context +export interface TenantContext { + tenantId: string; + organizationName?: string; + tier: 'free' | 'pro' | 'enterprise'; + quotas: TenantQuotas; +} + +// Quota Configuration +export interface TenantQuotas { + maxFacts: number; + maxEntities: number; + maxGoalsPerDay: number; + retentionDays: number; +} + +// Tenant Knowledge Graph +export interface TenantKnowledgeGraph extends KnowledgeGraph { + tenantId: string; + totalFacts: number; + totalEntities: number; + usagePercent: number; +} + +// Cross-Tenant Analytics (aggregated) +export interface CrossTenantAnalytics { + totalTenants: number; + totalFacts: number; + totalEntities: number; + averageFactsPerTenant: number; + topEntityTypes: Array<{ type: string; count: number }>; + topFactTypes: Array<{ type: string; count: number }>; +} + +// Default quotas by tier +const DEFAULT_QUOTAS: Record = { + free: { + maxFacts: 1000, + maxEntities: 500, + maxGoalsPerDay: 10, + retentionDays: 7, + }, + pro: { + maxFacts: 10000, + maxEntities: 5000, + maxGoalsPerDay: 100, + retentionDays: 30, + }, + enterprise: { + maxFacts: 100000, + maxEntities: 50000, + maxGoalsPerDay: 1000, + retentionDays: 365, + }, +}; + +@Injectable() +export class TenantKnowledgeService { + private readonly logger = new Logger(TenantKnowledgeService.name); + private readonly enabled: boolean; + + // In-memory tenant knowledge storage + // In production, this would be backed by database tables + private tenantKnowledge: Map; + metadata: { + createdAt: Date; + updatedAt: Date; + factCount: number; + entityCount: number; + }; + }> = new Map(); + + // Tenant context cache + private tenantContextCache: Map = new Map(); + + constructor( + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly prisma: PrismaService, + private readonly knowledgeService: KnowledgeExtractionService, + ) { + this.enabled = this.configService.get('TENANT_KNOWLEDGE_ENABLED', 'true') === 'true'; + this.logger.log(`Tenant knowledge service ${this.enabled ? 'enabled' : 'disabled'}`); + } + + /** + * Get or create tenant context + */ + async getTenantContext(tenantId: string): Promise { + // Check cache + if (this.tenantContextCache.has(tenantId)) { + return this.tenantContextCache.get(tenantId)!; + } + + // Try to load from database (if tenant table exists) + try { + // In production, query tenant from database + // For now, create default context + const context: TenantContext = { + tenantId, + tier: 'pro', // Default tier + quotas: DEFAULT_QUOTAS['pro'], + }; + + this.tenantContextCache.set(tenantId, context); + return context; + } catch (error) { + // Default context + const context: TenantContext = { + tenantId, + tier: 'free', + quotas: DEFAULT_QUOTAS['free'], + }; + return context; + } + } + + /** + * Get tenant knowledge graph + */ + async getTenantKnowledge(tenantId: string): Promise { + const context = await this.getTenantContext(tenantId); + const storage = this.tenantKnowledge.get(tenantId); + + if (!storage) { + return null; + } + + const usagePercent = (storage.metadata.factCount / context.quotas.maxFacts) * 100; + + return { + goalRunId: `tenant-${tenantId}`, + extractedAt: storage.metadata.updatedAt, + facts: storage.facts, + entities: storage.entities, + summary: `Tenant ${tenantId} knowledge: ${storage.metadata.factCount} facts, ${storage.metadata.entityCount} entities`, + keyMetrics: {}, + decisions: [], + tenantId, + totalFacts: storage.metadata.factCount, + totalEntities: storage.metadata.entityCount, + usagePercent, + }; + } + + /** + * Add knowledge to tenant graph + */ + async addKnowledge( + tenantId: string, + goalRunId: string, + facts: ExtractedFact[], + entities: ExtractedEntity[], + ): Promise<{ added: number; quota: { used: number; max: number } }> { + const context = await this.getTenantContext(tenantId); + + // Initialize storage if needed + if (!this.tenantKnowledge.has(tenantId)) { + this.tenantKnowledge.set(tenantId, { + facts: [], + entities: [], + goalGraphs: new Map(), + metadata: { + createdAt: new Date(), + updatedAt: new Date(), + factCount: 0, + entityCount: 0, + }, + }); + } + + const storage = this.tenantKnowledge.get(tenantId)!; + + // Check quota + const remainingCapacity = context.quotas.maxFacts - storage.metadata.factCount; + if (remainingCapacity <= 0) { + this.logger.warn(`Tenant ${tenantId} has reached fact quota (${context.quotas.maxFacts})`); + + // Emit quota warning + this.eventEmitter.emit('tenant.quota.exceeded', { + tenantId, + resource: 'facts', + current: storage.metadata.factCount, + max: context.quotas.maxFacts, + }); + + return { + added: 0, + quota: { used: storage.metadata.factCount, max: context.quotas.maxFacts }, + }; + } + + // Add facts (up to quota) + const factsToAdd = facts.slice(0, remainingCapacity); + for (const fact of factsToAdd) { + // Tag with tenant and goal + const taggedFact: ExtractedFact = { + ...fact, + tags: [...(fact.tags || []), `tenant:${tenantId}`, `goal:${goalRunId}`], + }; + storage.facts.push(taggedFact); + } + + // Add entities (deduplicate by name) + const existingEntityNames = new Set(storage.entities.map(e => e.name.toLowerCase())); + for (const entity of entities) { + if (!existingEntityNames.has(entity.name.toLowerCase())) { + if (storage.metadata.entityCount < context.quotas.maxEntities) { + storage.entities.push(entity); + storage.metadata.entityCount++; + existingEntityNames.add(entity.name.toLowerCase()); + } + } else { + // Update existing entity + const existing = storage.entities.find( + e => e.name.toLowerCase() === entity.name.toLowerCase() + ); + if (existing) { + existing.mentions += entity.mentions; + existing.lastSeen = entity.lastSeen; + existing.relatedFacts.push(...entity.relatedFacts); + } + } + } + + // Update metadata + storage.metadata.factCount += factsToAdd.length; + storage.metadata.updatedAt = new Date(); + + // Store goal-specific graph + const goalGraph = this.knowledgeService.getKnowledge(goalRunId); + if (goalGraph) { + storage.goalGraphs.set(goalRunId, goalGraph); + } + + this.logger.debug( + `Added ${factsToAdd.length} facts for tenant ${tenantId} ` + + `(${storage.metadata.factCount}/${context.quotas.maxFacts})` + ); + + return { + added: factsToAdd.length, + quota: { used: storage.metadata.factCount, max: context.quotas.maxFacts }, + }; + } + + /** + * Search tenant knowledge + */ + async searchKnowledge( + tenantId: string, + query: string, + options: { + types?: ExtractedFact['type'][]; + limit?: number; + minConfidence?: number; + } = {}, + ): Promise { + const storage = this.tenantKnowledge.get(tenantId); + if (!storage) { + return []; + } + + const queryLower = query.toLowerCase(); + const limit = options.limit || 20; + const minConfidence = options.minConfidence || 0; + + let results = storage.facts.filter(fact => { + // Content match + const contentMatch = fact.content.toLowerCase().includes(queryLower); + + // Type filter + const typeMatch = !options.types || options.types.includes(fact.type); + + // Confidence filter + const confidenceMatch = fact.confidence >= minConfidence; + + return contentMatch && typeMatch && confidenceMatch; + }); + + // Sort by confidence and recency + results.sort((a, b) => { + const confidenceDiff = b.confidence - a.confidence; + if (Math.abs(confidenceDiff) > 0.1) { + return confidenceDiff; + } + return b.source.timestamp.getTime() - a.source.timestamp.getTime(); + }); + + return results.slice(0, limit); + } + + /** + * Get entities for tenant + */ + async getTenantEntities( + tenantId: string, + options: { + types?: ExtractedEntity['type'][]; + limit?: number; + sortBy?: 'mentions' | 'recency'; + } = {}, + ): Promise { + const storage = this.tenantKnowledge.get(tenantId); + if (!storage) { + return []; + } + + let entities = [...storage.entities]; + + // Type filter + if (options.types?.length) { + entities = entities.filter(e => options.types!.includes(e.type)); + } + + // Sort + if (options.sortBy === 'recency') { + entities.sort((a, b) => b.lastSeen.getTime() - a.lastSeen.getTime()); + } else { + entities.sort((a, b) => b.mentions - a.mentions); + } + + return entities.slice(0, options.limit || 50); + } + + /** + * Get cross-tenant analytics (admin only, aggregated) + */ + async getCrossTenantAnalytics(): Promise { + let totalFacts = 0; + let totalEntities = 0; + const entityTypeCounts = new Map(); + const factTypeCounts = new Map(); + + for (const [_, storage] of this.tenantKnowledge) { + totalFacts += storage.metadata.factCount; + totalEntities += storage.metadata.entityCount; + + for (const entity of storage.entities) { + entityTypeCounts.set(entity.type, (entityTypeCounts.get(entity.type) || 0) + 1); + } + + for (const fact of storage.facts) { + factTypeCounts.set(fact.type, (factTypeCounts.get(fact.type) || 0) + 1); + } + } + + const totalTenants = this.tenantKnowledge.size || 1; + + return { + totalTenants, + totalFacts, + totalEntities, + averageFactsPerTenant: Math.round(totalFacts / totalTenants), + topEntityTypes: Array.from(entityTypeCounts.entries()) + .map(([type, count]) => ({ type, count })) + .sort((a, b) => b.count - a.count) + .slice(0, 10), + topFactTypes: Array.from(factTypeCounts.entries()) + .map(([type, count]) => ({ type, count })) + .sort((a, b) => b.count - a.count) + .slice(0, 10), + }; + } + + /** + * Clean up old facts based on retention policy + */ + async cleanupExpiredFacts(tenantId: string): Promise { + const context = await this.getTenantContext(tenantId); + const storage = this.tenantKnowledge.get(tenantId); + + if (!storage) { + return 0; + } + + const cutoffDate = new Date(Date.now() - context.quotas.retentionDays * 24 * 60 * 60 * 1000); + + const originalCount = storage.facts.length; + storage.facts = storage.facts.filter( + fact => fact.source.timestamp > cutoffDate + ); + + const removedCount = originalCount - storage.facts.length; + storage.metadata.factCount = storage.facts.length; + + if (removedCount > 0) { + this.logger.log(`Cleaned up ${removedCount} expired facts for tenant ${tenantId}`); + } + + return removedCount; + } + + /** + * Delete all knowledge for a tenant + */ + async deleteTenantKnowledge(tenantId: string): Promise { + const deleted = this.tenantKnowledge.delete(tenantId); + this.tenantContextCache.delete(tenantId); + + if (deleted) { + this.logger.log(`Deleted all knowledge for tenant ${tenantId}`); + this.eventEmitter.emit('tenant.knowledge.deleted', { tenantId }); + } + + return deleted; + } + + /** + * Get tenant usage statistics + */ + async getTenantUsage(tenantId: string): Promise<{ + facts: { used: number; max: number; percent: number }; + entities: { used: number; max: number; percent: number }; + retentionDays: number; + } | null> { + const context = await this.getTenantContext(tenantId); + const storage = this.tenantKnowledge.get(tenantId); + + if (!storage) { + return { + facts: { used: 0, max: context.quotas.maxFacts, percent: 0 }, + entities: { used: 0, max: context.quotas.maxEntities, percent: 0 }, + retentionDays: context.quotas.retentionDays, + }; + } + + return { + facts: { + used: storage.metadata.factCount, + max: context.quotas.maxFacts, + percent: Math.round((storage.metadata.factCount / context.quotas.maxFacts) * 100), + }, + entities: { + used: storage.metadata.entityCount, + max: context.quotas.maxEntities, + percent: Math.round((storage.metadata.entityCount / context.quotas.maxEntities) * 100), + }, + retentionDays: context.quotas.retentionDays, + }; + } + + /** + * Export tenant knowledge (for backup/migration) + */ + async exportTenantKnowledge(tenantId: string): Promise<{ + tenantId: string; + exportedAt: Date; + facts: ExtractedFact[]; + entities: ExtractedEntity[]; + } | null> { + const storage = this.tenantKnowledge.get(tenantId); + + if (!storage) { + return null; + } + + return { + tenantId, + exportedAt: new Date(), + facts: storage.facts, + entities: storage.entities, + }; + } + + /** + * Import tenant knowledge (from backup/migration) + */ + async importTenantKnowledge( + tenantId: string, + data: { + facts: ExtractedFact[]; + entities: ExtractedEntity[]; + }, + ): Promise<{ factsImported: number; entitiesImported: number }> { + const context = await this.getTenantContext(tenantId); + + // Initialize or reset storage + this.tenantKnowledge.set(tenantId, { + facts: data.facts.slice(0, context.quotas.maxFacts), + entities: data.entities.slice(0, context.quotas.maxEntities), + goalGraphs: new Map(), + metadata: { + createdAt: new Date(), + updatedAt: new Date(), + factCount: Math.min(data.facts.length, context.quotas.maxFacts), + entityCount: Math.min(data.entities.length, context.quotas.maxEntities), + }, + }); + + const storage = this.tenantKnowledge.get(tenantId)!; + + this.logger.log( + `Imported ${storage.metadata.factCount} facts and ${storage.metadata.entityCount} entities for tenant ${tenantId}` + ); + + return { + factsImported: storage.metadata.factCount, + entitiesImported: storage.metadata.entityCount, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/user-prompt-resolution.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/user-prompt-resolution.service.spec.ts new file mode 100644 index 000000000..b27122c76 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/user-prompt-resolution.service.spec.ts @@ -0,0 +1,649 @@ +import { UserPromptResolutionService } from './user-prompt-resolution.service'; +import { + ActorType, + ChecklistItemStatus, + GoalRunPhase, + GoalSpecStatus, + StepType, + UserPromptKind, + UserPromptScope, + UserPromptStatus, +} from '@prisma/client'; +import { ConflictException, UnprocessableEntityException } from '@nestjs/common'; + +describe(UserPromptResolutionService.name, () => { + it('resolves an OPEN prompt once and emits outbox + phase change', async () => { + const tx: any = { + $queryRaw: jest.fn(), + userPrompt: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + userPromptResolution: { + create: jest.fn(), + }, + checklistItem: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + outbox: { + create: jest.fn(), + }, + }; + + tx.$queryRaw.mockResolvedValue([{ id: 'p-1' }]); + tx.userPrompt.findUnique + .mockResolvedValueOnce({ + id: 'p-1', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + goalSpecId: null, + status: UserPromptStatus.OPEN, + kind: UserPromptKind.TEXT_CLARIFICATION, + scope: UserPromptScope.STEP, + jsonSchema: null, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + }) + .mockResolvedValueOnce({ + id: 'p-1', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + goalSpecId: null, + status: UserPromptStatus.RESOLVED, + kind: UserPromptKind.TEXT_CLARIFICATION, + scope: UserPromptScope.STEP, + jsonSchema: null, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + }); + tx.userPromptResolution.create.mockResolvedValue({ id: 'pr-1' }); + tx.userPrompt.updateMany.mockResolvedValue({ count: 1 }); + tx.checklistItem.findUnique.mockResolvedValue({ + type: StepType.USER_INPUT_REQUIRED, + description: 'Confirm details', + }); + tx.checklistItem.updateMany.mockResolvedValue({ count: 1 }); + tx.goalRun.findUnique.mockResolvedValue({ + phase: GoalRunPhase.WAITING_USER_INPUT, + tenantId: 't-1', + }); + tx.goalRun.updateMany.mockResolvedValue({ count: 1 }); + tx.outbox.create.mockResolvedValue({ id: 'o-1' }); + + const prisma: any = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + userPrompt: { findUnique: jest.fn() }, + goalRun: { findUnique: jest.fn() }, + goalSpec: { findUnique: jest.fn() }, + userPromptAttempt: { create: jest.fn(), findFirst: jest.fn() }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce({ + id: 'p-1', + tenantId: 't-1', + goalRunId: 'gr-1', + goalSpecId: null, + status: UserPromptStatus.OPEN, + kind: UserPromptKind.TEXT_CLARIFICATION, + }); + prisma.userPromptAttempt.create.mockResolvedValueOnce({ id: 'pa-1' }); + + const eventEmitter: any = { emit: jest.fn() }; + const jsonSchemaValidator: any = { + makePatchSchema: jest.fn((s: any) => s), + validate: jest.fn(() => ({ valid: true, violations: [], missingFields: [] })), + }; + const userPromptTimeToResolveSeconds: any = { labels: jest.fn(() => ({ observe: jest.fn() })) }; + const promptResolvedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const goalIntakeCompletedTotal: any = { inc: jest.fn() }; + const resumeOutboxEnqueuedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const validationFailTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const unauthorizedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const incompleteAfterApplyTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + + const service = new UserPromptResolutionService( + prisma, + eventEmitter, + jsonSchemaValidator, + userPromptTimeToResolveSeconds, + promptResolvedTotal, + goalIntakeCompletedTotal, + resumeOutboxEnqueuedTotal, + validationFailTotal, + unauthorizedTotal, + incompleteAfterApplyTotal, + undefined, + ); + + const result = await service.resolvePrompt({ + promptId: 'p-1', + tenantId: 't-1', + actor: { type: ActorType.HUMAN, id: 'u-1' }, + answers: { answer: 'yes' }, + requestId: 'req-1', + ipAddress: '127.0.0.1', + userAgent: 'jest', + }); + + expect(result).toEqual({ + promptId: 'p-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + goalSpecId: null, + didResolve: true, + promptStatus: UserPromptStatus.RESOLVED, + promptKind: UserPromptKind.TEXT_CLARIFICATION, + }); + + expect(tx.outbox.create).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + dedupeKey: 'user_prompt.resolved:p-1', + eventType: 'user_prompt.resolved', + }), + }), + ); + expect(tx.outbox.create).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + dedupeKey: 'user_prompt.resume:p-1', + eventType: 'user_prompt.resume', + }), + }), + ); + expect(eventEmitter.emit).toHaveBeenCalledWith('goal-run.phase-changed', expect.anything()); + expect(userPromptTimeToResolveSeconds.labels).toHaveBeenCalledWith(UserPromptKind.TEXT_CLARIFICATION); + expect(promptResolvedTotal.labels).toHaveBeenCalledWith(ActorType.HUMAN, UserPromptKind.TEXT_CLARIFICATION); + expect(resumeOutboxEnqueuedTotal.labels).toHaveBeenCalledWith('resolution'); + }); + + it('is idempotent: resolving an already-RESOLVED prompt is a no-op (no outbox)', async () => { + const tx: any = { + $queryRaw: jest.fn(), + userPrompt: { + findUnique: jest.fn(), + }, + outbox: { + create: jest.fn(), + }, + }; + + tx.$queryRaw.mockResolvedValue([{ id: 'p-1' }]); + tx.userPrompt.findUnique.mockResolvedValue({ + id: 'p-1', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: 'ci-1', + goalSpecId: null, + status: UserPromptStatus.RESOLVED, + kind: UserPromptKind.TEXT_CLARIFICATION, + scope: UserPromptScope.STEP, + jsonSchema: null, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + }); + + const prisma: any = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + userPrompt: { findUnique: jest.fn() }, + goalRun: { findUnique: jest.fn() }, + goalSpec: { findUnique: jest.fn() }, + userPromptAttempt: { create: jest.fn(), findFirst: jest.fn() }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce({ + id: 'p-1', + tenantId: 't-1', + goalRunId: 'gr-1', + goalSpecId: null, + status: UserPromptStatus.RESOLVED, + kind: UserPromptKind.TEXT_CLARIFICATION, + }); + prisma.userPromptAttempt.create.mockResolvedValueOnce({ id: 'pa-1' }); + + const eventEmitter: any = { emit: jest.fn() }; + const jsonSchemaValidator: any = { + makePatchSchema: jest.fn((s: any) => s), + validate: jest.fn(() => ({ valid: true, violations: [], missingFields: [] })), + }; + const userPromptTimeToResolveSeconds: any = { labels: jest.fn(() => ({ observe: jest.fn() })) }; + const promptResolvedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const goalIntakeCompletedTotal: any = { inc: jest.fn() }; + const resumeOutboxEnqueuedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const validationFailTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const unauthorizedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const incompleteAfterApplyTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + + const service = new UserPromptResolutionService( + prisma, + eventEmitter, + jsonSchemaValidator, + userPromptTimeToResolveSeconds, + promptResolvedTotal, + goalIntakeCompletedTotal, + resumeOutboxEnqueuedTotal, + validationFailTotal, + unauthorizedTotal, + incompleteAfterApplyTotal, + undefined, + ); + + const result = await service.resolvePrompt({ + promptId: 'p-1', + tenantId: 't-1', + actor: { type: ActorType.HUMAN, id: 'u-1' }, + answers: { answer: 'yes' }, + }); + + expect(result.didResolve).toBe(false); + expect(tx.outbox.create).not.toHaveBeenCalled(); + expect(eventEmitter.emit).not.toHaveBeenCalled(); + }); + + it('resolving a GOAL_INTAKE prompt returns run to INITIALIZING (so planning can restart)', async () => { + const tx: any = { + $queryRaw: jest.fn(), + userPrompt: { findUnique: jest.fn(), updateMany: jest.fn() }, + userPromptResolution: { create: jest.fn() }, + goalSpec: { findUnique: jest.fn(), update: jest.fn() }, + goalRun: { findUnique: jest.fn(), updateMany: jest.fn() }, + outbox: { create: jest.fn() }, + }; + + tx.$queryRaw.mockResolvedValue([{ id: 'p-gi-1' }]); + tx.userPrompt.findUnique + .mockResolvedValueOnce({ + id: 'p-gi-1', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: null, + goalSpecId: 'gs-1', + status: UserPromptStatus.OPEN, + kind: UserPromptKind.GOAL_INTAKE, + scope: UserPromptScope.RUN, + jsonSchema: { + type: 'object', + properties: { notes: { type: 'string', minLength: 1 } }, + required: ['notes'], + }, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + }) + .mockResolvedValueOnce({ + id: 'p-gi-1', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: null, + goalSpecId: 'gs-1', + status: UserPromptStatus.RESOLVED, + kind: UserPromptKind.GOAL_INTAKE, + scope: UserPromptScope.RUN, + jsonSchema: { + type: 'object', + properties: { notes: { type: 'string', minLength: 1 } }, + required: ['notes'], + }, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + }); + + tx.goalSpec.findUnique.mockResolvedValue({ values: {}, status: GoalSpecStatus.INCOMPLETE }); + tx.goalSpec.update.mockResolvedValue({ id: 'gs-1' }); + tx.userPromptResolution.create.mockResolvedValue({ id: 'pr-gi-1' }); + tx.userPrompt.updateMany.mockResolvedValue({ count: 1 }); + tx.goalRun.findUnique.mockResolvedValue({ phase: GoalRunPhase.WAITING_USER_INPUT, tenantId: 't-1' }); + tx.goalRun.updateMany.mockResolvedValue({ count: 1 }); + tx.outbox.create.mockResolvedValue({ id: 'o-gi-1' }); + + const prisma: any = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + userPrompt: { findUnique: jest.fn() }, + goalRun: { findUnique: jest.fn() }, + goalSpec: { findUnique: jest.fn() }, + userPromptAttempt: { create: jest.fn(), findFirst: jest.fn() }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce({ + id: 'p-gi-1', + tenantId: 't-1', + goalRunId: 'gr-1', + goalSpecId: 'gs-1', + status: UserPromptStatus.OPEN, + kind: UserPromptKind.GOAL_INTAKE, + }); + prisma.goalSpec.findUnique.mockResolvedValueOnce({ + jsonSchema: { type: 'object', required: ['notes'], properties: { notes: { type: 'string' } } }, + }); + prisma.userPromptAttempt.create.mockResolvedValueOnce({ id: 'pa-gi-1' }); + + const eventEmitter: any = { emit: jest.fn() }; + const jsonSchemaValidator: any = { + makePatchSchema: jest.fn((s: any) => ({ ...s, required: [] })), + validate: jest.fn(() => ({ valid: true, violations: [], missingFields: [] })), + }; + const userPromptTimeToResolveSeconds: any = { labels: jest.fn(() => ({ observe: jest.fn() })) }; + const promptResolvedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const goalIntakeCompletedTotal: any = { inc: jest.fn() }; + const resumeOutboxEnqueuedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const validationFailTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const unauthorizedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const incompleteAfterApplyTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + + const service = new UserPromptResolutionService( + prisma, + eventEmitter, + jsonSchemaValidator, + userPromptTimeToResolveSeconds, + promptResolvedTotal, + goalIntakeCompletedTotal, + resumeOutboxEnqueuedTotal, + validationFailTotal, + unauthorizedTotal, + incompleteAfterApplyTotal, + undefined, + ); + + const result = await service.resolvePrompt({ + promptId: 'p-gi-1', + tenantId: 't-1', + actor: { type: ActorType.HUMAN, id: 'u-1' }, + answers: { notes: 'Use account X, target URL Y' }, + }); + + expect(result).toEqual({ + promptId: 'p-gi-1', + goalRunId: 'gr-1', + checklistItemId: null, + goalSpecId: 'gs-1', + didResolve: true, + promptStatus: UserPromptStatus.RESOLVED, + promptKind: UserPromptKind.GOAL_INTAKE, + }); + + expect(tx.goalRun.updateMany).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ phase: GoalRunPhase.INITIALIZING }), + }), + ); + + expect(eventEmitter.emit).toHaveBeenCalledWith( + 'goal-run.phase-changed', + expect.objectContaining({ newPhase: GoalRunPhase.INITIALIZING }), + ); + + expect(goalIntakeCompletedTotal.inc).toHaveBeenCalledTimes(1); + }); + + it('records an invalid attempt and does not resolve the prompt (schema validation failure)', async () => { + const prisma: any = { + $transaction: jest.fn(), + userPrompt: { findUnique: jest.fn() }, + goalRun: { findUnique: jest.fn() }, + goalSpec: { findUnique: jest.fn() }, + userPromptAttempt: { create: jest.fn(), findFirst: jest.fn() }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce({ + id: 'p-gi-2', + tenantId: 't-1', + goalRunId: 'gr-1', + goalSpecId: 'gs-1', + status: UserPromptStatus.OPEN, + kind: UserPromptKind.GOAL_INTAKE, + }); + prisma.goalSpec.findUnique.mockResolvedValueOnce({ + jsonSchema: { type: 'object', required: ['notes'], properties: { notes: { type: 'string', minLength: 1 } } }, + }); + prisma.userPromptAttempt.create.mockResolvedValueOnce({ id: 'pa-invalid' }); + + const eventEmitter: any = { emit: jest.fn() }; + const jsonSchemaValidator: any = { + makePatchSchema: jest.fn((s: any) => s), + validate: jest.fn(() => ({ valid: true, violations: [], missingFields: [] })), + }; + const userPromptTimeToResolveSeconds: any = { labels: jest.fn(() => ({ observe: jest.fn() })) }; + const promptResolvedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const goalIntakeCompletedTotal: any = { inc: jest.fn() }; + const resumeOutboxEnqueuedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const validationFailTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const unauthorizedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const incompleteAfterApplyTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + + const service = new UserPromptResolutionService( + prisma, + eventEmitter, + jsonSchemaValidator, + userPromptTimeToResolveSeconds, + promptResolvedTotal, + goalIntakeCompletedTotal, + resumeOutboxEnqueuedTotal, + validationFailTotal, + unauthorizedTotal, + incompleteAfterApplyTotal, + undefined, + ); + + await expect( + service.resolvePrompt({ + promptId: 'p-gi-2', + tenantId: 't-1', + actor: { type: ActorType.HUMAN, id: 'u-1' }, + answers: {}, // missing required "notes" + }), + ).rejects.toMatchObject({ + response: expect.objectContaining({ + message: 'Schema validation failed', + }), + }); + + expect(prisma.userPromptAttempt.create).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + promptId: 'p-gi-2', + isValid: false, + errorCode: 'SCHEMA_VALIDATION_FAILED', + }), + }), + ); + expect(prisma.$transaction).not.toHaveBeenCalled(); + }); + + it('rejects schema-invalid patch answers (jsonSchema snapshot) without resolving', async () => { + const tx: any = { + $queryRaw: jest.fn(), + userPrompt: { findUnique: jest.fn(), updateMany: jest.fn() }, + userPromptResolution: { create: jest.fn() }, + goalSpec: { findUnique: jest.fn(), update: jest.fn() }, + goalRun: { findUnique: jest.fn(), updateMany: jest.fn() }, + outbox: { create: jest.fn() }, + checklistItem: { findUnique: jest.fn(), updateMany: jest.fn() }, + }; + + tx.$queryRaw.mockResolvedValue([{ id: 'p-gi-3' }]); + tx.userPrompt.findUnique.mockResolvedValue({ + id: 'p-gi-3', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: null, + goalSpecId: 'gs-1', + status: UserPromptStatus.OPEN, + kind: UserPromptKind.GOAL_INTAKE, + scope: UserPromptScope.RUN, + jsonSchema: { type: 'object', properties: { notes: { type: 'string', minLength: 1 } }, required: ['notes'] }, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + }); + + const prisma: any = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + userPrompt: { findUnique: jest.fn() }, + goalRun: { findUnique: jest.fn() }, + goalSpec: { findUnique: jest.fn() }, + userPromptAttempt: { create: jest.fn(), findFirst: jest.fn() }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce({ + id: 'p-gi-3', + tenantId: 't-1', + goalRunId: 'gr-1', + goalSpecId: 'gs-1', + status: UserPromptStatus.OPEN, + kind: UserPromptKind.GOAL_INTAKE, + }); + prisma.goalSpec.findUnique.mockResolvedValueOnce({ + jsonSchema: { type: 'object', required: ['notes'], properties: { notes: { type: 'string' } } }, + }); + prisma.userPromptAttempt.create.mockResolvedValueOnce({ id: 'pa-gi-3' }); + + const eventEmitter: any = { emit: jest.fn() }; + const jsonSchemaValidator: any = { + makePatchSchema: jest.fn((s: any) => ({ ...s, required: [] })), + validate: jest.fn(() => ({ valid: false, violations: [{ keyword: 'minLength' }], missingFields: [] })), + }; + const userPromptTimeToResolveSeconds: any = { labels: jest.fn(() => ({ observe: jest.fn() })) }; + const promptResolvedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const goalIntakeCompletedTotal: any = { inc: jest.fn() }; + const resumeOutboxEnqueuedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const validationFailTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const unauthorizedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const incompleteAfterApplyTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + + const service = new UserPromptResolutionService( + prisma, + eventEmitter, + jsonSchemaValidator, + userPromptTimeToResolveSeconds, + promptResolvedTotal, + goalIntakeCompletedTotal, + resumeOutboxEnqueuedTotal, + validationFailTotal, + unauthorizedTotal, + incompleteAfterApplyTotal, + undefined, + ); + + await expect( + service.resolvePrompt({ + promptId: 'p-gi-3', + tenantId: 't-1', + actor: { type: ActorType.HUMAN, id: 'u-1' }, + answers: { notes: '' }, // invalid (minLength) + }), + ).rejects.toBeInstanceOf(UnprocessableEntityException); + + expect(tx.userPromptResolution.create).not.toHaveBeenCalled(); + expect(tx.userPrompt.updateMany).not.toHaveBeenCalled(); + expect(tx.outbox.create).not.toHaveBeenCalled(); + }); + + it('schema-valid patch but incomplete after apply keeps prompt OPEN and emits no outbox', async () => { + const tx: any = { + $queryRaw: jest.fn(), + userPrompt: { findUnique: jest.fn(), updateMany: jest.fn() }, + userPromptResolution: { create: jest.fn() }, + goalSpec: { findUnique: jest.fn(), update: jest.fn() }, + goalRun: { findUnique: jest.fn(), updateMany: jest.fn() }, + outbox: { create: jest.fn() }, + checklistItem: { findUnique: jest.fn(), updateMany: jest.fn() }, + }; + + tx.$queryRaw.mockResolvedValue([{ id: 'p-gi-4' }]); + tx.userPrompt.findUnique.mockResolvedValue({ + id: 'p-gi-4', + tenantId: 't-1', + goalRunId: 'gr-1', + checklistItemId: null, + goalSpecId: 'gs-1', + status: UserPromptStatus.OPEN, + kind: UserPromptKind.GOAL_INTAKE, + scope: UserPromptScope.RUN, + jsonSchema: { + type: 'object', + properties: { notes: { type: 'string', minLength: 1 }, targetUrl: { type: 'string', minLength: 1 } }, + required: ['notes', 'targetUrl'], + }, + createdAt: new Date('2026-01-01T00:00:00.000Z'), + }); + + tx.goalSpec.findUnique.mockResolvedValue({ values: {}, status: GoalSpecStatus.INCOMPLETE }); + tx.goalSpec.update.mockResolvedValue({ id: 'gs-1' }); + tx.goalRun.findUnique.mockResolvedValue({ phase: GoalRunPhase.WAITING_USER_INPUT, tenantId: 't-1' }); + + const prisma: any = { + $transaction: jest.fn(async (fn: any) => fn(tx)), + userPrompt: { findUnique: jest.fn() }, + goalRun: { findUnique: jest.fn() }, + goalSpec: { findUnique: jest.fn() }, + userPromptAttempt: { create: jest.fn(), findFirst: jest.fn() }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce({ + id: 'p-gi-4', + tenantId: 't-1', + goalRunId: 'gr-1', + goalSpecId: 'gs-1', + status: UserPromptStatus.OPEN, + kind: UserPromptKind.GOAL_INTAKE, + }); + // Preflight schema only requires notes; full snapshot requires notes+targetUrl. + prisma.goalSpec.findUnique.mockResolvedValueOnce({ + jsonSchema: { type: 'object', required: ['notes'], properties: { notes: { type: 'string' } } }, + }); + prisma.userPromptAttempt.create.mockResolvedValueOnce({ id: 'pa-gi-4' }); + + // Patch validate passes; full validate fails with missing required. + const jsonSchemaValidator: any = { + makePatchSchema: jest.fn((s: any) => ({ ...s, required: [] })), + validate: jest + .fn() + .mockReturnValueOnce({ valid: true, violations: [], missingFields: [] }) + .mockReturnValueOnce({ + valid: false, + violations: [{ keyword: 'required', params: { missingProperty: 'targetUrl' } }], + missingFields: ['targetUrl'], + }), + }; + + const eventEmitter: any = { emit: jest.fn() }; + const userPromptTimeToResolveSeconds: any = { labels: jest.fn(() => ({ observe: jest.fn() })) }; + const promptResolvedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const goalIntakeCompletedTotal: any = { inc: jest.fn() }; + const resumeOutboxEnqueuedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const validationFailTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const unauthorizedTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + const incompleteAfterApplyTotal: any = { labels: jest.fn(() => ({ inc: jest.fn() })) }; + + const service = new UserPromptResolutionService( + prisma, + eventEmitter, + jsonSchemaValidator, + userPromptTimeToResolveSeconds, + promptResolvedTotal, + goalIntakeCompletedTotal, + resumeOutboxEnqueuedTotal, + validationFailTotal, + unauthorizedTotal, + incompleteAfterApplyTotal, + undefined, + ); + + await expect( + service.resolvePrompt({ + promptId: 'p-gi-4', + tenantId: 't-1', + actor: { type: ActorType.HUMAN, id: 'u-1' }, + answers: { notes: 'ok' }, // patch-valid, but incomplete after merge + }), + ).rejects.toBeInstanceOf(ConflictException); + + expect(tx.goalSpec.update).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ status: GoalSpecStatus.INCOMPLETE }), + }), + ); + expect(tx.userPromptResolution.create).not.toHaveBeenCalled(); + expect(tx.userPrompt.updateMany).not.toHaveBeenCalled(); + expect(tx.outbox.create).not.toHaveBeenCalled(); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/user-prompt-resolution.service.ts b/packages/bytebot-workflow-orchestrator/src/services/user-prompt-resolution.service.ts new file mode 100644 index 000000000..5f9bd0839 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/user-prompt-resolution.service.ts @@ -0,0 +1,810 @@ +import { + BadRequestException, + ConflictException, + ForbiddenException, + InternalServerErrorException, + Injectable, + Logger, + NotFoundException, + Optional, + UnprocessableEntityException, +} from '@nestjs/common'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { createId } from '@paralleldrive/cuid2'; +import { + ActorType, + ChecklistItemStatus, + GoalRunPhase, + GoalSpecStatus, + Prisma, + StepType, + UserPromptKind, + UserPromptScope, + UserPromptStatus, +} from '@prisma/client'; +import { InjectMetric } from '@willsoto/nestjs-prometheus'; +import type { Counter, Histogram } from 'prom-client'; +import { PrismaService } from './prisma.service'; +import { AuditService, AuditEventType } from './audit.service'; +import { JsonSchemaValidatorService } from './json-schema-validator.service'; + +@Injectable() +export class UserPromptResolutionService { + private readonly logger = new Logger(UserPromptResolutionService.name); + + constructor( + private readonly prisma: PrismaService, + private readonly eventEmitter: EventEmitter2, + private readonly jsonSchemaValidator: JsonSchemaValidatorService, + @InjectMetric('user_prompt_time_to_resolve_seconds') + private readonly userPromptTimeToResolveSeconds: Histogram, + @InjectMetric('prompt_resolved_total') + private readonly promptResolvedTotal: Counter, + @InjectMetric('goal_intake_completed_total') + private readonly goalIntakeCompletedTotal: Counter, + @InjectMetric('resume_outbox_enqueued_total') + private readonly resumeOutboxEnqueuedTotal: Counter, + @InjectMetric('user_prompt_resolution_validation_fail_total') + private readonly validationFailTotal: Counter, + @InjectMetric('user_prompt_resolution_unauthorized_total') + private readonly unauthorizedTotal: Counter, + @InjectMetric('user_prompt_resolution_incomplete_after_apply_total') + private readonly incompleteAfterApplyTotal: Counter, + @Optional() private readonly auditService?: AuditService, + ) {} + + private evaluateResolutionAuthz(params: { + promptKind: UserPromptKind; + actorType: ActorType; + }): { allowed: true; policy: string; ruleId: string; reason: string } | { allowed: false; policy: string; ruleId: string; reason: string } { + const policy = 'prompt_resolution.v1'; + + // Approvals are governance objects: human-only unless explicitly expanded by policy (not yet). + if (params.promptKind === UserPromptKind.APPROVAL) { + if (params.actorType !== ActorType.HUMAN) { + return { + allowed: false, + policy, + ruleId: 'approval_human_only', + reason: 'Only HUMAN actors may resolve approval prompts', + }; + } + return { allowed: true, policy, ruleId: 'approval_human_only', reason: 'HUMAN actor approved/denied approval prompt' }; + } + + // Desktop takeover prompts require a real human at the controls by default. + if (params.promptKind === UserPromptKind.DESKTOP_TAKEOVER) { + if (params.actorType !== ActorType.HUMAN) { + return { + allowed: false, + policy, + ruleId: 'desktop_takeover_human_only', + reason: 'Only HUMAN actors may resolve desktop takeover prompts', + }; + } + return { allowed: true, policy, ruleId: 'desktop_takeover_human_only', reason: 'HUMAN actor resolved desktop takeover prompt' }; + } + + // Text-only prompts: parent agents may answer by policy (human fallback remains supported). + if ( + params.actorType === ActorType.PARENT_AGENT || + params.actorType === ActorType.AGENT + ) { + return { allowed: true, policy, ruleId: 'parent_agent_text_only', reason: 'Agent actor allowed for text-only prompt kinds' }; + } + + // System automation: allowed for non-approval, non-takeover prompts. + if (params.actorType === ActorType.SYSTEM) { + return { allowed: true, policy, ruleId: 'system_text_only', reason: 'SYSTEM actor allowed for text-only prompt kinds' }; + } + + // Default allow for HUMAN. + return { allowed: true, policy, ruleId: 'human_default', reason: 'HUMAN actor allowed' }; + } + + private validateAnswersAgainstJsonSchema(jsonSchema: any, answers: Record): { + isValid: boolean; + errors: Array<{ path: string; message: string }>; + } { + const errors: Array<{ path: string; message: string }> = []; + + if (!jsonSchema || typeof jsonSchema !== 'object') { + return { isValid: true, errors }; + } + + if (jsonSchema.type && jsonSchema.type !== 'object') { + return { isValid: true, errors }; + } + + const required: string[] = Array.isArray(jsonSchema.required) ? jsonSchema.required : []; + const properties: Record = + jsonSchema.properties && typeof jsonSchema.properties === 'object' ? jsonSchema.properties : {}; + + for (const key of required) { + const value = (answers as any)[key]; + if (value === undefined || value === null) { + errors.push({ path: `/${key}`, message: 'is required' }); + continue; + } + + const prop = properties[key]; + const expectedType = prop?.type; + + if (expectedType === 'string') { + if (typeof value !== 'string') { + errors.push({ path: `/${key}`, message: 'must be a string' }); + continue; + } + const minLength = typeof prop?.minLength === 'number' ? prop.minLength : undefined; + if (minLength !== undefined && value.trim().length < minLength) { + errors.push({ path: `/${key}`, message: `must be at least ${minLength} characters` }); + } + } + + if (expectedType === 'number' || expectedType === 'integer') { + if (typeof value !== 'number' || Number.isNaN(value)) { + errors.push({ path: `/${key}`, message: 'must be a number' }); + } + } + + if (expectedType === 'boolean') { + if (typeof value !== 'boolean') { + errors.push({ path: `/${key}`, message: 'must be a boolean' }); + } + } + } + + return { isValid: errors.length === 0, errors }; + } + + private async recordAttemptOnce(params: { + promptId: string; + tenantId: string; + goalRunId: string; + actor: { + type: ActorType; + id?: string; + email?: string; + name?: string; + authContext?: Record; + }; + answers: Record; + requestId?: string; + clientRequestId?: string; + idempotencyKey?: string; + ipAddress?: string; + userAgent?: string; + authz: { decision: 'ALLOW' | 'DENY'; policy: string; ruleId: string; reason: string }; + isValid: boolean; + validationResult?: Record | null; + errorCode?: string | null; + errorMessage?: string | null; + }): Promise { + // Idempotency: prompt-scoped; prefer idempotencyKey, fallback to clientRequestId. + const idempotencyKey = params.idempotencyKey ?? null; + const clientRequestId = params.clientRequestId ?? null; + + try { + await this.prisma.userPromptAttempt.create({ + data: { + id: createId(), + promptId: params.promptId, + tenantId: params.tenantId, + goalRunId: params.goalRunId, + actorType: params.actor.type, + actorId: params.actor.id, + actorEmail: params.actor.email, + actorName: params.actor.name, + actorIpAddress: params.ipAddress, + actorUserAgent: params.userAgent, + requestId: params.requestId, + authContext: params.actor.authContext ?? {}, + clientRequestId, + idempotencyKey, + authzDecision: params.authz.decision, + authzPolicy: params.authz.policy, + authzRuleId: params.authz.ruleId, + authzReason: params.authz.reason, + answers: params.answers, + isValid: params.isValid, + validationResult: params.validationResult ?? undefined, + errorCode: params.errorCode ?? null, + errorMessage: params.errorMessage ?? null, + }, + }); + } catch (error: any) { + // P2002: unique constraint violation (prompt_id+idempotency_key or prompt_id+client_request_id) + if (error?.code !== 'P2002') { + throw error; + } + + if (!idempotencyKey && !clientRequestId) { + // Should be unreachable, but don't loop if it happens. + return; + } + + const existing = await this.prisma.userPromptAttempt.findFirst({ + where: { + promptId: params.promptId, + OR: [ + ...(idempotencyKey ? [{ idempotencyKey }] : []), + ...(clientRequestId ? [{ clientRequestId }] : []), + ], + }, + select: { id: true }, + }); + + if (existing) return; + throw error; + } + } + + async resolvePrompt(request: { + promptId: string; + tenantId: string; + actor: { + type: ActorType; + id?: string; + email?: string; + name?: string; + authContext?: Record; + }; + answers: Record; + requestId?: string; + clientRequestId?: string; + idempotencyKey?: string; + ipAddress?: string; + userAgent?: string; + }): Promise<{ + promptId: string; + goalRunId: string; + checklistItemId?: string | null; + goalSpecId?: string | null; + didResolve: boolean; + promptStatus: UserPromptStatus; + promptKind: UserPromptKind; + }> { + if (!request.tenantId) throw new BadRequestException('tenantId is required'); + if (!request.actor?.type) throw new BadRequestException('actor.type is required'); + + if (request.actor.type === ActorType.HUMAN) { + if (!request.actor.id && !request.actor.email) { + throw new BadRequestException('actor.id or actor.email is required for HUMAN'); + } + } else if (request.actor.type === ActorType.AGENT || request.actor.type === ActorType.PARENT_AGENT) { + if (!request.actor.id) throw new BadRequestException('actor.id is required for AGENT'); + } else if (request.actor.type === ActorType.SYSTEM) { + if (!request.actor.id) throw new BadRequestException('actor.id is required for SYSTEM'); + } + + const { promptId } = request; + const resolvedAt = new Date(); + + // Preflight: record an immutable attempt row even when we reject the submission. + // This is intentionally not part of the resolution transaction so validation/auth failures persist. + const promptForAttempt = await this.prisma.userPrompt.findUnique({ + where: { id: promptId }, + select: { + id: true, + tenantId: true, + goalRunId: true, + goalSpecId: true, + kind: true, + status: true, + }, + }); + + if (!promptForAttempt) { + throw new NotFoundException(`UserPrompt ${promptId} not found`); + } + + const promptTenantIdForAttempt = + promptForAttempt.tenantId || + ( + await this.prisma.goalRun.findUnique({ + where: { id: promptForAttempt.goalRunId }, + select: { tenantId: true }, + }) + )?.tenantId; + + if (!promptTenantIdForAttempt) { + throw new BadRequestException(`Prompt ${promptId} is missing tenantId`); + } + if (promptTenantIdForAttempt !== request.tenantId) { + // Do not record attempts across tenants (avoid leaking prompt existence). + throw new ForbiddenException('Prompt does not belong to tenant'); + } + + const authzForAttempt = this.evaluateResolutionAuthz({ + promptKind: promptForAttempt.kind, + actorType: request.actor.type, + }); + + if (!authzForAttempt.allowed) { + await this.recordAttemptOnce({ + promptId, + tenantId: promptTenantIdForAttempt, + goalRunId: promptForAttempt.goalRunId, + actor: request.actor, + answers: request.answers, + requestId: request.requestId, + clientRequestId: request.clientRequestId, + idempotencyKey: request.idempotencyKey, + ipAddress: request.ipAddress, + userAgent: request.userAgent, + authz: { decision: 'DENY', policy: authzForAttempt.policy, ruleId: authzForAttempt.ruleId, reason: authzForAttempt.reason }, + isValid: false, + validationResult: null, + errorCode: 'AUTHZ_DENY', + errorMessage: authzForAttempt.reason, + }); + throw new ForbiddenException(authzForAttempt.reason); + } + + if (promptForAttempt.status !== UserPromptStatus.OPEN && promptForAttempt.status !== UserPromptStatus.RESOLVED) { + await this.recordAttemptOnce({ + promptId, + tenantId: promptTenantIdForAttempt, + goalRunId: promptForAttempt.goalRunId, + actor: request.actor, + answers: request.answers, + requestId: request.requestId, + clientRequestId: request.clientRequestId, + idempotencyKey: request.idempotencyKey, + ipAddress: request.ipAddress, + userAgent: request.userAgent, + authz: { decision: 'ALLOW', policy: authzForAttempt.policy, ruleId: authzForAttempt.ruleId, reason: authzForAttempt.reason }, + isValid: false, + validationResult: null, + errorCode: 'PROMPT_NOT_OPEN', + errorMessage: `Prompt is not OPEN (status=${promptForAttempt.status})`, + }); + throw new ConflictException(`Prompt ${promptId} is not OPEN (status=${promptForAttempt.status})`); + } + + // Schema validation (GoalSpec-backed prompts only): reject invalid attempts but keep prompt OPEN. + if (promptForAttempt.goalSpecId) { + const goalSpec = await this.prisma.goalSpec.findUnique({ + where: { id: promptForAttempt.goalSpecId }, + select: { jsonSchema: true }, + }); + + const { isValid, errors } = this.validateAnswersAgainstJsonSchema(goalSpec?.jsonSchema as any, request.answers); + if (!isValid) { + const validationResult = { schema: 'jsonschema.required+types.v1', errors }; + await this.recordAttemptOnce({ + promptId, + tenantId: promptTenantIdForAttempt, + goalRunId: promptForAttempt.goalRunId, + actor: request.actor, + answers: request.answers, + requestId: request.requestId, + clientRequestId: request.clientRequestId, + idempotencyKey: request.idempotencyKey, + ipAddress: request.ipAddress, + userAgent: request.userAgent, + authz: { decision: 'ALLOW', policy: authzForAttempt.policy, ruleId: authzForAttempt.ruleId, reason: authzForAttempt.reason }, + isValid: false, + validationResult, + errorCode: 'SCHEMA_VALIDATION_FAILED', + errorMessage: 'Schema validation failed', + }); + throw new BadRequestException({ + message: 'Schema validation failed', + errors, + }); + } + } + + // Record accepted attempt (append-only). + await this.recordAttemptOnce({ + promptId, + tenantId: promptTenantIdForAttempt, + goalRunId: promptForAttempt.goalRunId, + actor: request.actor, + answers: request.answers, + requestId: request.requestId, + clientRequestId: request.clientRequestId, + idempotencyKey: request.idempotencyKey, + ipAddress: request.ipAddress, + userAgent: request.userAgent, + authz: { decision: 'ALLOW', policy: authzForAttempt.policy, ruleId: authzForAttempt.ruleId, reason: authzForAttempt.reason }, + isValid: true, + validationResult: null, + errorCode: null, + errorMessage: null, + }); + + const result = await this.prisma.$transaction(async (tx) => { + // Lock the prompt row to ensure OPEN->RESOLVED is serialized across concurrent resolvers. + // This keeps derived updates (GoalSpec, outbox rows) single-writer and retry-safe. + const locked = await tx.$queryRaw<{ id: string }[]>( + Prisma.sql`SELECT id FROM workflow_orchestrator.user_prompts WHERE id = ${promptId} FOR UPDATE`, + ); + if (!locked?.length) { + throw new NotFoundException(`UserPrompt ${promptId} not found`); + } + + const prompt = await tx.userPrompt.findUnique({ where: { id: promptId } }); + + if (!prompt) { + throw new NotFoundException(`UserPrompt ${promptId} not found`); + } + + // Enforce tenant boundary (RBAC baseline). + const promptTenantId = prompt.tenantId || ( + await tx.goalRun.findUnique({ where: { id: prompt.goalRunId }, select: { tenantId: true } }) + )?.tenantId; + + if (!promptTenantId) { + throw new BadRequestException(`Prompt ${promptId} is missing tenantId`); + } + if (promptTenantId !== request.tenantId) { + throw new ForbiddenException('Prompt does not belong to tenant'); + } + + const authz = this.evaluateResolutionAuthz({ promptKind: prompt.kind, actorType: request.actor.type }); + if (!authz.allowed) { + try { + this.unauthorizedTotal.labels(prompt.kind, request.actor.type).inc(); + } catch { + // ignore metric failures + } + throw new ForbiddenException(authz.reason); + } + + // Idempotent: resolving twice is a no-op. + if (prompt.status === UserPromptStatus.RESOLVED) { + return { prompt, didResolve: false, phaseChanged: false, previousPhase: null as GoalRunPhase | null }; + } + if (prompt.status !== UserPromptStatus.OPEN) { + throw new ConflictException(`Prompt ${promptId} is not OPEN (status=${prompt.status})`); + } + + // Validate answers against the prompt's schema snapshot (fail-closed). + // - Patch validation: validate only provided fields (no required enforcement) + // - Full validation: validate merged state to prove completeness before resolving + const hasSchemaSnapshot = prompt.jsonSchema != null; + if (prompt.kind === UserPromptKind.GOAL_INTAKE && !hasSchemaSnapshot) { + throw new InternalServerErrorException('GOAL_INTAKE prompt is missing json_schema snapshot'); + } + + if (hasSchemaSnapshot) { + const patchSchema = this.jsonSchemaValidator.makePatchSchema(prompt.jsonSchema as any); + const patchResult = this.jsonSchemaValidator.validate(patchSchema as any, request.answers); + if (!patchResult.valid) { + try { + this.validationFailTotal.labels(prompt.kind, prompt.scope).inc(); + } catch { + // ignore metric failures + } + throw new UnprocessableEntityException({ + code: 'VALIDATION_FAILED', + message: 'answers failed JSON schema validation', + details: patchResult.violations, + }); + } + } + + let checklistItemDescription: string | null = null; + + if (prompt.checklistItemId) { + const checklistItem = await tx.checklistItem.findUnique({ + where: { id: prompt.checklistItemId }, + select: { type: true, description: true }, + }); + + if (!checklistItem) { + throw new NotFoundException(`ChecklistItem ${prompt.checklistItemId} not found`); + } + + checklistItemDescription = checklistItem.description; + + // "Unblock step" semantics: + // - USER_INPUT_REQUIRED: answering satisfies the step → mark COMPLETED. + // - EXECUTE: user answers unblock execution → mark PENDING (executor decides how to resume). + if (checklistItem.type === StepType.USER_INPUT_REQUIRED) { + await tx.checklistItem.updateMany({ + where: { + id: prompt.checklistItemId, + status: ChecklistItemStatus.BLOCKED, + }, + data: { + status: ChecklistItemStatus.COMPLETED, + completedAt: resolvedAt, + actualOutcome: JSON.stringify( + { + promptId, + answers: request.answers, + }, + null, + 2, + ), + }, + }); + } else { + await tx.checklistItem.updateMany({ + where: { + id: prompt.checklistItemId, + status: ChecklistItemStatus.BLOCKED, + }, + data: { + status: ChecklistItemStatus.PENDING, + startedAt: null, + completedAt: null, + }, + }); + } + } + + if (prompt.goalSpecId) { + const goalSpec = await tx.goalSpec.findUnique({ + where: { id: prompt.goalSpecId }, + select: { values: true, status: true }, + }); + + const mergedValues = { + ...(goalSpec?.values as any), + ...(request.answers as any), + }; + + // Full validation: completeness is proven against the prompt's schema snapshot. + // If incomplete, keep prompt OPEN and keep GoalSpec INCOMPLETE (no false RESOLVED). + if (prompt.kind === UserPromptKind.GOAL_INTAKE) { + const fullResult = this.jsonSchemaValidator.validate(prompt.jsonSchema as any, mergedValues); + if (!fullResult.valid) { + try { + this.incompleteAfterApplyTotal.labels(prompt.kind).inc(); + } catch { + // ignore metric failures + } + + // Persist partial progress, but keep GoalSpec INCOMPLETE and do not resolve the prompt. + await tx.goalSpec.update({ + where: { id: prompt.goalSpecId }, + data: { + values: mergedValues, + status: GoalSpecStatus.INCOMPLETE, + completedAt: null, + }, + }); + + throw new ConflictException({ + code: 'INCOMPLETE_AFTER_APPLY', + message: 'GoalSpec is still incomplete after applying answers', + missingFields: fullResult.missingFields, + details: fullResult.violations, + }); + } + } + + // GoalSpec is complete (or this is a non-intake prompt linked to GoalSpec). + await tx.goalSpec.update({ + where: { id: prompt.goalSpecId }, + data: { + values: mergedValues, + status: GoalSpecStatus.COMPLETE, + completedAt: resolvedAt, + }, + }); + } + + // Resolution record is written only on success (idempotency keys consumed only on success). + // Immutable resolution record (unique per promptId) + try { + await tx.userPromptResolution.create({ + data: { + id: createId(), + promptId, + tenantId: promptTenantId, + goalRunId: prompt.goalRunId, + actorType: request.actor.type, + actorId: request.actor.id, + actorEmail: request.actor.email, + actorName: request.actor.name, + actorIpAddress: request.ipAddress, + actorUserAgent: request.userAgent, + requestId: request.requestId, + authContext: request.actor.authContext ?? {}, + clientRequestId: request.clientRequestId, + idempotencyKey: request.idempotencyKey, + authzDecision: 'ALLOW', + authzPolicy: authz.policy, + authzRuleId: authz.ruleId, + authzReason: authz.reason, + answers: request.answers, + }, + }); + } catch (error: any) { + if (error?.code !== 'P2002') throw error; + } + + const updatedCount = await tx.userPrompt.updateMany({ + where: { id: promptId, status: UserPromptStatus.OPEN }, + data: { + status: UserPromptStatus.RESOLVED, + answers: request.answers, + resolvedAt, + }, + }); + + if (updatedCount.count === 0) { + const current = await tx.userPrompt.findUnique({ where: { id: promptId } }); + if (current?.status === UserPromptStatus.RESOLVED) { + return { prompt: current, didResolve: false, phaseChanged: false, previousPhase: null as GoalRunPhase | null }; + } + throw new ConflictException(`Prompt ${promptId} could not be resolved (status changed concurrently)`); + } + + const updatedPrompt = await tx.userPrompt.findUnique({ where: { id: promptId } }); + if (!updatedPrompt) throw new NotFoundException(`UserPrompt ${promptId} not found after resolve`); + + const goalRun = await tx.goalRun.findUnique({ + where: { id: prompt.goalRunId }, + select: { phase: true, tenantId: true }, + }); + + if (!goalRun) { + throw new NotFoundException(`GoalRun ${prompt.goalRunId} not found`); + } + + const nextPhase = + prompt.goalSpecId || prompt.kind === UserPromptKind.GOAL_INTAKE + ? GoalRunPhase.INITIALIZING + : prompt.kind === UserPromptKind.APPROVAL + ? GoalRunPhase.EXECUTING + : GoalRunPhase.EXECUTING; + + const phaseUpdated = await tx.goalRun.updateMany({ + where: { + id: prompt.goalRunId, + phase: { + in: [GoalRunPhase.WAITING_USER_INPUT, GoalRunPhase.WAITING_APPROVAL], + }, + }, + data: { + phase: nextPhase, + }, + }); + + // Outbox: emit once per prompt resolution + const outboxDedupeKey = `user_prompt.resolved:${promptId}`; + try { + await tx.outbox.create({ + data: { + id: createId(), + dedupeKey: outboxDedupeKey, + aggregateId: prompt.goalRunId, + eventType: 'user_prompt.resolved', + payload: { + promptId, + goalRunId: prompt.goalRunId, + tenantId: goalRun.tenantId, + checklistItemId: prompt.checklistItemId ?? null, + goalSpecId: prompt.goalSpecId ?? null, + kind: updatedPrompt.kind, + stepDescription: checklistItemDescription, + resolvedAt: resolvedAt.toISOString(), + }, + }, + }); + } catch (error: any) { + // Idempotent: ignore duplicate outbox emission + if (error?.code !== 'P2002') { + throw error; + } + } + + // Outbox: resume pipeline (DB commit -> outbox -> resumer -> Temporal Update) + const resumeDedupeKey = `user_prompt.resume:${promptId}`; + try { + await tx.outbox.create({ + data: { + id: createId(), + dedupeKey: resumeDedupeKey, + aggregateId: prompt.goalRunId, + eventType: 'user_prompt.resume', + payload: { + promptId, + goalRunId: prompt.goalRunId, + tenantId: goalRun.tenantId, + updateId: resumeDedupeKey, + }, + }, + }); + } catch (error: any) { + if (error?.code !== 'P2002') { + throw error; + } + } + + return { + prompt: updatedPrompt, + didResolve: true, + phaseChanged: phaseUpdated.count > 0, + previousPhase: goalRun.phase, + nextPhase, + }; + }); + + if (result.didResolve && result.phaseChanged) { + this.eventEmitter.emit('goal-run.phase-changed', { + goalRunId: result.prompt.goalRunId, + previousPhase: result.previousPhase, + newPhase: result.nextPhase, + }); + } + + if (result.didResolve) { + try { + this.promptResolvedTotal.labels(request.actor.type, result.prompt.kind).inc(); + if ((result.prompt as any).goalSpecId || result.prompt.kind === UserPromptKind.GOAL_INTAKE) { + this.goalIntakeCompletedTotal.inc(); + } + this.resumeOutboxEnqueuedTotal.labels('resolution').inc(); + } catch (error: any) { + this.logger.debug(`Failed to record prompt resolution counters: ${error.message}`); + } + + if (this.auditService) { + try { + await this.auditService.log({ + eventType: AuditEventType.USER_PROMPT_RESOLVED, + actor: { + type: + request.actor.type === ActorType.HUMAN + ? 'user' + : request.actor.type === ActorType.AGENT || request.actor.type === ActorType.PARENT_AGENT + ? 'agent' + : 'system', + id: request.actor.id, + email: request.actor.email, + name: request.actor.name, + ipAddress: request.ipAddress, + userAgent: request.userAgent, + }, + resource: { + type: 'prompt', + id: request.promptId, + }, + context: { + tenantId: request.tenantId, + workflowRunId: result.prompt.goalRunId, + requestId: request.requestId, + }, + action: { + type: 'resolve', + previousState: UserPromptStatus.OPEN, + newState: UserPromptStatus.RESOLVED, + }, + metadata: { + goalRunId: result.prompt.goalRunId, + checklistItemId: result.prompt.checklistItemId ?? null, + goalSpecId: (result.prompt as any).goalSpecId ?? null, + kind: result.prompt.kind, + }, + }); + } catch (error: any) { + this.logger.warn(`Failed to write audit log for prompt resolution: ${error.message}`); + } + } + + try { + const durationSeconds = (resolvedAt.getTime() - result.prompt.createdAt.getTime()) / 1000; + this.userPromptTimeToResolveSeconds.labels(result.prompt.kind).observe(durationSeconds); + } catch (error: any) { + this.logger.debug(`Failed to record prompt resolution metric: ${error.message}`); + } + } + + return { + promptId: result.prompt.id, + goalRunId: result.prompt.goalRunId, + checklistItemId: result.prompt.checklistItemId ?? null, + goalSpecId: (result.prompt as any).goalSpecId ?? null, + didResolve: result.didResolve, + promptStatus: result.prompt.status, + promptKind: result.prompt.kind, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/user-prompt.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/services/user-prompt.service.spec.ts new file mode 100644 index 000000000..35ead1d9d --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/user-prompt.service.spec.ts @@ -0,0 +1,189 @@ +import { UserPromptService } from './user-prompt.service'; +import { UserPromptCancelReason, UserPromptKind, UserPromptStatus } from '@prisma/client'; + +jest.mock('@paralleldrive/cuid2', () => ({ + createId: () => 'p-new', +})); + +describe(UserPromptService.name, () => { + it('creates an OPEN prompt on first call', async () => { + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(prisma)), + userPrompt: { + create: jest.fn(), + findUnique: jest.fn(), + findFirst: jest.fn(), + updateMany: jest.fn(), + }, + } as any; + + const service = new UserPromptService(prisma); + + const created = { + id: 'p1', + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step1', + kind: UserPromptKind.TEXT_CLARIFICATION, + status: UserPromptStatus.OPEN, + dedupeKey: service.buildDedupeKey('run1', 'step1', UserPromptKind.TEXT_CLARIFICATION), + payload: { question: 'q' }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce(null); + prisma.userPrompt.findFirst.mockResolvedValueOnce(null); + prisma.userPrompt.create.mockResolvedValueOnce(created); + + const result = await service.ensureOpenPromptForStep({ + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step1', + kind: UserPromptKind.TEXT_CLARIFICATION, + payload: { question: 'q' }, + }); + + expect(result).toBe(created); + expect(prisma.userPrompt.create).toHaveBeenCalledTimes(1); + expect(prisma.userPrompt.updateMany).not.toHaveBeenCalled(); + }); + + it('dedupes by returning existing prompt when dedupeKey exists', async () => { + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(prisma)), + userPrompt: { + create: jest.fn(), + findUnique: jest.fn(), + findFirst: jest.fn(), + updateMany: jest.fn(), + }, + } as any; + + const service = new UserPromptService(prisma); + const dedupeKey = service.buildDedupeKey('run1', 'step1', UserPromptKind.TEXT_CLARIFICATION); + + const existing = { + id: 'p1', + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step1', + kind: UserPromptKind.TEXT_CLARIFICATION, + status: UserPromptStatus.OPEN, + dedupeKey, + payload: { question: 'q' }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce(existing); + + const result = await service.ensureOpenPromptForStep({ + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step1', + kind: UserPromptKind.TEXT_CLARIFICATION, + payload: { question: 'q' }, + }); + + expect(result).toBe(existing); + expect(prisma.userPrompt.findUnique).toHaveBeenCalledWith({ where: { dedupeKey } }); + expect(prisma.userPrompt.create).not.toHaveBeenCalled(); + expect(prisma.userPrompt.updateMany).not.toHaveBeenCalled(); + }); + + it('supersedes an existing OPEN prompt for the run', async () => { + const prisma = { + $transaction: jest.fn(async (fn: any) => fn(prisma)), + userPrompt: { + create: jest.fn(), + findUnique: jest.fn(), + findFirst: jest.fn(), + updateMany: jest.fn(), + }, + } as any; + + const service = new UserPromptService(prisma); + + const existingOpen = { + id: 'p-old', + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step-old', + kind: UserPromptKind.TEXT_CLARIFICATION, + status: UserPromptStatus.OPEN, + dedupeKey: 'prompt:run1:step-old:TEXT_CLARIFICATION', + rootPromptId: null, + revision: 2, + createdAt: new Date('2026-01-01T00:00:00Z'), + }; + + const created = { + id: 'p-new', + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step-new', + kind: UserPromptKind.TEXT_CLARIFICATION, + status: UserPromptStatus.OPEN, + dedupeKey: service.buildDedupeKey('run1', 'step-new', UserPromptKind.TEXT_CLARIFICATION), + payload: { question: 'q2' }, + }; + + prisma.userPrompt.findUnique.mockResolvedValueOnce(null); + prisma.userPrompt.findFirst.mockResolvedValueOnce(existingOpen); + prisma.userPrompt.updateMany.mockResolvedValueOnce({ count: 1 }); + prisma.userPrompt.create.mockResolvedValueOnce(created); + + const result = await service.ensureOpenPromptForStep({ + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step-new', + kind: UserPromptKind.TEXT_CLARIFICATION, + payload: { question: 'q2' }, + }); + + expect(result).toBe(created); + expect(prisma.userPrompt.updateMany).toHaveBeenCalledWith({ + where: { id: existingOpen.id, status: UserPromptStatus.OPEN }, + data: expect.objectContaining({ + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.SUPERSEDED, + supersededByPromptId: 'p-new', + }), + }); + expect(prisma.userPrompt.create).toHaveBeenCalledWith({ + data: expect.objectContaining({ + id: 'p-new', + tenantId: 't1', + goalRunId: 'run1', + checklistItemId: 'step-new', + supersedesPromptId: existingOpen.id, + rootPromptId: existingOpen.id, + revision: existingOpen.revision + 1, + }), + }); + }); + + it('lists prompts filtered by tenant and goalRun', async () => { + const prisma = { + userPrompt: { + findMany: jest.fn(), + }, + } as any; + + const service = new UserPromptService(prisma); + + const prompts = [ + { id: 'p1', tenantId: 't1', goalRunId: 'gr1' }, + { id: 'p2', tenantId: 't1', goalRunId: 'gr1' }, + ]; + prisma.userPrompt.findMany.mockResolvedValueOnce(prompts); + + const result = await service.listUserPrompts({ tenantId: 't1', goalRunId: 'gr1', limit: 10 }); + + expect(result).toBe(prompts); + expect(prisma.userPrompt.findMany).toHaveBeenCalledWith( + expect.objectContaining({ + where: expect.objectContaining({ tenantId: 't1', goalRunId: 'gr1' }), + orderBy: { createdAt: 'desc' }, + take: 10, + }), + ); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/src/services/user-prompt.service.ts b/packages/bytebot-workflow-orchestrator/src/services/user-prompt.service.ts new file mode 100644 index 000000000..5fd505949 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/user-prompt.service.ts @@ -0,0 +1,352 @@ +import { Injectable, Logger } from '@nestjs/common'; +import { createId } from '@paralleldrive/cuid2'; +import { PrismaService } from './prisma.service'; +import { + Prisma, + UserPrompt, + UserPromptCancelReason, + UserPromptKind, + UserPromptScope, + UserPromptStatus, +} from '@prisma/client'; + +export interface EnsureOpenPromptForStepRequest { + tenantId: string; + goalRunId: string; + checklistItemId: string; + kind: UserPromptKind; + payload: Prisma.InputJsonValue; + expiresAt?: Date | null; +} + +export interface EnsureOpenPromptForStepKeyRequest { + tenantId: string; + goalRunId: string; + /** + * Stable, engine-agnostic step key (e.g., "step-1") used when no ChecklistItem row exists + * (e.g., TEMPORAL_WORKFLOW runs). + */ + stepKey: string; + kind: UserPromptKind; + payload: Prisma.InputJsonValue; + expiresAt?: Date | null; +} + +export interface ListUserPromptsRequest { + tenantId: string; + goalRunId?: string; + status?: UserPromptStatus; + kind?: UserPromptKind; + scope?: UserPromptScope; + limit?: number; +} + +@Injectable() +export class UserPromptService { + private readonly logger = new Logger(UserPromptService.name); + + constructor(private readonly prisma: PrismaService) {} + + buildDedupeKey(goalRunId: string, checklistItemId: string, kind: UserPromptKind): string { + return `prompt:${goalRunId}:${checklistItemId}:${kind}`; + } + + buildGoalSpecDedupeKey(goalRunId: string, goalSpecId: string, kind: UserPromptKind): string { + return `prompt:${goalRunId}:goalSpec:${goalSpecId}:${kind}`; + } + + buildApprovalDedupeKey(goalRunId: string, approvalRequestId: string, kind: UserPromptKind): string { + return `prompt:${goalRunId}:approval:${approvalRequestId}:${kind}`; + } + + /** + * Create (or return existing) OPEN prompt for a step. + * Idempotent via unique UserPrompt.dedupeKey. + */ + async ensureOpenPromptForStep(request: EnsureOpenPromptForStepRequest): Promise { + const dedupeKey = this.buildDedupeKey(request.goalRunId, request.checklistItemId, request.kind); + + return this.ensureOpenPrompt({ + tenantId: request.tenantId, + goalRunId: request.goalRunId, + checklistItemId: request.checklistItemId, + kind: request.kind, + scope: UserPromptScope.STEP, + payload: request.payload, + dedupeKey, + expiresAt: request.expiresAt ?? null, + }); + } + + /** + * Create (or return existing) OPEN prompt for an engine-native step key (Temporal). + * This does not require a ChecklistItem row (avoids FK violations). + */ + async ensureOpenPromptForStepKey(request: EnsureOpenPromptForStepKeyRequest): Promise { + const dedupeKey = this.buildDedupeKey(request.goalRunId, request.stepKey, request.kind); + + return this.ensureOpenPrompt({ + tenantId: request.tenantId, + goalRunId: request.goalRunId, + kind: request.kind, + scope: UserPromptScope.STEP, + payload: request.payload, + dedupeKey, + expiresAt: request.expiresAt ?? null, + }); + } + + /** + * Create (or return existing) OPEN prompt for goal intake (GoalSpec gate). + * Idempotent via unique UserPrompt.dedupeKey. + */ + async ensureOpenGoalSpecPrompt(request: { + tenantId: string; + goalRunId: string; + goalSpecId: string; + kind: UserPromptKind; + schemaId?: string | null; + schemaVersion?: number | null; + jsonSchema?: Prisma.InputJsonValue | null; + uiSchema?: Prisma.InputJsonValue | null; + validatorVersion?: string | null; + payload: Prisma.InputJsonValue; + expiresAt?: Date | null; + }): Promise { + const dedupeKey = this.buildGoalSpecDedupeKey(request.goalRunId, request.goalSpecId, request.kind); + + return this.ensureOpenPrompt({ + tenantId: request.tenantId, + goalRunId: request.goalRunId, + goalSpecId: request.goalSpecId, + kind: request.kind, + scope: UserPromptScope.RUN, + schemaId: request.schemaId ?? null, + schemaVersion: request.schemaVersion ?? null, + jsonSchema: request.jsonSchema ?? null, + uiSchema: request.uiSchema ?? null, + validatorVersion: request.validatorVersion ?? null, + payload: request.payload, + dedupeKey, + expiresAt: request.expiresAt ?? null, + }); + } + + /** + * Create (or return existing) OPEN prompt for an approval request. + * Idempotent via unique UserPrompt.dedupeKey. + */ + async ensureOpenApprovalPrompt(request: { + tenantId: string; + goalRunId: string; + approvalRequestId: string; + payload: Prisma.InputJsonValue; + expiresAt?: Date | null; + }): Promise { + const dedupeKey = this.buildApprovalDedupeKey(request.goalRunId, request.approvalRequestId, UserPromptKind.APPROVAL); + + return this.ensureOpenPrompt({ + tenantId: request.tenantId, + goalRunId: request.goalRunId, + approvalRequestId: request.approvalRequestId, + kind: UserPromptKind.APPROVAL, + scope: UserPromptScope.APPROVAL, + payload: request.payload, + dedupeKey, + expiresAt: request.expiresAt ?? null, + }); + } + + async listUserPrompts(request: ListUserPromptsRequest): Promise< + Array< + Pick< + UserPrompt, + | 'id' + | 'tenantId' + | 'goalRunId' + | 'checklistItemId' + | 'goalSpecId' + | 'approvalRequestId' + | 'desktopLeaseId' + | 'kind' + | 'scope' + | 'status' + | 'dedupeKey' + | 'schemaId' + | 'schemaVersion' + | 'uiSchema' + | 'validatorVersion' + | 'payload' + | 'rootPromptId' + | 'supersedesPromptId' + | 'supersededByPromptId' + | 'revision' + | 'cancelReason' + | 'cancelledAt' + | 'expiresAt' + | 'createdAt' + | 'updatedAt' + | 'resolvedAt' + > + > + > { + const limit = Math.min(Math.max(request.limit ?? 50, 1), 200); + + return this.prisma.userPrompt.findMany({ + where: { + tenantId: request.tenantId, + ...(request.goalRunId ? { goalRunId: request.goalRunId } : {}), + ...(request.status ? { status: request.status } : {}), + ...(request.kind ? { kind: request.kind } : {}), + ...(request.scope ? { scope: request.scope } : {}), + }, + select: { + id: true, + tenantId: true, + goalRunId: true, + checklistItemId: true, + goalSpecId: true, + approvalRequestId: true, + desktopLeaseId: true, + kind: true, + scope: true, + status: true, + dedupeKey: true, + schemaId: true, + schemaVersion: true, + uiSchema: true, + validatorVersion: true, + payload: true, + rootPromptId: true, + supersedesPromptId: true, + supersededByPromptId: true, + revision: true, + cancelReason: true, + cancelledAt: true, + expiresAt: true, + createdAt: true, + updatedAt: true, + resolvedAt: true, + }, + orderBy: { createdAt: 'desc' }, + take: limit, + }); + } + + private async ensureOpenPrompt(request: { + tenantId: string; + goalRunId: string; + kind: UserPromptKind; + dedupeKey: string; + scope?: UserPromptScope; + schemaId?: string | null; + schemaVersion?: number | null; + jsonSchema?: Prisma.InputJsonValue | null; + uiSchema?: Prisma.InputJsonValue | null; + validatorVersion?: string | null; + payload: Prisma.InputJsonValue; + checklistItemId?: string | null; + goalSpecId?: string | null; + approvalRequestId?: string | null; + desktopLeaseId?: string | null; + expiresAt?: Date | null; + }): Promise { + const maxAttempts = 3; + const scope: UserPromptScope = + request.scope ?? + (request.approvalRequestId + ? UserPromptScope.APPROVAL + : request.checklistItemId + ? UserPromptScope.STEP + : UserPromptScope.RUN); + + for (let attempt = 1; attempt <= maxAttempts; attempt++) { + const now = new Date(); + const newPromptId = createId(); + + try { + return await this.prisma.$transaction(async (tx) => { + // Fast path: already created for this dedupeKey. + const existingByDedupe = await tx.userPrompt.findUnique({ where: { dedupeKey: request.dedupeKey } }); + if (existingByDedupe) { + return existingByDedupe; + } + + // Enforce policy: at most one OPEN prompt per run. + // If another OPEN prompt exists, cancel it as SUPERSEDED (no history overwrites). + const existingOpen = await tx.userPrompt.findFirst({ + where: { goalRunId: request.goalRunId, status: UserPromptStatus.OPEN }, + orderBy: { createdAt: 'asc' }, + }); + + if (existingOpen) { + await tx.userPrompt.updateMany({ + where: { id: existingOpen.id, status: UserPromptStatus.OPEN }, + data: { + status: UserPromptStatus.CANCELLED, + cancelReason: UserPromptCancelReason.SUPERSEDED, + cancelledAt: now, + supersededByPromptId: newPromptId, + }, + }); + } + + try { + return await tx.userPrompt.create({ + data: { + id: newPromptId, + tenantId: request.tenantId, + goalRunId: request.goalRunId, + checklistItemId: request.checklistItemId ?? null, + goalSpecId: request.goalSpecId ?? null, + approvalRequestId: request.approvalRequestId ?? null, + desktopLeaseId: request.desktopLeaseId ?? null, + kind: request.kind, + scope, + status: UserPromptStatus.OPEN, + dedupeKey: request.dedupeKey, + schemaId: request.schemaId ?? null, + schemaVersion: request.schemaVersion ?? null, + jsonSchema: request.jsonSchema ?? Prisma.DbNull, + uiSchema: request.uiSchema ?? Prisma.DbNull, + validatorVersion: request.validatorVersion ?? null, + payload: request.payload, + expiresAt: request.expiresAt ?? null, + supersedesPromptId: existingOpen?.id ?? null, + rootPromptId: existingOpen ? (existingOpen.rootPromptId ?? existingOpen.id) : null, + revision: existingOpen ? (existingOpen.revision + 1) : 1, + }, + }); + } catch (error: any) { + // P2002 is Prisma's unique constraint violation (dedupe key, or one-open-per-run index). + if (error?.code !== 'P2002') { + throw error; + } + + const existing = await tx.userPrompt.findUnique({ where: { dedupeKey: request.dedupeKey } }); + if (existing) return existing; + + const openForRun = await tx.userPrompt.findFirst({ + where: { goalRunId: request.goalRunId, status: UserPromptStatus.OPEN }, + orderBy: { createdAt: 'desc' }, + }); + if (openForRun) return openForRun; + + throw error; + } + }); + } catch (error: any) { + if (error?.code !== 'P2002' || attempt === maxAttempts) { + throw error; + } + + this.logger.warn( + `UserPrompt create race (attempt ${attempt}/${maxAttempts}) for goalRunId=${request.goalRunId} dedupeKey=${request.dedupeKey}; retrying`, + ); + } + } + + // Unreachable + throw new Error('Unexpected prompt create retry exhaustion'); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/webhook.service.ts b/packages/bytebot-workflow-orchestrator/src/services/webhook.service.ts new file mode 100644 index 000000000..ca7f675f0 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/webhook.service.ts @@ -0,0 +1,605 @@ +/** + * Webhook Notification Service + * Post-M5 Enhancement: Sends webhook notifications for approval events + * + * Best Practices Applied: + * - HMAC-SHA256 signature verification for security + * - Exponential backoff retry strategy (3 attempts) + * - Idempotency key in each payload + * - Standard event envelope format + * - Configurable per-tenant webhooks + * + * Event Types: + * - approval.requested: High-risk action awaiting approval + * - approval.approved: Action was approved + * - approval.rejected: Action was rejected + * - approval.expired: Approval request expired + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { PrismaService } from './prisma.service'; +import * as crypto from 'crypto'; + +/** + * Webhook event types + */ +export enum WebhookEventType { + APPROVAL_REQUESTED = 'approval.requested', + APPROVAL_APPROVED = 'approval.approved', + APPROVAL_REJECTED = 'approval.rejected', + APPROVAL_EXPIRED = 'approval.expired', +} + +/** + * Webhook payload envelope + */ +export interface WebhookPayload { + id: string; // Unique event ID (idempotency key) + type: WebhookEventType; + timestamp: string; // ISO 8601 + version: string; // API version + data: WebhookEventData; +} + +/** + * Webhook event data + */ +export interface WebhookEventData { + approvalId: string; + nodeRunId: string; + workflowRunId?: string; + tenantId: string; + toolName: string; + riskLevel: string; + summary: string; + recipient?: string; + subject?: string; + // Decision info (for approved/rejected/expired) + decision?: { + status: string; + reviewerId?: string; + reason?: string; + decidedAt?: string; + }; + // Links for UI integration + links?: { + approval: string; + workflow?: string; + }; +} + +/** + * Webhook configuration + */ +export interface WebhookConfig { + id: string; + tenantId: string; + url: string; + secret: string; // For HMAC signing + events: WebhookEventType[]; + enabled: boolean; + createdAt: Date; +} + +/** + * Webhook delivery result + */ +export interface WebhookDeliveryResult { + success: boolean; + webhookId: string; + eventId: string; + statusCode?: number; + responseBody?: string; + error?: string; + attempts: number; + deliveredAt?: Date; +} + +/** + * Retry configuration + */ +const RETRY_CONFIG = { + maxAttempts: 3, + baseDelayMs: 1000, + maxDelayMs: 30000, + backoffMultiplier: 2, +}; + +@Injectable() +export class WebhookService { + private readonly logger = new Logger(WebhookService.name); + private readonly webhookTimeoutMs: number; + private readonly baseUrl: string; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + ) { + this.webhookTimeoutMs = parseInt( + this.configService.get('WEBHOOK_TIMEOUT_MS', '10000'), + 10, + ); + this.baseUrl = this.configService.get( + 'APPROVAL_UI_BASE_URL', + 'https://app.bytebot.ai', + ); + + this.logger.log('WebhookService initialized'); + } + + /** + * Send webhook notification for an approval event + */ + async sendApprovalNotification( + eventType: WebhookEventType, + approval: { + id: string; + nodeRunId: string; + toolName: string; + previewData?: any; + status: string; + reason?: string; + approvedBy?: string; + rejectedBy?: string; + approvedAt?: Date; + rejectedAt?: Date; + }, + tenantId: string, + ): Promise { + // Get active webhooks for this tenant and event type + const webhooks = await this.getActiveWebhooks(tenantId, eventType); + + if (webhooks.length === 0) { + this.logger.debug(`No webhooks configured for ${eventType} in tenant ${tenantId}`); + return []; + } + + // Build payload + const payload = this.buildPayload(eventType, approval, tenantId); + + // Log the audit event + await this.logWebhookEvent(payload, webhooks.length); + + // Send to all configured webhooks + const results: WebhookDeliveryResult[] = []; + + for (const webhook of webhooks) { + const result = await this.deliverWebhook(webhook, payload); + results.push(result); + + // Record delivery attempt + await this.recordDeliveryAttempt(webhook.id, payload.id, result); + } + + return results; + } + + /** + * Build webhook payload with standard envelope + */ + private buildPayload( + eventType: WebhookEventType, + approval: any, + tenantId: string, + ): WebhookPayload { + const preview = approval.previewData || {}; + + const data: WebhookEventData = { + approvalId: approval.id, + nodeRunId: approval.nodeRunId, + tenantId, + toolName: approval.toolName, + riskLevel: preview.riskLevel || 'UNKNOWN', + summary: preview.summary || `Execute ${approval.toolName}`, + recipient: preview.recipient, + subject: preview.subject, + links: { + approval: `${this.baseUrl}/approvals/${approval.id}`, + }, + }; + + // Add decision info for non-requested events + if (eventType !== WebhookEventType.APPROVAL_REQUESTED) { + data.decision = { + status: approval.status, + reviewerId: approval.approvedBy || approval.rejectedBy, + reason: approval.reason, + decidedAt: (approval.approvedAt || approval.rejectedAt)?.toISOString(), + }; + } + + return { + id: this.generateEventId(), + type: eventType, + timestamp: new Date().toISOString(), + version: '1.0', + data, + }; + } + + /** + * Deliver webhook with retry logic + */ + private async deliverWebhook( + webhook: WebhookConfig, + payload: WebhookPayload, + ): Promise { + let lastError: string | undefined; + let lastStatusCode: number | undefined; + let attempts = 0; + + for (let attempt = 1; attempt <= RETRY_CONFIG.maxAttempts; attempt++) { + attempts = attempt; + + try { + const result = await this.sendWebhookRequest(webhook, payload); + + if (result.success) { + this.logger.log( + `Webhook delivered: ${webhook.id} -> ${payload.type} (attempt ${attempt})`, + ); + + return { + success: true, + webhookId: webhook.id, + eventId: payload.id, + statusCode: result.statusCode, + attempts, + deliveredAt: new Date(), + }; + } + + lastStatusCode = result.statusCode; + lastError = result.error; + + // Don't retry on 4xx errors (client errors) + if (result.statusCode && result.statusCode >= 400 && result.statusCode < 500) { + this.logger.warn( + `Webhook ${webhook.id} returned ${result.statusCode}, not retrying`, + ); + break; + } + } catch (error: any) { + lastError = error.message; + this.logger.warn( + `Webhook ${webhook.id} failed (attempt ${attempt}): ${error.message}`, + ); + } + + // Exponential backoff before retry + if (attempt < RETRY_CONFIG.maxAttempts) { + const delay = Math.min( + RETRY_CONFIG.baseDelayMs * Math.pow(RETRY_CONFIG.backoffMultiplier, attempt - 1), + RETRY_CONFIG.maxDelayMs, + ); + await this.sleep(delay); + } + } + + this.logger.error( + `Webhook ${webhook.id} failed after ${attempts} attempts: ${lastError}`, + ); + + return { + success: false, + webhookId: webhook.id, + eventId: payload.id, + statusCode: lastStatusCode, + error: lastError, + attempts, + }; + } + + /** + * Send single webhook request with signature + */ + private async sendWebhookRequest( + webhook: WebhookConfig, + payload: WebhookPayload, + ): Promise<{ success: boolean; statusCode?: number; error?: string }> { + const body = JSON.stringify(payload); + const signature = this.generateSignature(body, webhook.secret); + const timestamp = Math.floor(Date.now() / 1000).toString(); + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), this.webhookTimeoutMs); + + try { + const response = await fetch(webhook.url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Webhook-Id': webhook.id, + 'X-Webhook-Signature': signature, + 'X-Webhook-Timestamp': timestamp, + 'X-Event-Type': payload.type, + 'X-Event-Id': payload.id, + 'User-Agent': 'ByteBot-Webhook/1.0', + }, + body, + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + // 2xx status codes are success + if (response.ok) { + return { success: true, statusCode: response.status }; + } + + const responseText = await response.text().catch(() => ''); + return { + success: false, + statusCode: response.status, + error: `HTTP ${response.status}: ${responseText.substring(0, 200)}`, + }; + } catch (error: any) { + clearTimeout(timeoutId); + + if (error.name === 'AbortError') { + return { success: false, error: `Timeout after ${this.webhookTimeoutMs}ms` }; + } + + return { success: false, error: error.message }; + } + } + + /** + * Generate HMAC-SHA256 signature + * Format: sha256= + */ + private generateSignature(payload: string, secret: string): string { + const hmac = crypto.createHmac('sha256', secret); + hmac.update(payload); + return `sha256=${hmac.digest('hex')}`; + } + + /** + * Generate unique event ID + */ + private generateEventId(): string { + const timestamp = Date.now().toString(36); + const random = crypto.randomBytes(8).toString('hex'); + return `evt_${timestamp}_${random}`; + } + + /** + * Get active webhooks for tenant and event type + */ + async getActiveWebhooks( + tenantId: string, + eventType: WebhookEventType, + ): Promise { + try { + const webhooks = await this.prisma.webhookConfig.findMany({ + where: { + tenantId, + enabled: true, + events: { + has: eventType, + }, + }, + }); + + return webhooks.map((w) => ({ + id: w.id, + tenantId: w.tenantId, + url: w.url, + secret: w.secret, + events: w.events as WebhookEventType[], + enabled: w.enabled, + createdAt: w.createdAt, + })); + } catch (error: any) { + // Table might not exist yet - return empty + if (error.code === 'P2021' || error.message.includes('does not exist')) { + this.logger.debug('WebhookConfig table not found, skipping webhooks'); + return []; + } + throw error; + } + } + + /** + * Create webhook configuration for a tenant + */ + async createWebhook(config: { + tenantId: string; + url: string; + events: WebhookEventType[]; + secret?: string; + }): Promise { + const secret = config.secret || this.generateWebhookSecret(); + + const webhook = await this.prisma.webhookConfig.create({ + data: { + tenantId: config.tenantId, + url: config.url, + secret, + events: config.events, + enabled: true, + }, + }); + + this.logger.log(`Created webhook ${webhook.id} for tenant ${config.tenantId}`); + + return { + id: webhook.id, + tenantId: webhook.tenantId, + url: webhook.url, + secret: webhook.secret, + events: webhook.events as WebhookEventType[], + enabled: webhook.enabled, + createdAt: webhook.createdAt, + }; + } + + /** + * Update webhook configuration + */ + async updateWebhook( + webhookId: string, + updates: { + url?: string; + events?: WebhookEventType[]; + enabled?: boolean; + }, + ): Promise { + const webhook = await this.prisma.webhookConfig.update({ + where: { id: webhookId }, + data: updates, + }); + + return { + id: webhook.id, + tenantId: webhook.tenantId, + url: webhook.url, + secret: webhook.secret, + events: webhook.events as WebhookEventType[], + enabled: webhook.enabled, + createdAt: webhook.createdAt, + }; + } + + /** + * Delete webhook configuration + */ + async deleteWebhook(webhookId: string): Promise { + await this.prisma.webhookConfig.delete({ + where: { id: webhookId }, + }); + + this.logger.log(`Deleted webhook ${webhookId}`); + } + + /** + * Rotate webhook secret + */ + async rotateSecret(webhookId: string): Promise<{ secret: string }> { + const newSecret = this.generateWebhookSecret(); + + await this.prisma.webhookConfig.update({ + where: { id: webhookId }, + data: { secret: newSecret }, + }); + + this.logger.log(`Rotated secret for webhook ${webhookId}`); + + return { secret: newSecret }; + } + + /** + * Generate a secure webhook secret + */ + private generateWebhookSecret(): string { + return `whsec_${crypto.randomBytes(32).toString('hex')}`; + } + + /** + * Log webhook event for audit trail + */ + private async logWebhookEvent(payload: WebhookPayload, webhookCount: number): Promise { + this.logger.log( + `Webhook event: ${payload.type} for approval ${payload.data.approvalId} -> ${webhookCount} endpoints`, + ); + } + + /** + * Record delivery attempt for debugging/monitoring + */ + private async recordDeliveryAttempt( + webhookId: string, + eventId: string, + result: WebhookDeliveryResult, + ): Promise { + try { + await this.prisma.webhookDelivery.create({ + data: { + webhookId, + eventId, + success: result.success, + statusCode: result.statusCode, + error: result.error, + attempts: result.attempts, + deliveredAt: result.deliveredAt, + }, + }); + } catch (error: any) { + // Table might not exist - just log + if (!error.message.includes('does not exist')) { + this.logger.warn(`Failed to record delivery: ${error.message}`); + } + } + } + + /** + * Get webhook delivery history + */ + async getDeliveryHistory( + webhookId: string, + limit: number = 50, + ): Promise { + try { + return await this.prisma.webhookDelivery.findMany({ + where: { webhookId }, + orderBy: { createdAt: 'desc' }, + take: limit, + }); + } catch { + return []; + } + } + + /** + * Test webhook endpoint + */ + async testWebhook(webhookId: string): Promise { + const webhook = await this.prisma.webhookConfig.findUnique({ + where: { id: webhookId }, + }); + + if (!webhook) { + throw new Error('Webhook not found'); + } + + const testPayload: WebhookPayload = { + id: this.generateEventId(), + type: WebhookEventType.APPROVAL_REQUESTED, + timestamp: new Date().toISOString(), + version: '1.0', + data: { + approvalId: 'test_approval_123', + nodeRunId: 'test_node_run_123', + tenantId: webhook.tenantId, + toolName: 'communications_send_email', + riskLevel: 'HIGH', + summary: 'Test webhook notification', + recipient: 'test@example.com', + subject: 'Test Email', + links: { + approval: `${this.baseUrl}/approvals/test`, + }, + }, + }; + + const config: WebhookConfig = { + id: webhook.id, + tenantId: webhook.tenantId, + url: webhook.url, + secret: webhook.secret, + events: webhook.events as WebhookEventType[], + enabled: webhook.enabled, + createdAt: webhook.createdAt, + }; + + return this.deliverWebhook(config, testPayload); + } + + /** + * Helper to sleep for a given number of milliseconds + */ + private sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/workflow-checkpoint.service.ts b/packages/bytebot-workflow-orchestrator/src/services/workflow-checkpoint.service.ts new file mode 100644 index 000000000..696e00c9e --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/workflow-checkpoint.service.ts @@ -0,0 +1,642 @@ +/** + * Workflow Checkpoint Service + * v1.0.0: Phase 9 Self-Healing & Auto-Recovery + * + * Implements durable execution pattern (DBOS-style): + * - Saves workflow state at key points (before/after node execution) + * - Enables crash recovery by restoring from last checkpoint + * - Uses optimistic locking for concurrent updates + * - Automatic cleanup of old checkpoints + * + * Key concepts: + * - Checkpoint: A snapshot of workflow/node state at a point in time + * - Recovery: Restoring execution from the last valid checkpoint + * - Idempotency: Ensuring operations aren't duplicated on recovery + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { LeaderElectionService } from './leader-election.service'; + +// Checkpoint types +export enum CheckpointType { + WORKFLOW_STARTED = 'workflow:started', + WORKFLOW_NODE_STARTED = 'node:started', + WORKFLOW_NODE_COMPLETED = 'node:completed', + WORKFLOW_NODE_FAILED = 'node:failed', + WORKFLOW_COMPLETED = 'workflow:completed', + WORKFLOW_FAILED = 'workflow:failed', + CUSTOM = 'custom', +} + +// Checkpoint data structure +export interface CheckpointData { + type: CheckpointType; + workflowRunId: string; + tenantId: string; + nodeId?: string; + nodeRunId?: string; + attempt?: number; + state: { + status: string; + input?: any; + output?: any; + error?: string; + metadata?: Record; + }; + timestamp: Date; +} + +// Recovery options +export interface RecoveryOptions { + skipCompletedNodes?: boolean; + resetFailedNodes?: boolean; + fromCheckpoint?: string; +} + +// Recovery result +export interface RecoveryResult { + workflowRunId: string; + success: boolean; + checkpointUsed: string | null; + nodesRecovered: number; + nodesSkipped: number; + error?: string; +} + +@Injectable() +export class WorkflowCheckpointService implements OnModuleInit { + private readonly logger = new Logger(WorkflowCheckpointService.name); + + // Configuration + private readonly checkpointRetentionDays: number; + private readonly checkpointEnabled: boolean; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + private readonly leaderElection: LeaderElectionService, + ) { + this.checkpointRetentionDays = this.configService.get( + 'CHECKPOINT_RETENTION_DAYS', + 7, + ); + this.checkpointEnabled = this.configService.get( + 'CHECKPOINT_ENABLED', + true, + ); + } + + onModuleInit() { + this.logger.log( + `Workflow Checkpoint Service initialized (enabled: ${this.checkpointEnabled}, ` + + `retention: ${this.checkpointRetentionDays} days)`, + ); + } + + /** + * Create a checkpoint for a workflow or node + */ + async createCheckpoint(data: CheckpointData): Promise { + if (!this.checkpointEnabled) { + return ''; + } + + const checkpointKey = this.generateCheckpointKey(data); + const expiresAt = new Date(); + expiresAt.setDate(expiresAt.getDate() + this.checkpointRetentionDays); + + try { + const checkpoint = await this.prisma.workflowCheckpoint.upsert({ + where: { + workflowRunId_checkpointKey: { + workflowRunId: data.workflowRunId, + checkpointKey, + }, + }, + create: { + workflowRunId: data.workflowRunId, + tenantId: data.tenantId, + checkpointKey, + state: { + type: data.type, + nodeId: data.nodeId, + nodeRunId: data.nodeRunId, + attempt: data.attempt, + ...data.state, + timestamp: data.timestamp.toISOString(), + }, + metadata: { + createdAt: new Date().toISOString(), + }, + expiresAt, + }, + update: { + state: { + type: data.type, + nodeId: data.nodeId, + nodeRunId: data.nodeRunId, + attempt: data.attempt, + ...data.state, + timestamp: data.timestamp.toISOString(), + }, + version: { increment: 1 }, + metadata: { + updatedAt: new Date().toISOString(), + }, + expiresAt, + }, + }); + + this.logger.debug( + `Checkpoint created: ${checkpointKey} for workflow ${data.workflowRunId}`, + ); + + return checkpoint.id; + } catch (error) { + this.logger.error( + `Failed to create checkpoint ${checkpointKey}: ${error.message}`, + ); + throw error; + } + } + + /** + * Create checkpoint before node execution + */ + async checkpointNodeStart( + workflowRunId: string, + tenantId: string, + nodeId: string, + nodeRunId: string, + attempt: number, + input: any, + ): Promise { + return this.createCheckpoint({ + type: CheckpointType.WORKFLOW_NODE_STARTED, + workflowRunId, + tenantId, + nodeId, + nodeRunId, + attempt, + state: { + status: 'RUNNING', + input, + }, + timestamp: new Date(), + }); + } + + /** + * Create checkpoint after node completion + */ + async checkpointNodeComplete( + workflowRunId: string, + tenantId: string, + nodeId: string, + nodeRunId: string, + attempt: number, + output: any, + ): Promise { + return this.createCheckpoint({ + type: CheckpointType.WORKFLOW_NODE_COMPLETED, + workflowRunId, + tenantId, + nodeId, + nodeRunId, + attempt, + state: { + status: 'COMPLETED', + output, + }, + timestamp: new Date(), + }); + } + + /** + * Create checkpoint after node failure + */ + async checkpointNodeFailed( + workflowRunId: string, + tenantId: string, + nodeId: string, + nodeRunId: string, + attempt: number, + error: string, + ): Promise { + return this.createCheckpoint({ + type: CheckpointType.WORKFLOW_NODE_FAILED, + workflowRunId, + tenantId, + nodeId, + nodeRunId, + attempt, + state: { + status: 'FAILED', + error, + }, + timestamp: new Date(), + }); + } + + /** + * Get the latest checkpoint for a workflow + */ + async getLatestCheckpoint( + workflowRunId: string, + ): Promise<{ id: string; key: string; state: any } | null> { + const checkpoint = await this.prisma.workflowCheckpoint.findFirst({ + where: { + workflowRunId, + recoverable: true, + }, + orderBy: { updatedAt: 'desc' }, + }); + + if (!checkpoint) { + return null; + } + + return { + id: checkpoint.id, + key: checkpoint.checkpointKey, + state: checkpoint.state, + }; + } + + /** + * Get all checkpoints for a workflow + */ + async getWorkflowCheckpoints(workflowRunId: string): Promise> { + const checkpoints = await this.prisma.workflowCheckpoint.findMany({ + where: { workflowRunId }, + orderBy: { updatedAt: 'asc' }, + }); + + return checkpoints.map((cp) => ({ + id: cp.id, + key: cp.checkpointKey, + version: cp.version, + state: cp.state, + createdAt: cp.createdAt, + updatedAt: cp.updatedAt, + })); + } + + /** + * Recover a workflow from its last checkpoint + */ + async recoverWorkflow( + workflowRunId: string, + options: RecoveryOptions = {}, + ): Promise { + const result: RecoveryResult = { + workflowRunId, + success: false, + checkpointUsed: null, + nodesRecovered: 0, + nodesSkipped: 0, + }; + + try { + // Get workflow run + const workflowRun = await this.prisma.workflowRun.findUnique({ + where: { id: workflowRunId }, + include: { + nodes: { + include: { + nodeRuns: { + orderBy: { attempt: 'desc' }, + take: 1, + }, + }, + }, + }, + }); + + if (!workflowRun) { + result.error = 'Workflow run not found'; + return result; + } + + // Get checkpoints + const checkpoints = await this.getWorkflowCheckpoints(workflowRunId); + if (checkpoints.length === 0) { + result.error = 'No checkpoints found for workflow'; + return result; + } + + // Find the checkpoint to recover from + let targetCheckpoint = checkpoints[checkpoints.length - 1]; + if (options.fromCheckpoint) { + const found = checkpoints.find((cp) => cp.key === options.fromCheckpoint); + if (found) { + targetCheckpoint = found; + } + } + + result.checkpointUsed = targetCheckpoint.key; + + // Recover each node based on checkpoint state + for (const node of workflowRun.nodes) { + const nodeCheckpoints = checkpoints.filter( + (cp) => (cp.state as any).nodeId === node.id, + ); + + const latestNodeCheckpoint = nodeCheckpoints[nodeCheckpoints.length - 1]; + + if (!latestNodeCheckpoint) { + // No checkpoint for this node - reset to PENDING + await this.prisma.workflowNode.update({ + where: { id: node.id }, + data: { status: 'PENDING' }, + }); + result.nodesRecovered++; + continue; + } + + const nodeState = latestNodeCheckpoint.state as any; + + if (nodeState.status === 'COMPLETED' && options.skipCompletedNodes) { + result.nodesSkipped++; + continue; + } + + if (nodeState.status === 'FAILED' && !options.resetFailedNodes) { + result.nodesSkipped++; + continue; + } + + // Reset node to appropriate state for re-execution + if (nodeState.status === 'RUNNING' || nodeState.status === 'FAILED') { + await this.prisma.workflowNode.update({ + where: { id: node.id }, + data: { + status: 'PENDING', + error: null, + }, + }); + + // If there's a node run, mark it for retry + const latestRun = node.nodeRuns[0]; + if (latestRun) { + await this.prisma.workflowNodeRun.update({ + where: { id: latestRun.id }, + data: { + status: 'FAILED', + error: 'Recovered from crash - will retry', + completedAt: new Date(), + }, + }); + } + + result.nodesRecovered++; + } + } + + // Update workflow status to allow re-execution + if (workflowRun.status === 'RUNNING' || workflowRun.status === 'FAILED') { + await this.prisma.workflowRun.update({ + where: { id: workflowRunId }, + data: { + status: 'PENDING', + error: null, + }, + }); + } + + // Log recovery action + await this.logRecoveryAction( + workflowRun.tenantId, + workflowRunId, + 'WORKFLOW_RECOVERED', + workflowRun.status, + 'PENDING', + `Recovered from checkpoint: ${targetCheckpoint.key}`, + true, + ); + + result.success = true; + + this.logger.log( + `Workflow ${workflowRunId} recovered from checkpoint ${targetCheckpoint.key}: ` + + `${result.nodesRecovered} nodes recovered, ${result.nodesSkipped} skipped`, + ); + + this.eventEmitter.emit('workflow.recovered', { + workflowRunId, + checkpoint: targetCheckpoint.key, + nodesRecovered: result.nodesRecovered, + }); + + return result; + } catch (error) { + result.error = error.message; + this.logger.error( + `Failed to recover workflow ${workflowRunId}: ${error.message}`, + error.stack, + ); + return result; + } + } + + /** + * Check if a workflow can be recovered + */ + async canRecover(workflowRunId: string): Promise<{ + recoverable: boolean; + checkpointCount: number; + latestCheckpoint: string | null; + reason?: string; + }> { + const workflowRun = await this.prisma.workflowRun.findUnique({ + where: { id: workflowRunId }, + }); + + if (!workflowRun) { + return { + recoverable: false, + checkpointCount: 0, + latestCheckpoint: null, + reason: 'Workflow run not found', + }; + } + + // Can't recover completed workflows + if (workflowRun.status === 'COMPLETED') { + return { + recoverable: false, + checkpointCount: 0, + latestCheckpoint: null, + reason: 'Workflow already completed', + }; + } + + const checkpoints = await this.prisma.workflowCheckpoint.findMany({ + where: { + workflowRunId, + recoverable: true, + }, + orderBy: { updatedAt: 'desc' }, + }); + + if (checkpoints.length === 0) { + return { + recoverable: false, + checkpointCount: 0, + latestCheckpoint: null, + reason: 'No recoverable checkpoints found', + }; + } + + return { + recoverable: true, + checkpointCount: checkpoints.length, + latestCheckpoint: checkpoints[0].checkpointKey, + }; + } + + /** + * Mark a checkpoint as non-recoverable (e.g., after permanent failure) + */ + async markNonRecoverable( + workflowRunId: string, + reason: string, + ): Promise { + await this.prisma.workflowCheckpoint.updateMany({ + where: { workflowRunId }, + data: { + recoverable: false, + recoveryHint: reason, + }, + }); + + this.logger.log( + `Marked checkpoints for workflow ${workflowRunId} as non-recoverable: ${reason}`, + ); + } + + /** + * Cleanup expired checkpoints (runs daily) + */ + @Cron(CronExpression.EVERY_DAY_AT_3AM) + async cleanupExpiredCheckpoints(): Promise { + // Only run on leader + if (!this.leaderElection.isLeader) { + return; + } + + try { + const result = await this.prisma.workflowCheckpoint.deleteMany({ + where: { + expiresAt: { + lt: new Date(), + }, + }, + }); + + if (result.count > 0) { + this.logger.log(`Cleaned up ${result.count} expired checkpoints`); + } + } catch (error) { + this.logger.error(`Checkpoint cleanup failed: ${error.message}`); + } + } + + /** + * Get checkpoint statistics + */ + async getCheckpointStats(): Promise<{ + totalCheckpoints: number; + recoverableCheckpoints: number; + expiringWithin24h: number; + oldestCheckpoint: Date | null; + }> { + const tomorrow = new Date(); + tomorrow.setDate(tomorrow.getDate() + 1); + + const [total, recoverable, expiring, oldest] = await Promise.all([ + this.prisma.workflowCheckpoint.count(), + this.prisma.workflowCheckpoint.count({ + where: { recoverable: true }, + }), + this.prisma.workflowCheckpoint.count({ + where: { + expiresAt: { + lt: tomorrow, + gte: new Date(), + }, + }, + }), + this.prisma.workflowCheckpoint.findFirst({ + orderBy: { createdAt: 'asc' }, + select: { createdAt: true }, + }), + ]); + + return { + totalCheckpoints: total, + recoverableCheckpoints: recoverable, + expiringWithin24h: expiring, + oldestCheckpoint: oldest?.createdAt ?? null, + }; + } + + // ========================================================================= + // Private Methods + // ========================================================================= + + /** + * Generate a unique checkpoint key + */ + private generateCheckpointKey(data: CheckpointData): string { + if (data.nodeId && data.nodeRunId) { + return `${data.type}:${data.nodeId}:${data.nodeRunId}:${data.attempt ?? 1}`; + } + return `${data.type}:${data.workflowRunId}`; + } + + /** + * Log a recovery action + */ + private async logRecoveryAction( + tenantId: string, + targetId: string, + actionType: string, + previousState: string, + newState: string, + reason: string, + success: boolean, + errorMessage?: string, + ): Promise { + try { + await this.prisma.recoveryLog.create({ + data: { + tenantId, + actionType, + targetType: 'WORKFLOW', + targetId, + previousState, + newState, + reason, + actorType: 'SYSTEM', + success, + errorMessage, + }, + }); + } catch (error) { + this.logger.error(`Failed to log recovery action: ${error.message}`); + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/workflow.service.ts b/packages/bytebot-workflow-orchestrator/src/services/workflow.service.ts new file mode 100644 index 000000000..90723126a --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/workflow.service.ts @@ -0,0 +1,909 @@ +/** + * Workflow Service + * v1.5.0: DB-driven retry gating with nextAttemptAt field (prevents tight loop) + * v1.4.0: Hardened capacity check to use K8s as source of truth + * v1.3.0: Added proactive capacity check with MAX_ACTIVE_WORKSPACES_GLOBAL + * v1.2.0: Fixed orphan pod bug - hibernation with retry and GC tracking + * v1.1.0: Fixed runaway loop bug - link first, provision second pattern + * + * Responsibilities: + * - Create and manage workflow runs + * - Track workflow state transitions + * - Coordinate with workspace and scheduler services + * + * Key Change (v1.4.0): + * - checkCapacity() now queries K8s pod count (source of truth) + * - Falls back to DB count if K8s unreachable + * - Logs warning if DB/K8s counts diverge + * + * Key Change (v1.3.0): + * - Added checkCapacity() to proactively check before provisioning + * - Respects MAX_ACTIVE_WORKSPACES_GLOBAL limit (default: 6) + * - Returns WAITING_FOR_CAPACITY before hitting cluster scheduling limits + * + * Key Change (v1.2.0): + * - Added hibernateWorkspaceWithTracking() with exponential backoff retry + * - Marks failed hibernations as HIBERNATION_FAILED for GC cleanup + * - completeWorkflow() and cancelWorkflow() now never throw on hibernation failure + * + * Key Change (v1.1.0): + * - Separated createWorkflowRecord (DB-only) from workspace provisioning + * - Added workspace provisioning status tracking to prevent infinite loops + * - Implemented idempotent workflow creation per Kubebuilder best practices + * + * @see https://book.kubebuilder.io/reference/good-practices + */ + +import { Injectable, Logger, Inject, forwardRef } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import { PrismaService } from './prisma.service'; +import { WorkspaceService } from './workspace.service'; +import { WorkspaceDbReconcilerService } from './workspace-db-reconciler.service'; +import { createId } from '@paralleldrive/cuid2'; + +// Workflow statuses +export enum WorkflowStatus { + PENDING = 'PENDING', + RUNNING = 'RUNNING', + COMPLETED = 'COMPLETED', + FAILED = 'FAILED', + CANCELLED = 'CANCELLED', +} + +// Node statuses +export enum NodeStatus { + PENDING = 'PENDING', + READY = 'READY', + RUNNING = 'RUNNING', + COMPLETED = 'COMPLETED', + FAILED = 'FAILED', + SKIPPED = 'SKIPPED', +} + +// v1.1.0: Workspace provisioning statuses for idempotency +export enum WorkspaceProvisioningStatus { + PENDING = 'PENDING', + CREATING = 'CREATING', + READY = 'READY', + FAILED = 'FAILED', + WAITING_FOR_CAPACITY = 'WAITING_FOR_CAPACITY', +} + +export interface CreateWorkflowInput { + tenantId: string; + workflowTemplateId?: string; + name?: string; + description?: string; + nodes: WorkflowNodeInput[]; + persistence?: { + enabled: boolean; + storageClass?: string; + size?: string; + }; +} + +export interface WorkflowNodeInput { + id?: string; + name: string; + type: 'TASK' | 'DECISION' | 'PARALLEL' | 'WAIT'; + config: Record; + dependencies?: string[]; +} + +export interface WorkflowRunResult { + id: string; + workspaceId: string; + status: WorkflowStatus; + nodes: any[]; + createdAt: Date; +} + +@Injectable() +export class WorkflowService { + private readonly logger = new Logger(WorkflowService.name); + // v1.3.0: Hard cap on active workspaces to prevent cluster capacity exhaustion + // With only 3 desktop-capable nodes, we can safely run ~6 workspace pods max + // (2 per node, leaving headroom for pool pods and system pods) + private readonly maxActiveWorkspaces: number; + + constructor( + private prisma: PrismaService, + private workspaceService: WorkspaceService, + private eventEmitter: EventEmitter2, + private configService: ConfigService, + @Inject(forwardRef(() => WorkspaceDbReconcilerService)) + private workspaceDbReconciler: WorkspaceDbReconcilerService, + ) { + // v1.3.0: Load max workspaces from config (default: 6 for 3-node cluster) + this.maxActiveWorkspaces = parseInt( + this.configService.get('MAX_ACTIVE_WORKSPACES_GLOBAL', '6'), + 10, + ); + this.logger.log(`Max active workspaces set to ${this.maxActiveWorkspaces}`); + } + + /** + * v1.1.0: Create workflow record only (DB operation) + * + * This is the idempotent first step - creates only DB records: + * 1. A Workspace record (with provisioning status = PENDING) + * 2. A WorkflowRun record + * 3. WorkflowNode records for each node + * + * Workspace provisioning happens separately via ensureWorkspaceProvisioned() + * This separation prevents the runaway loop bug where failed provisioning + * caused new workflows to be created on each loop iteration. + * + * @see Phase 2 fix: https://book.kubebuilder.io/reference/good-practices + */ + async createWorkflowRecord(input: CreateWorkflowInput): Promise { + const workflowId = `wf-${createId()}`; + const workspaceId = `ws-${createId()}`; + + this.logger.log(`Creating workflow record ${workflowId} with workspace ${workspaceId} (DB only, no provisioning)`); + + // Use transaction to ensure atomicity of DB records + const result = await this.prisma.$transaction(async (tx) => { + // 1. Create Workspace record with PENDING provisioning status + const workspace = await tx.workspace.create({ + data: { + id: workspaceId, + tenantId: input.tenantId, + status: WorkspaceProvisioningStatus.PENDING, + lockedBy: null, + lockedAt: null, + persistenceEnabled: input.persistence?.enabled ?? true, + storageClass: input.persistence?.storageClass ?? null, + storageSize: input.persistence?.size || '10Gi', + // v1.1.0: Provisioning tracking fields + provisioningAttemptCount: 0, + lastProvisioningAttemptAt: null, + }, + }); + + // 2. Create WorkflowRun record + const workflowRun = await tx.workflowRun.create({ + data: { + id: workflowId, + workspaceId, + tenantId: input.tenantId, + templateId: input.workflowTemplateId, + name: input.name || `Workflow ${workflowId}`, + description: input.description, + status: WorkflowStatus.PENDING, + }, + }); + + // 3. Create WorkflowNode records + const nodes = await Promise.all( + input.nodes.map(async (node, index) => { + const nodeId = node.id || `node-${createId()}`; + return tx.workflowNode.create({ + data: { + id: nodeId, + workflowRunId: workflowId, + name: node.name, + type: node.type, + config: node.config, + dependencies: node.dependencies || [], + order: index, + status: NodeStatus.PENDING, + }, + }); + }), + ); + + return { workspace, workflowRun, nodes }; + }); + + // Emit workflow created event (record only, not provisioned yet) + this.eventEmitter.emit('workflow.record-created', { + workflowId, + workspaceId, + tenantId: input.tenantId, + }); + + this.logger.log(`Workflow record ${workflowId} created successfully (awaiting provisioning)`); + + return { + id: workflowId, + workspaceId, + status: WorkflowStatus.PENDING, + nodes: result.nodes, + createdAt: result.workflowRun.createdAt, + }; + } + + /** + * vNext (Temporal parity): Ensure a WorkflowRun + Workspace exist for a GoalRun. + * + * Why: TEMPORAL_WORKFLOW runs still dispatch tasks via the orchestrator/agent stack, and + * desktop tasks require a stable `workspaceId`. We create a minimal WorkflowRun record + * (no nodes) that exists solely to anchor the workspace lifecycle. + * + * Design: + * - Deterministic IDs: workflowRunId = wf-{goalRunId}, workspaceId = ws-{goalRunId} + * - Idempotent via upsert in a single DB transaction (no orphans under concurrency) + */ + async getOrCreateGoalRunWorkspace(params: { + goalRunId: string; + tenantId: string; + persistence?: { enabled?: boolean; storageClass?: string | null; size?: string | null }; + }): Promise<{ workflowRunId: string; workspaceId: string; created: boolean }> { + const workflowRunId = `wf-${params.goalRunId}`; + const expectedWorkspaceId = `ws-${params.goalRunId}`; + + const existing = await this.prisma.workflowRun.findUnique({ + where: { id: workflowRunId }, + select: { id: true, workspaceId: true, tenantId: true }, + }); + + if (existing) { + if (existing.tenantId !== params.tenantId) { + throw new Error( + `WorkflowRun ${workflowRunId} belongs to tenant ${existing.tenantId}, not ${params.tenantId}`, + ); + } + return { workflowRunId: existing.id, workspaceId: existing.workspaceId, created: false }; + } + + await this.prisma.$transaction(async (tx) => { + await tx.workspace.upsert({ + where: { id: expectedWorkspaceId }, + create: { + id: expectedWorkspaceId, + tenantId: params.tenantId, + status: WorkspaceProvisioningStatus.PENDING, + lockedBy: null, + lockedAt: null, + persistenceEnabled: params.persistence?.enabled ?? true, + storageClass: params.persistence?.storageClass ?? null, + storageSize: params.persistence?.size ?? '10Gi', + provisioningAttemptCount: 0, + lastProvisioningAttemptAt: null, + }, + update: {}, + }); + + await tx.workflowRun.upsert({ + where: { id: workflowRunId }, + create: { + id: workflowRunId, + workspaceId: expectedWorkspaceId, + tenantId: params.tenantId, + templateId: null, + name: `GoalRun ${params.goalRunId}`, + description: `Workspace anchor for goal run ${params.goalRunId} (Temporal engine)`, + status: WorkflowStatus.PENDING, + }, + update: {}, + }); + }); + + const createdWorkflow = await this.prisma.workflowRun.findUnique({ + where: { id: workflowRunId }, + select: { workspaceId: true, tenantId: true }, + }); + + if (!createdWorkflow) { + throw new Error(`Failed to create or fetch WorkflowRun ${workflowRunId}`); + } + if (createdWorkflow.tenantId !== params.tenantId) { + throw new Error( + `WorkflowRun ${workflowRunId} belongs to tenant ${createdWorkflow.tenantId}, not ${params.tenantId}`, + ); + } + + return { workflowRunId, workspaceId: createdWorkflow.workspaceId, created: true }; + } + + /** + * v1.1.0: Ensure workspace is provisioned (idempotent) + * + * This method handles workspace desktop provisioning with: + * - Idempotency: Only provisions if status is PENDING or WAITING_FOR_CAPACITY + * - State tracking: Updates provisioning attempt count and timestamps + * - Capacity detection: Returns special status for capacity issues (not failure) + * + * Returns the provisioning result with status and backoff hint for capacity issues. + */ + async ensureWorkspaceProvisioned( + workflowId: string, + tenantId: string, + persistence?: { enabled?: boolean; storageClass?: string; size?: string }, + ): Promise<{ + success: boolean; + status: WorkspaceProvisioningStatus; + retryAfterMs?: number; + error?: string; + }> { + // Get workflow and workspace + const workflow = await this.prisma.workflowRun.findUnique({ + where: { id: workflowId }, + include: { workspace: true }, + }); + + if (!workflow || !workflow.workspace) { + return { + success: false, + status: WorkspaceProvisioningStatus.FAILED, + error: `Workflow ${workflowId} not found or has no workspace`, + }; + } + + const workspace = workflow.workspace; + const currentStatus = workspace.status as WorkspaceProvisioningStatus; + + // Idempotency check: If already READY, return success + if (currentStatus === WorkspaceProvisioningStatus.READY) { + this.logger.debug(`Workspace ${workspace.id} already provisioned (idempotent skip)`); + return { success: true, status: WorkspaceProvisioningStatus.READY }; + } + + // If already FAILED, don't retry automatically + if (currentStatus === WorkspaceProvisioningStatus.FAILED) { + this.logger.warn(`Workspace ${workspace.id} previously failed provisioning`); + return { + success: false, + status: WorkspaceProvisioningStatus.FAILED, + error: workspace.error || 'Previous provisioning failed', + }; + } + + // Calculate backoff for WAITING_FOR_CAPACITY + const attemptCount = (workspace as any).provisioningAttemptCount || 0; + const lastAttempt = (workspace as any).lastProvisioningAttemptAt; + + if (currentStatus === WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY && lastAttempt) { + // Exponential backoff: 30s, 60s, 120s, 240s, capped at 300s (5 min) + const baseBackoffMs = 30000; + const backoffMs = Math.min(baseBackoffMs * Math.pow(2, attemptCount), 300000); + const jitter = (Math.random() - 0.5) * 0.2 * backoffMs; // ±10% jitter + const nextRetryAt = new Date(lastAttempt).getTime() + backoffMs + jitter; + const now = Date.now(); + + if (now < nextRetryAt) { + const retryAfterMs = Math.ceil(nextRetryAt - now); + this.logger.debug( + `Workspace ${workspace.id} waiting for capacity, retry in ${Math.ceil(retryAfterMs / 1000)}s ` + + `(attempt ${attemptCount + 1}, backoff ${Math.ceil(backoffMs / 1000)}s)` + ); + return { + success: false, + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + retryAfterMs, + }; + } + } + + // v1.3.0: Proactive capacity check BEFORE attempting provisioning + // This prevents cluster scheduling failures by rejecting early + const capacityCheck = await this.checkCapacity(workspace.id); + if (!capacityCheck.hasCapacity) { + // v1.5.0: Calculate nextAttemptAt for DB-driven retry gating + // This prevents the orchestrator tight loop from rechecking every second + const backoffMs = capacityCheck.backoffMs || 30000; + const nextAttemptAt = new Date(Date.now() + backoffMs); + + // At capacity - update workspace status and return backoff hint + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + provisioningAttemptCount: attemptCount + 1, + lastProvisioningAttemptAt: new Date(), + nextAttemptAt, // v1.5.0: DB-driven retry gating + error: `Capacity limit reached: ${capacityCheck.activeCount}/${capacityCheck.maxAllowed} active workspaces`, + }, + }); + + this.logger.warn( + `Workspace ${workspace.id} rejected: capacity limit reached ` + + `(${capacityCheck.activeCount}/${capacityCheck.maxAllowed}), retry at ${nextAttemptAt.toISOString()}` + ); + + return { + success: false, + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + retryAfterMs: backoffMs, + error: `Capacity limit reached: ${capacityCheck.activeCount}/${capacityCheck.maxAllowed} active workspaces`, + }; + } + + // Update status to CREATING and increment attempt count + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: WorkspaceProvisioningStatus.CREATING, + provisioningAttemptCount: attemptCount + 1, + lastProvisioningAttemptAt: new Date(), + }, + }); + + this.logger.log( + `Provisioning workspace ${workspace.id} (attempt ${attemptCount + 1})` + ); + + // Attempt provisioning + try { + await this.workspaceService.ensureWorkspaceDesktop( + workspace.id, + tenantId, + persistence, + ); + + // Success - update status to READY and clear retry gating + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: WorkspaceProvisioningStatus.READY, + nextAttemptAt: null, // v1.5.0: Clear retry gating on success + error: null, + }, + }); + + this.logger.log(`Workspace ${workspace.id} provisioned successfully`); + + // Emit workflow created event (now fully provisioned) + this.eventEmitter.emit('workflow.created', { + workflowId, + workspaceId: workspace.id, + tenantId, + }); + + return { success: true, status: WorkspaceProvisioningStatus.READY }; + } catch (error: any) { + const errorMessage = error.message || 'Unknown provisioning error'; + this.logger.error(`Workspace ${workspace.id} provisioning failed: ${errorMessage}`); + + // Detect capacity issues (timeout, scheduling failure, resource exhaustion) + const isCapacityIssue = this.isCapacityError(errorMessage); + + if (isCapacityIssue) { + // Calculate next retry backoff + const backoffMs = Math.min(30000 * Math.pow(2, attemptCount), 300000); + // v1.5.0: Set nextAttemptAt for DB-driven retry gating + const nextAttemptAt = new Date(Date.now() + backoffMs); + + // Capacity issue - set WAITING_FOR_CAPACITY, not FAILED + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + nextAttemptAt, // v1.5.0: DB-driven retry gating + error: errorMessage, + }, + }); + + this.logger.warn( + `Workspace ${workspace.id} waiting for capacity (attempt ${attemptCount + 1}), ` + + `retry at ${nextAttemptAt.toISOString()}` + ); + + return { + success: false, + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + retryAfterMs: backoffMs, + error: errorMessage, + }; + } + + // Non-capacity error - mark as FAILED + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: WorkspaceProvisioningStatus.FAILED, + error: errorMessage, + }, + }); + + // Also mark workflow as failed + await this.prisma.workflowRun.update({ + where: { id: workflowId }, + data: { + status: WorkflowStatus.FAILED, + error: `Workspace creation failed: ${errorMessage}`, + completedAt: new Date(), + }, + }); + + return { + success: false, + status: WorkspaceProvisioningStatus.FAILED, + error: errorMessage, + }; + } + } + + /** + * v1.1.0: Detect if an error is a capacity/scheduling issue + * These should trigger WAITING_FOR_CAPACITY, not FAILED + */ + private isCapacityError(errorMessage: string): boolean { + const capacityPatterns = [ + /timeout/i, + /pod not ready/i, + /insufficient/i, + /unschedulable/i, + /no nodes available/i, + /exceeds available/i, + /quota exceeded/i, + /too many requests/i, + /429/i, + /deadline/i, + /capacity/i, + ]; + return capacityPatterns.some(pattern => pattern.test(errorMessage)); + } + + /** + * v1.3.0: Proactively check if we have capacity for a new workspace + * + * This prevents scheduling failures by checking BEFORE attempting provisioning. + * Returns { hasCapacity: true } if we can provision, or { hasCapacity: false } + * with suggested backoff time if at capacity. + * + * v1.4.0: Active workspaces counted from K8s pods (source of truth) + * Falls back to DB count if K8s is unreachable + * Logs warning if DB and K8s counts diverge (indicates drift) + */ + private async checkCapacity(excludeWorkspaceId?: string): Promise<{ + hasCapacity: boolean; + activeCount: number; + maxAllowed: number; + backoffMs?: number; + source: 'k8s' | 'db'; + }> { + let k8sCount: number | null = null; + let dbCount: number; + + // v1.4.0: Try K8s first (source of truth) + try { + k8sCount = await this.workspaceDbReconciler.getK8sActiveWorkspaceCount(); + } catch (error: any) { + this.logger.warn( + `K8s unreachable for capacity check, falling back to DB: ${error.message}`, + ); + } + + // Always get DB count for comparison + const whereClause: any = { + status: { + in: [ + WorkspaceProvisioningStatus.READY, + WorkspaceProvisioningStatus.CREATING, + ], + }, + }; + + if (excludeWorkspaceId) { + whereClause.id = { not: excludeWorkspaceId }; + } + + dbCount = await this.prisma.workspace.count({ + where: whereClause, + }); + + // Use K8s count if available, otherwise fall back to DB + const activeCount = k8sCount !== null ? k8sCount : dbCount; + const source = k8sCount !== null ? 'k8s' : 'db'; + + // v1.4.0: Warn if counts diverge - this indicates drift + if (k8sCount !== null && k8sCount !== dbCount) { + this.logger.warn( + `Workspace count drift detected: K8s=${k8sCount}, DB=${dbCount} ` + + `(using K8s as source of truth). Reconciler will fix this.`, + ); + } + + const hasCapacity = activeCount < this.maxActiveWorkspaces; + + if (!hasCapacity) { + this.logger.warn( + `Capacity check failed (${source}): ${activeCount}/${this.maxActiveWorkspaces} active workspaces`, + ); + } else { + this.logger.debug( + `Capacity check passed (${source}): ${activeCount}/${this.maxActiveWorkspaces} active workspaces`, + ); + } + + return { + hasCapacity, + activeCount, + maxAllowed: this.maxActiveWorkspaces, + backoffMs: hasCapacity ? undefined : 30000, + source, + }; + } + + /** + * v1.2.0: Helper to retry an async operation with exponential backoff + * + * Implements best practices: + * - Exponential backoff with jitter to prevent thundering herd + * - Configurable max retries + * - Returns result or throws after all retries exhausted + */ + private async withRetry( + operation: () => Promise, + options: { + maxRetries?: number; + baseDelayMs?: number; + maxDelayMs?: number; + operationName?: string; + } = {}, + ): Promise { + const maxRetries = options.maxRetries ?? 3; + const baseDelayMs = options.baseDelayMs ?? 1000; + const maxDelayMs = options.maxDelayMs ?? 10000; + const operationName = options.operationName ?? 'operation'; + + let lastError: Error | null = null; + + for (let attempt = 0; attempt <= maxRetries; attempt++) { + try { + return await operation(); + } catch (error: any) { + lastError = error; + + if (attempt === maxRetries) { + this.logger.error( + `${operationName} failed after ${maxRetries + 1} attempts: ${error.message}`, + ); + throw error; + } + + // Exponential backoff with jitter (±25%) + const delay = Math.min(baseDelayMs * Math.pow(2, attempt), maxDelayMs); + const jitter = delay * (0.5 + Math.random() * 0.5); // 50-100% of delay + const finalDelay = Math.floor(jitter); + + this.logger.warn( + `${operationName} failed (attempt ${attempt + 1}/${maxRetries + 1}), ` + + `retrying in ${finalDelay}ms: ${error.message}`, + ); + + await new Promise((resolve) => setTimeout(resolve, finalDelay)); + } + } + + throw lastError; + } + + /** + * v1.2.0: Helper to hibernate workspace with retry and error tracking + * + * This method: + * 1. Attempts hibernation with exponential backoff + * 2. Updates workspace status on success + * 3. On failure, marks workspace for GC cleanup (HIBERNATION_FAILED status) + * 4. Never throws - returns success/failure result + * + * This prevents orphan pods by: + * - Retrying transient failures + * - Tracking failed hibernations for GC service cleanup + */ + private async hibernateWorkspaceWithTracking( + workspaceId: string, + workflowId: string, + ): Promise<{ success: boolean; error?: string }> { + // Get current attempt count + const workspace = await this.prisma.workspace.findUnique({ + where: { id: workspaceId }, + select: { hibernationAttemptCount: true }, + }); + + const attemptCount = (workspace?.hibernationAttemptCount ?? 0) + 1; + + // Update attempt tracking before trying + await this.prisma.workspace.update({ + where: { id: workspaceId }, + data: { + hibernationAttemptCount: attemptCount, + lastHibernationAttemptAt: new Date(), + }, + }); + + try { + // Attempt hibernation with retry + await this.withRetry( + () => this.workspaceService.hibernateWorkspace(workspaceId), + { + maxRetries: 3, + baseDelayMs: 1000, + maxDelayMs: 5000, + operationName: `hibernateWorkspace(${workspaceId})`, + }, + ); + + // Success - update status + await this.prisma.workspace.update({ + where: { id: workspaceId }, + data: { + status: 'HIBERNATED', + hibernationError: null, + }, + }); + + this.logger.log(`Workspace ${workspaceId} hibernated successfully`); + return { success: true }; + } catch (error: any) { + const errorMessage = error.message || 'Unknown hibernation error'; + + // Mark for GC cleanup - workspace pod may still be running + await this.prisma.workspace.update({ + where: { id: workspaceId }, + data: { + status: 'HIBERNATION_FAILED', + hibernationError: errorMessage, + }, + }); + + this.logger.error( + `Workspace ${workspaceId} hibernation failed after ${attemptCount} attempts: ${errorMessage}. ` + + `Marked for GC cleanup.`, + { workflowId, workspaceId, attemptCount }, + ); + + // Emit event for monitoring/alerting + this.eventEmitter.emit('workspace.hibernation-failed', { + workspaceId, + workflowId, + attemptCount, + error: errorMessage, + }); + + return { success: false, error: errorMessage }; + } + } + + /** + * Create a new workflow run (legacy method - now calls createWorkflowRecord + ensureWorkspaceProvisioned) + * + * This creates: + * 1. A Workspace record (with lock fields) + * 2. A WorkflowRun record + * 3. WorkflowNode records for each node + * 4. Requests a persistent desktop from task-controller + * + * @deprecated Use createWorkflowRecord() + ensureWorkspaceProvisioned() for better idempotency + */ + async createWorkflow(input: CreateWorkflowInput): Promise { + // Create DB records first + const result = await this.createWorkflowRecord(input); + + // Then provision workspace (may fail, but workflow record exists) + const provisionResult = await this.ensureWorkspaceProvisioned( + result.id, + input.tenantId, + input.persistence, + ); + + if (!provisionResult.success && provisionResult.status === WorkspaceProvisioningStatus.FAILED) { + throw new Error(provisionResult.error || 'Workspace provisioning failed'); + } + + return result; + } + + /** + * Get workflow run by ID + */ + async getWorkflow(workflowId: string): Promise { + return this.prisma.workflowRun.findUnique({ + where: { id: workflowId }, + include: { + nodes: { + orderBy: { order: 'asc' }, + }, + workspace: true, + }, + }); + } + + /** + * Start workflow execution + */ + async startWorkflow(workflowId: string): Promise { + this.logger.log(`Starting workflow ${workflowId}`); + + await this.prisma.workflowRun.update({ + where: { id: workflowId }, + data: { + status: WorkflowStatus.RUNNING, + startedAt: new Date(), + }, + }); + + // Mark nodes with no dependencies as READY + await this.prisma.workflowNode.updateMany({ + where: { + workflowRunId: workflowId, + status: NodeStatus.PENDING, + dependencies: { equals: [] }, + }, + data: { status: NodeStatus.READY }, + }); + + this.eventEmitter.emit('workflow.started', { workflowId }); + } + + /** + * Complete workflow + * + * v1.2.0: Uses hibernateWorkspaceWithTracking() to prevent orphan pods + * - Retries hibernation with exponential backoff + * - Marks failed hibernations for GC cleanup + * - Never throws on hibernation failure (workflow is already complete) + */ + async completeWorkflow( + workflowId: string, + status: WorkflowStatus.COMPLETED | WorkflowStatus.FAILED, + error?: string, + ): Promise { + this.logger.log(`Completing workflow ${workflowId} with status ${status}`); + + const workflow = await this.prisma.workflowRun.update({ + where: { id: workflowId }, + data: { + status, + error, + completedAt: new Date(), + }, + include: { workspace: true }, + }); + + // v1.2.0: Hibernate with retry and error tracking (prevents orphan pods) + if (workflow.workspace) { + // This method never throws - marks for GC cleanup on failure + await this.hibernateWorkspaceWithTracking(workflow.workspaceId, workflowId); + } + + this.eventEmitter.emit('workflow.completed', { + workflowId, + status, + error, + }); + } + + /** + * Cancel workflow + * + * v1.2.0: Uses hibernateWorkspaceWithTracking() to prevent orphan pods + * - Retries hibernation with exponential backoff + * - Marks failed hibernations for GC cleanup + * - Never throws on hibernation failure (workflow is already cancelled) + */ + async cancelWorkflow(workflowId: string, reason?: string): Promise { + this.logger.log(`Cancelling workflow ${workflowId}: ${reason}`); + + const workflow = await this.prisma.workflowRun.update({ + where: { id: workflowId }, + data: { + status: WorkflowStatus.CANCELLED, + error: reason || 'Cancelled by user', + completedAt: new Date(), + }, + include: { workspace: true }, + }); + + // Cancel any running nodes + await this.prisma.workflowNode.updateMany({ + where: { + workflowRunId: workflowId, + status: { in: [NodeStatus.PENDING, NodeStatus.READY, NodeStatus.RUNNING] }, + }, + data: { status: NodeStatus.SKIPPED }, + }); + + // v1.2.0: Hibernate with retry and error tracking (prevents orphan pods) + if (workflow.workspace) { + // This method never throws - marks for GC cleanup on failure + await this.hibernateWorkspaceWithTracking(workflow.workspaceId, workflowId); + } + + this.eventEmitter.emit('workflow.cancelled', { workflowId, reason }); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/workspace-db-reconciler.service.ts b/packages/bytebot-workflow-orchestrator/src/services/workspace-db-reconciler.service.ts new file mode 100644 index 000000000..3b357cca3 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/workspace-db-reconciler.service.ts @@ -0,0 +1,410 @@ +/** + * Workspace DB Reconciler Service + * v1.0.0: Detects and reconciles DB/K8s workspace state drift + * + * This service runs on a schedule to find DB workspace records that claim + * to be active (READY/CREATING) but have no corresponding K8s pod. + * + * State drift can occur when: + * - Pod was deleted manually (kubectl delete) + * - Node failure caused pod eviction without proper hibernation + * - Network partition during hibernation caused DB update failure + * - Task-controller deleted pod but orchestrator missed webhook + * + * Two-pass safety pattern: + * 1. First pass: Mark workspace as DRIFT_DETECTED + * 2. Second pass (5 min later): If still DRIFT_DETECTED, transition to HIBERNATED + * + * This prevents race conditions where a pod is being created but not yet visible. + * + * @see OrphanPodGCService for the inverse problem (K8s pods without DB records) + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { Cron, CronExpression } from '@nestjs/schedule'; +import { ConfigService } from '@nestjs/config'; +import { EventEmitter2 } from '@nestjs/event-emitter'; +import * as k8s from '@kubernetes/client-node'; +import { PrismaService } from './prisma.service'; + +export interface ReconcileResult { + checked: number; + driftDetected: number; + reconciled: number; + errors: string[]; +} + +export interface WorkspacePodInfo { + workspaceId: string; + podName: string; + phase: string; + nodeName: string; +} + +@Injectable() +export class WorkspaceDbReconcilerService implements OnModuleInit { + private readonly logger = new Logger(WorkspaceDbReconcilerService.name); + private isRunning = false; + private lastReconcileAt: Date | null = null; + + // K8s client + private k8sApi: k8s.CoreV1Api; + + // Namespace for workspace pods + private readonly namespace: string; + + // Grace period before considering a workspace as drifted + // Default: 10 minutes (allows for slow pod creation) + private readonly gracePeriodMs: number; + + // Whether reconciler is enabled + private readonly enabled: boolean; + + constructor( + private readonly prisma: PrismaService, + private readonly configService: ConfigService, + private readonly eventEmitter: EventEmitter2, + ) { + this.namespace = this.configService.get( + 'KUBERNETES_NAMESPACE', + 'bytebot', + ); + this.gracePeriodMs = this.configService.get( + 'RECONCILER_GRACE_PERIOD_MS', + 10 * 60 * 1000, // 10 minutes + ); + this.enabled = this.configService.get( + 'WORKSPACE_DB_RECONCILER_ENABLED', + true, + ); + + // Initialize K8s client + const kc = new k8s.KubeConfig(); + kc.loadFromDefault(); + this.k8sApi = kc.makeApiClient(k8s.CoreV1Api); + } + + onModuleInit() { + this.logger.log( + `WorkspaceDbReconcilerService initialized (enabled=${this.enabled}, ` + + `namespace=${this.namespace}, gracePeriod=${this.gracePeriodMs}ms)`, + ); + } + + /** + * Run reconciliation every 5 minutes + */ + @Cron(CronExpression.EVERY_5_MINUTES) + async runScheduledReconcile(): Promise { + if (!this.enabled) { + return; + } + + if (this.isRunning) { + this.logger.debug('Workspace DB reconciler already running, skipping'); + return; + } + + await this.runReconcile(); + } + + /** + * Run workspace DB reconciliation + * Can be called manually or via cron + */ + async runReconcile(): Promise { + this.isRunning = true; + const result: ReconcileResult = { + checked: 0, + driftDetected: 0, + reconciled: 0, + errors: [], + }; + + try { + this.logger.debug('Starting workspace DB reconcile cycle'); + + // Phase 1: Get all workspace pods from K8s (source of truth) + const k8sPods = await this.listWorkspacePods(); + const k8sWorkspaceIds = new Set(k8sPods.map(p => p.workspaceId)); + + this.logger.debug( + `Found ${k8sPods.length} workspace pods in K8s: ${Array.from(k8sWorkspaceIds).join(', ')}`, + ); + + // Phase 2: Find DB records claiming to be active + const gracePeriodAgo = new Date(Date.now() - this.gracePeriodMs); + const activeDbWorkspaces = await this.prisma.workspace.findMany({ + where: { + status: { in: ['READY', 'CREATING'] }, + // Only consider workspaces that have been in this state for a while + updatedAt: { lt: gracePeriodAgo }, + }, + select: { + id: true, + status: true, + updatedAt: true, + createdAt: true, + }, + take: 100, // Process in batches + }); + + result.checked = activeDbWorkspaces.length; + + // Phase 3: Detect drift - DB says active but no K8s pod + for (const workspace of activeDbWorkspaces) { + if (k8sWorkspaceIds.has(workspace.id)) { + // Pod exists - no drift + continue; + } + + // Pod doesn't exist - this is drift + this.logger.warn( + `Drift detected: workspace ${workspace.id} is ${workspace.status} in DB ` + + `but has no K8s pod (last updated: ${workspace.updatedAt.toISOString()})`, + ); + + try { + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'DRIFT_DETECTED', + hibernationError: `No K8s pod found. Was ${workspace.status} since ${workspace.updatedAt.toISOString()}`, + }, + }); + result.driftDetected++; + } catch (error: any) { + result.errors.push(`${workspace.id}: ${error.message}`); + } + } + + // Phase 4: Second pass - reconcile DRIFT_DETECTED workspaces + // These were marked in a PREVIOUS cycle, so they've had 5+ minutes to recover + const driftedWorkspaces = await this.prisma.workspace.findMany({ + where: { + status: 'DRIFT_DETECTED', + // Must have been marked drift at least 5 minutes ago (previous cycle) + updatedAt: { lt: new Date(Date.now() - 5 * 60 * 1000) }, + }, + select: { + id: true, + hibernationError: true, + }, + take: 50, + }); + + for (const workspace of driftedWorkspaces) { + // Double-check pod still doesn't exist + if (k8sWorkspaceIds.has(workspace.id)) { + // Pod appeared! Clear drift status + this.logger.log( + `Drift cleared: workspace ${workspace.id} pod recovered`, + ); + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'READY', + hibernationError: null, + }, + }); + continue; + } + + // Still no pod after grace period - reconcile to HIBERNATED + try { + await this.prisma.workspace.update({ + where: { id: workspace.id }, + data: { + status: 'HIBERNATED', + hibernationError: null, + }, + }); + this.logger.log( + `Reconciled: workspace ${workspace.id} transitioned DRIFT_DETECTED → HIBERNATED`, + ); + result.reconciled++; + } catch (error: any) { + result.errors.push(`${workspace.id}: ${error.message}`); + } + } + + // Update last reconcile timestamp + this.lastReconcileAt = new Date(); + + if (result.checked > 0 || result.driftDetected > 0 || result.reconciled > 0) { + this.logger.log( + `Reconcile complete: checked=${result.checked}, ` + + `driftDetected=${result.driftDetected}, reconciled=${result.reconciled}`, + ); + } + + // Emit metrics event + this.eventEmitter.emit('workspace-db-reconcile.completed', { + timestamp: new Date(), + ...result, + }); + + return result; + } catch (error: any) { + this.logger.error(`Workspace DB reconcile failed: ${error.message}`); + result.errors.push(error.message); + return result; + } finally { + this.isRunning = false; + } + } + + /** + * List all workspace pods in K8s + * Uses label selector: bytebot.ai/component=desktop-workspace + */ + private async listWorkspacePods(): Promise { + try { + const response = await this.k8sApi.listNamespacedPod( + this.namespace, + undefined, // pretty + undefined, // allowWatchBookmarks + undefined, // _continue + undefined, // fieldSelector + 'bytebot.ai/component=workspace-desktop', // labelSelector + ); + + return response.body.items + .filter((pod: k8s.V1Pod) => { + // Extract workspace ID from pod name (format: desktop-ws-{workspaceId}) + const match = pod.metadata?.name?.match(/^desktop-ws-(.+)$/); + return match && match[1]; + }) + .map((pod: k8s.V1Pod) => { + const match = pod.metadata!.name!.match(/^desktop-ws-(.+)$/); + return { + workspaceId: `ws-${match![1]}`, + podName: pod.metadata!.name!, + phase: pod.status?.phase || 'Unknown', + nodeName: pod.spec?.nodeName || 'Unknown', + }; + }); + } catch (error: any) { + this.logger.error(`Failed to list workspace pods: ${error.message}`); + throw error; + } + } + + /** + * Get active workspace count from K8s (source of truth) + * Used by checkCapacity() for hardened capacity logic + */ + async getK8sActiveWorkspaceCount(): Promise { + const pods = await this.listWorkspacePods(); + // Only count Running/Pending pods (not Succeeded/Failed/Unknown) + return pods.filter(p => + p.phase === 'Running' || p.phase === 'Pending' + ).length; + } + + /** + * Get current reconciler status and stats + */ + async getStatus(): Promise<{ + enabled: boolean; + isRunning: boolean; + lastReconcileAt: string | null; + gracePeriodMs: number; + namespace: string; + health: { + k8sPodCount: number; + dbActiveCount: number; + driftDetectedCount: number; + isHealthy: boolean; + staleSinceMs: number | null; + }; + }> { + let k8sPodCount = 0; + try { + const pods = await this.listWorkspacePods(); + k8sPodCount = pods.filter(p => + p.phase === 'Running' || p.phase === 'Pending' + ).length; + } catch { + // K8s unreachable + } + + const [dbActiveCount, driftDetectedCount] = await Promise.all([ + this.prisma.workspace.count({ + where: { status: { in: ['READY', 'CREATING'] } }, + }), + this.prisma.workspace.count({ + where: { status: 'DRIFT_DETECTED' }, + }), + ]); + + // Calculate staleness + const staleSinceMs = this.lastReconcileAt + ? Date.now() - this.lastReconcileAt.getTime() + : null; + + // Health check: reconcile not stale (< 15 min) and no drift detected + const isHealthy = + staleSinceMs !== null && + staleSinceMs < 15 * 60 * 1000 && + driftDetectedCount === 0; + + return { + enabled: this.enabled, + isRunning: this.isRunning, + lastReconcileAt: this.lastReconcileAt?.toISOString() || null, + gracePeriodMs: this.gracePeriodMs, + namespace: this.namespace, + health: { + k8sPodCount, + dbActiveCount, + driftDetectedCount, + isHealthy, + staleSinceMs, + }, + }; + } + + /** + * Get workspace health comparison between K8s and DB + * For admin/ops visibility (Phase 3) + */ + async getWorkspaceHealth(): Promise<{ + k8sWorkspaces: WorkspacePodInfo[]; + dbActiveWorkspaces: { id: string; status: string; updatedAt: Date }[]; + driftDetectedWorkspaces: { id: string; hibernationError: string | null; updatedAt: Date }[]; + capacityUsed: number; + capacityRemaining: number; + maxCapacity: number; + }> { + const maxCapacity = parseInt( + this.configService.get('MAX_ACTIVE_WORKSPACES_GLOBAL', '6'), + 10, + ); + + const [k8sWorkspaces, dbActiveWorkspaces, driftDetectedWorkspaces] = await Promise.all([ + this.listWorkspacePods().catch(() => []), + this.prisma.workspace.findMany({ + where: { status: { in: ['READY', 'CREATING'] } }, + select: { id: true, status: true, updatedAt: true }, + }), + this.prisma.workspace.findMany({ + where: { status: 'DRIFT_DETECTED' }, + select: { id: true, hibernationError: true, updatedAt: true }, + }), + ]); + + const capacityUsed = k8sWorkspaces.filter(p => + p.phase === 'Running' || p.phase === 'Pending' + ).length; + + return { + k8sWorkspaces, + dbActiveWorkspaces, + driftDetectedWorkspaces, + capacityUsed, + capacityRemaining: Math.max(0, maxCapacity - capacityUsed), + maxCapacity, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/services/workspace.service.ts b/packages/bytebot-workflow-orchestrator/src/services/workspace.service.ts new file mode 100644 index 000000000..e14d1d6d9 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/services/workspace.service.ts @@ -0,0 +1,398 @@ +/** + * Workspace Service + * v1.0.1: Fixed storage class to use task-controller's configured default + * v1.0.0: Manages workspace lifecycle via task-controller API + * + * This service acts as a client to the task-controller's workspace endpoints. + * It handles: + * - Creating persistent desktops + * - Checking desktop status + * - Hibernating and terminating workspaces + * - Acquiring and releasing workspace locks + */ + +import { Injectable, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import axios, { AxiosInstance } from 'axios'; +import { PrismaService } from './prisma.service'; + +export interface WorkspaceDesktopStatus { + workspaceId: string; + status: 'CREATING' | 'READY' | 'NOT_FOUND' | 'ERROR' | 'TERMINATED' | 'HIBERNATED'; + desktopEndpoint: string | null; + vncEndpoint: string | null; + pvcName: string | null; + podName: string | null; + podIP: string | null; + podPhase: string | null; + message?: string; +} + +export interface PersistenceConfig { + enabled?: boolean; + storageClass?: string; + size?: string; + mounts?: Array<{ mountPath: string; subPath: string }>; + retainOnDelete?: boolean; +} + +@Injectable() +export class WorkspaceService { + private readonly logger = new Logger(WorkspaceService.name); + private readonly taskControllerClient: AxiosInstance; + private readonly internalToken: string; + + constructor( + private configService: ConfigService, + private prisma: PrismaService, + ) { + const taskControllerUrl = this.configService.get( + 'TASK_CONTROLLER_URL', + 'http://bytebot-task-controller:8080', + ); + + this.internalToken = this.configService.get( + 'INTERNAL_SERVICE_TOKEN', + '', + ); + + this.taskControllerClient = axios.create({ + baseURL: taskControllerUrl, + timeout: 120000, // 2 minutes for workspace creation + headers: { + 'Content-Type': 'application/json', + 'X-Internal-Token': this.internalToken, + 'X-Service-Id': 'bytebot-workflow-orchestrator', + }, + }); + + if (!this.internalToken) { + this.logger.warn( + 'INTERNAL_SERVICE_TOKEN not configured - workspace API calls may fail', + ); + } + } + + /** + * Create or ensure a workspace desktop exists + * + * Calls the task-controller's POST /api/v1/workspaces/:workspaceId/desktop endpoint + */ + async ensureWorkspaceDesktop( + workspaceId: string, + tenantId: string, + persistence?: PersistenceConfig, + ): Promise { + this.logger.log(`Ensuring workspace desktop: ${workspaceId}`); + + try { + const response = await this.taskControllerClient.post( + `/api/v1/workspaces/${workspaceId}/desktop`, + { + tenantId, + persistence: { + enabled: persistence?.enabled ?? true, + // Let task-controller use its configured default storage class + // (WORKSPACE_DEFAULT_STORAGE_CLASS env var) when not explicitly specified + storageClass: persistence?.storageClass, + size: persistence?.size || '10Gi', + mounts: persistence?.mounts, + retainOnDelete: persistence?.retainOnDelete ?? true, + }, + }, + ); + + this.logger.log( + `Workspace ${workspaceId} desktop: ${response.data.status}`, + ); + + return response.data; + } catch (error: any) { + const message = error.response?.data?.message || error.message; + this.logger.error(`Failed to ensure workspace desktop: ${message}`); + throw new Error(`Workspace desktop creation failed: ${message}`); + } + } + + /** + * Get workspace desktop status + */ + async getWorkspaceDesktopStatus( + workspaceId: string, + ): Promise { + try { + const response = await this.taskControllerClient.get( + `/api/v1/workspaces/${workspaceId}/desktop`, + ); + return response.data; + } catch (error: any) { + if (error.response?.status === 404) { + return { + workspaceId, + status: 'NOT_FOUND', + desktopEndpoint: null, + vncEndpoint: null, + pvcName: null, + podName: null, + podIP: null, + podPhase: null, + }; + } + throw error; + } + } + + /** + * Wait for workspace desktop to be ready + */ + async waitForWorkspaceReady( + workspaceId: string, + timeoutMs: number = 120000, + ): Promise { + try { + const response = await this.taskControllerClient.get( + `/api/v1/workspaces/${workspaceId}/desktop/wait`, + { params: { timeoutMs } }, + ); + return response.data; + } catch (error: any) { + const message = error.response?.data?.message || error.message; + throw new Error(`Wait for workspace failed: ${message}`); + } + } + + /** + * Hibernate workspace (delete pod, keep PVC) + */ + async hibernateWorkspace(workspaceId: string): Promise { + this.logger.log(`Hibernating workspace: ${workspaceId}`); + + try { + const response = await this.taskControllerClient.post( + `/api/v1/workspaces/${workspaceId}/desktop/hibernate`, + ); + + this.logger.log(`Workspace ${workspaceId} hibernated`); + return response.data; + } catch (error: any) { + const message = error.response?.data?.message || error.message; + this.logger.error(`Failed to hibernate workspace: ${message}`); + throw new Error(`Workspace hibernation failed: ${message}`); + } + } + + /** + * Terminate workspace (delete pod and PVC) + */ + async terminateWorkspace( + workspaceId: string, + deletePVC: boolean = true, + ): Promise { + this.logger.log(`Terminating workspace: ${workspaceId} (deletePVC=${deletePVC})`); + + try { + const response = await this.taskControllerClient.post( + `/api/v1/workspaces/${workspaceId}/desktop/terminate`, + { deletePVC }, + ); + + this.logger.log(`Workspace ${workspaceId} terminated`); + return response.data; + } catch (error: any) { + const message = error.response?.data?.message || error.message; + this.logger.error(`Failed to terminate workspace: ${message}`); + throw new Error(`Workspace termination failed: ${message}`); + } + } + + /** + * v2.3.0 M4: Acquire granular workspace lock for desktop tool execution + * Lock is held only during active desktop tool execution (30-60 seconds) + * Uses atomic update with expiry check for safe concurrent access + * + * @param workspaceId - Workspace to lock + * @param nodeRunId - ID of the node run acquiring the lock + * @param leaseSeconds - Lock duration in seconds (default 30 seconds) + * @returns Lock acquisition result with expiry time + */ + async acquireLock( + workspaceId: string, + nodeRunId: string, + leaseSeconds: number = 30, + ): Promise<{ + acquired: boolean; + lockExpiresAt?: string; + message: string; + retryAfterMs?: number; + currentOwner?: string; + }> { + const lockExpiry = new Date(Date.now() + leaseSeconds * 1000); + + try { + // Use raw query for atomic locking with expiry check + const result = await this.prisma.$executeRaw` + UPDATE workspaces + SET lock_owner_node_run_id = ${nodeRunId}, + lock_acquired_at = NOW(), + lock_expires_at = ${lockExpiry} + WHERE id = ${workspaceId} + AND ( + lock_owner_node_run_id IS NULL + OR lock_owner_node_run_id = ${nodeRunId} + OR lock_expires_at < NOW() + ) + `; + + if (result === 0) { + // Lock acquisition failed - get current owner info + const workspace = await this.prisma.workspace.findUnique({ + where: { id: workspaceId }, + select: { lockOwnerNodeRunId: true, lockExpiresAt: true }, + }); + + const remainingMs = workspace?.lockExpiresAt + ? Math.max(0, workspace.lockExpiresAt.getTime() - Date.now()) + : 5000; + + this.logger.debug( + `Lock contention on workspace ${workspaceId}: owned by ${workspace?.lockOwnerNodeRunId}, expires in ${remainingMs}ms`, + ); + + return { + acquired: false, + message: `Lock held by another node run: ${workspace?.lockOwnerNodeRunId}`, + currentOwner: workspace?.lockOwnerNodeRunId || undefined, + retryAfterMs: Math.min(remainingMs + 1000, 10000), // Add 1 second buffer, cap at 10s + }; + } + + this.logger.log( + `Lock acquired on workspace ${workspaceId} by nodeRun ${nodeRunId}, expires at ${lockExpiry.toISOString()}`, + ); + + return { + acquired: true, + lockExpiresAt: lockExpiry.toISOString(), + message: 'Lock acquired', + }; + } catch (error: any) { + this.logger.error( + `Error acquiring workspace lock: ${error.message}`, + ); + return { + acquired: false, + message: `Lock acquisition error: ${error.message}`, + retryAfterMs: 5000, + }; + } + } + + /** + * v2.3.0 M4: Renew workspace lock + */ + async renewLock( + workspaceId: string, + nodeRunId: string, + leaseSeconds: number = 30, + ): Promise<{ + renewed: boolean; + lockExpiresAt?: string; + message: string; + }> { + const newExpiry = new Date(Date.now() + leaseSeconds * 1000); + + const result = await this.prisma.workspace.updateMany({ + where: { + id: workspaceId, + lockOwnerNodeRunId: nodeRunId, + }, + data: { + lockExpiresAt: newExpiry, + }, + }); + + if (result.count === 0) { + this.logger.warn(`Failed to renew lock for workspace ${workspaceId} - not owned by ${nodeRunId}`); + return { + renewed: false, + message: 'Lock not owned by this node run', + }; + } + + this.logger.debug( + `Lock renewed on workspace ${workspaceId} by nodeRun ${nodeRunId}, new expiry ${newExpiry.toISOString()}`, + ); + + return { + renewed: true, + lockExpiresAt: newExpiry.toISOString(), + message: 'Lock renewed', + }; + } + + /** + * v2.3.0 M4: Release workspace lock + */ + async releaseLock( + workspaceId: string, + nodeRunId: string, + ): Promise<{ + released: boolean; + message: string; + }> { + const result = await this.prisma.workspace.updateMany({ + where: { + id: workspaceId, + lockOwnerNodeRunId: nodeRunId, + }, + data: { + lockOwnerNodeRunId: null, + lockAcquiredAt: null, + lockExpiresAt: null, + }, + }); + + if (result.count === 0) { + this.logger.debug(`Lock on workspace ${workspaceId} not owned by ${nodeRunId}, nothing to release`); + return { + released: false, + message: 'Lock not owned by this node run', + }; + } + + this.logger.log(`Lock released on workspace ${workspaceId} by nodeRun ${nodeRunId}`); + return { + released: true, + message: 'Lock released', + }; + } + + /** + * Get current lock status for a workspace + */ + async getLockStatus(workspaceId: string): Promise<{ + locked: boolean; + ownerNodeRunId: string | null; + expiresAt: string | null; + }> { + const workspace = await this.prisma.workspace.findUnique({ + where: { id: workspaceId }, + select: { lockOwnerNodeRunId: true, lockExpiresAt: true }, + }); + + if (!workspace) { + return { locked: false, ownerNodeRunId: null, expiresAt: null }; + } + + // Check if lock has expired + const isExpired = workspace.lockExpiresAt + ? workspace.lockExpiresAt < new Date() + : true; + + return { + locked: !!workspace.lockOwnerNodeRunId && !isExpired, + ownerNodeRunId: isExpired ? null : workspace.lockOwnerNodeRunId, + expiresAt: isExpired ? null : workspace.lockExpiresAt?.toISOString() || null, + }; + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/temporal/constants.ts b/packages/bytebot-workflow-orchestrator/src/temporal/constants.ts new file mode 100644 index 000000000..a9582442b --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/temporal/constants.ts @@ -0,0 +1,9 @@ +/** + * Temporal Module Constants + * + * Separated to avoid circular dependencies between + * temporal.module.ts and temporal-workflow.service.ts + */ + +export const TEMPORAL_CLIENT = 'TEMPORAL_CLIENT'; +export const TEMPORAL_CONNECTION = 'TEMPORAL_CONNECTION'; diff --git a/packages/bytebot-workflow-orchestrator/src/temporal/feature-flag.service.ts b/packages/bytebot-workflow-orchestrator/src/temporal/feature-flag.service.ts new file mode 100644 index 000000000..994ff562f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/temporal/feature-flag.service.ts @@ -0,0 +1,238 @@ +/** + * Feature Flag Service for Temporal Migration + * + * Controls the gradual rollout of Temporal workflow execution. + * Supports multiple strategies: + * - Environment-based (all or nothing) + * - Percentage-based (random sampling) + * - Tenant-based (specific tenants) + * - Goal-based (specific goal patterns) + * + * Industry patterns from LaunchDarkly, Unleash, and custom implementations. + */ + +import { Injectable, Logger, OnModuleInit } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; + +export interface FeatureFlagContext { + tenantId: string; + goalRunId: string; + goalDescription?: string; + userId?: string; +} + +export interface FeatureFlagResult { + enabled: boolean; + reason: string; + strategy: 'environment' | 'percentage' | 'tenant' | 'goal' | 'disabled'; +} + +@Injectable() +export class FeatureFlagService implements OnModuleInit { + private readonly logger = new Logger(FeatureFlagService.name); + + // Configuration + private enabled = false; + private rolloutPercentage = 0; + private enabledTenants: Set = new Set(); + private disabledTenants: Set = new Set(); + private goalPatterns: RegExp[] = []; + private killSwitch = false; + + constructor(private readonly configService: ConfigService) {} + + onModuleInit(): void { + this.loadConfiguration(); + this.logger.log(`Feature flag initialized: enabled=${this.enabled}, rollout=${this.rolloutPercentage}%`); + } + + /** + * Load feature flag configuration from environment + */ + private loadConfiguration(): void { + // Master switch + this.enabled = this.configService.get('TEMPORAL_WORKFLOW_ENABLED', 'false') === 'true'; + + // Kill switch for instant rollback + this.killSwitch = this.configService.get('TEMPORAL_KILL_SWITCH', 'false') === 'true'; + + // Percentage rollout (0-100) + this.rolloutPercentage = parseInt( + this.configService.get('TEMPORAL_ROLLOUT_PERCENTAGE', '0'), + 10 + ); + + // Tenant-based flags + const enabledTenantsStr = this.configService.get('TEMPORAL_ENABLED_TENANTS', ''); + if (enabledTenantsStr) { + this.enabledTenants = new Set(enabledTenantsStr.split(',').map(t => t.trim())); + } + + const disabledTenantsStr = this.configService.get('TEMPORAL_DISABLED_TENANTS', ''); + if (disabledTenantsStr) { + this.disabledTenants = new Set(disabledTenantsStr.split(',').map(t => t.trim())); + } + + // Goal pattern matching (for testing specific goal types) + const goalPatternsStr = this.configService.get('TEMPORAL_GOAL_PATTERNS', ''); + if (goalPatternsStr) { + this.goalPatterns = goalPatternsStr.split(',').map(p => new RegExp(p.trim(), 'i')); + } + } + + /** + * Check if Temporal workflow should be used for this context + */ + shouldUseTemporalWorkflow(context: FeatureFlagContext): FeatureFlagResult { + // Kill switch overrides everything + if (this.killSwitch) { + return { + enabled: false, + reason: 'Kill switch activated', + strategy: 'disabled', + }; + } + + // Master switch must be on + if (!this.enabled) { + return { + enabled: false, + reason: 'Temporal workflows disabled globally', + strategy: 'disabled', + }; + } + + // Check if tenant is explicitly disabled + if (this.disabledTenants.has(context.tenantId)) { + return { + enabled: false, + reason: `Tenant ${context.tenantId} explicitly disabled`, + strategy: 'tenant', + }; + } + + // Check if tenant is explicitly enabled + if (this.enabledTenants.has(context.tenantId)) { + return { + enabled: true, + reason: `Tenant ${context.tenantId} explicitly enabled`, + strategy: 'tenant', + }; + } + + // Check goal patterns + if (context.goalDescription && this.goalPatterns.length > 0) { + for (const pattern of this.goalPatterns) { + if (pattern.test(context.goalDescription)) { + return { + enabled: true, + reason: `Goal matches pattern: ${pattern.source}`, + strategy: 'goal', + }; + } + } + } + + // Percentage-based rollout + if (this.rolloutPercentage > 0) { + // Use consistent hashing based on goalRunId for deterministic behavior + const hash = this.hashString(context.goalRunId); + const bucket = hash % 100; + + if (bucket < this.rolloutPercentage) { + return { + enabled: true, + reason: `Percentage rollout: ${bucket} < ${this.rolloutPercentage}%`, + strategy: 'percentage', + }; + } + } + + // Default to disabled + return { + enabled: false, + reason: 'No matching rollout criteria', + strategy: 'disabled', + }; + } + + /** + * Simple string hash for percentage-based rollout + * Uses consistent hashing so same goalRunId always gets same bucket + */ + private hashString(str: string): number { + let hash = 0; + for (let i = 0; i < str.length; i++) { + const char = str.charCodeAt(i); + hash = ((hash << 5) - hash) + char; + hash = hash & hash; // Convert to 32-bit integer + } + return Math.abs(hash); + } + + /** + * Get current rollout configuration (for debugging/monitoring) + */ + getConfiguration(): { + enabled: boolean; + killSwitch: boolean; + rolloutPercentage: number; + enabledTenants: string[]; + disabledTenants: string[]; + goalPatterns: string[]; + } { + return { + enabled: this.enabled, + killSwitch: this.killSwitch, + rolloutPercentage: this.rolloutPercentage, + enabledTenants: Array.from(this.enabledTenants), + disabledTenants: Array.from(this.disabledTenants), + goalPatterns: this.goalPatterns.map(p => p.source), + }; + } + + /** + * Update configuration at runtime (for testing/emergency changes) + */ + updateConfiguration(updates: { + enabled?: boolean; + killSwitch?: boolean; + rolloutPercentage?: number; + enabledTenants?: string[]; + disabledTenants?: string[]; + }): void { + if (updates.enabled !== undefined) { + this.enabled = updates.enabled; + } + if (updates.killSwitch !== undefined) { + this.killSwitch = updates.killSwitch; + } + if (updates.rolloutPercentage !== undefined) { + this.rolloutPercentage = Math.max(0, Math.min(100, updates.rolloutPercentage)); + } + if (updates.enabledTenants !== undefined) { + this.enabledTenants = new Set(updates.enabledTenants); + } + if (updates.disabledTenants !== undefined) { + this.disabledTenants = new Set(updates.disabledTenants); + } + + this.logger.log(`Feature flag configuration updated: ${JSON.stringify(this.getConfiguration())}`); + } + + /** + * Activate kill switch (instant rollback) + */ + activateKillSwitch(): void { + this.killSwitch = true; + this.logger.warn('KILL SWITCH ACTIVATED - All new goals will use legacy orchestrator'); + } + + /** + * Deactivate kill switch + */ + deactivateKillSwitch(): void { + this.killSwitch = false; + this.logger.log('Kill switch deactivated - Normal rollout rules apply'); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/temporal/index.ts b/packages/bytebot-workflow-orchestrator/src/temporal/index.ts new file mode 100644 index 000000000..b15eae348 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/temporal/index.ts @@ -0,0 +1,8 @@ +/** + * Temporal Integration Module Exports + */ + +export * from './constants'; +export * from './temporal.module'; +export * from './temporal-workflow.service'; +export * from './feature-flag.service'; diff --git a/packages/bytebot-workflow-orchestrator/src/temporal/temporal-capability-probe.service.spec.ts b/packages/bytebot-workflow-orchestrator/src/temporal/temporal-capability-probe.service.spec.ts new file mode 100644 index 000000000..06681aa04 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/temporal/temporal-capability-probe.service.spec.ts @@ -0,0 +1,81 @@ +import { TemporalCapabilityProbeService } from './temporal-capability-probe.service'; + +describe('TemporalCapabilityProbeService', () => { + function createService(params: { + temporalEnabled: boolean; + mode?: 'REACHABILITY' | 'UPDATE'; + updateAccepted?: boolean; + }) { + const okGauge = { set: jest.fn() } as any; + const failuresCounter = { inc: jest.fn() } as any; + + const handle = { + executeUpdate: jest.fn(async () => ({ + accepted: params.updateAccepted ?? true, + applied: true, + })), + }; + + const client = { + workflowService: { getSystemInfo: jest.fn(async () => ({})) }, + workflow: { + start: jest.fn(async () => ({})), + getHandle: jest.fn(() => handle), + }, + } as any; + + const configService = { + get: jest.fn((key: string, defaultValue?: string) => { + if (key === 'TEMPORAL_WORKFLOW_ENABLED') return params.temporalEnabled ? 'true' : 'false'; + if (key === 'TEMPORAL_CAPABILITY_PROBE_MODE') return params.mode ?? defaultValue ?? 'REACHABILITY'; + if (key === 'TEMPORAL_TASK_QUEUE') return 'bytebot-goal-runs'; + if (key === 'POD_NAME') return 'pod-1'; + return defaultValue; + }), + } as any; + + return { + service: new TemporalCapabilityProbeService(client, configService, okGauge, failuresCounter), + okGauge, + failuresCounter, + client, + handle, + configService, + }; + } + + it('sets disabled when Temporal is off', async () => { + const { service, okGauge, failuresCounter, client } = createService({ temporalEnabled: false }); + + await service.probeOnce(); + + expect(okGauge.set).toHaveBeenCalledWith(0); + expect(failuresCounter.inc).not.toHaveBeenCalled(); + expect(client.workflowService.getSystemInfo).not.toHaveBeenCalled(); + }); + + it('runs reachability probe by default', async () => { + const { service, client, okGauge } = createService({ temporalEnabled: true, mode: 'REACHABILITY' }); + + await service.probeOnce(); + + expect(client.workflowService.getSystemInfo).toHaveBeenCalled(); + expect(okGauge.set).toHaveBeenCalledWith(1); + }); + + it('runs UPDATE probe when configured and fails closed if update is not accepted', async () => { + const { service, client, handle, okGauge, failuresCounter } = createService({ + temporalEnabled: true, + mode: 'UPDATE', + updateAccepted: false, + }); + + await service.probeOnce(); + + expect(client.workflow.start).toHaveBeenCalled(); + expect(handle.executeUpdate).toHaveBeenCalled(); + expect(okGauge.set).toHaveBeenCalledWith(0); + expect(failuresCounter.inc).toHaveBeenCalled(); + }); +}); + diff --git a/packages/bytebot-workflow-orchestrator/src/temporal/temporal-capability-probe.service.ts b/packages/bytebot-workflow-orchestrator/src/temporal/temporal-capability-probe.service.ts new file mode 100644 index 000000000..c6486ba2c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/temporal/temporal-capability-probe.service.ts @@ -0,0 +1,167 @@ +import { Inject, Injectable, Logger, OnModuleDestroy, OnModuleInit, Optional } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Client } from '@temporalio/client'; +import { InjectMetric } from '@willsoto/nestjs-prometheus'; +import type { Counter, Gauge } from 'prom-client'; +import { TEMPORAL_CLIENT } from './constants'; + +type TemporalCapabilityProbeMode = 'REACHABILITY' | 'UPDATE'; + +@Injectable() +export class TemporalCapabilityProbeService implements OnModuleInit, OnModuleDestroy { + private readonly logger = new Logger(TemporalCapabilityProbeService.name); + + private healthy = false; + private lastError: string | null = null; + private intervalHandle: NodeJS.Timeout | null = null; + + constructor( + @Optional() @Inject(TEMPORAL_CLIENT) private readonly client: Client | null, + private readonly configService: ConfigService, + @InjectMetric('temporal_capability_probe_ok') private readonly okGauge: Gauge, + @InjectMetric('temporal_capability_probe_failures_total') + private readonly failuresCounter: Counter, + ) {} + + onModuleInit(): void { + // Fire-and-forget initial probe so we have a signal quickly after startup. + void this.probeOnce(); + + // Use an unref'ed interval so tests and short-lived processes can exit cleanly. + const mode = this.getMode(); + const defaultIntervalMs = mode === 'UPDATE' ? 300000 : 30000; + const intervalMs = parseInt(process.env.TEMPORAL_CAPABILITY_PROBE_INTERVAL_MS ?? String(defaultIntervalMs), 10); + this.intervalHandle = setInterval(() => void this.probeOnce(), intervalMs); + this.intervalHandle.unref?.(); + } + + onModuleDestroy(): void { + if (this.intervalHandle) { + clearInterval(this.intervalHandle); + this.intervalHandle = null; + } + } + + isHealthyForTraffic(): boolean { + return this.healthy; + } + + getLastError(): string | null { + return this.lastError; + } + + async probeOnce(): Promise { + const enabled = this.configService.get('TEMPORAL_WORKFLOW_ENABLED', 'false') === 'true'; + if (!enabled) { + this.setDisabled(); + return; + } + + if (!this.client) { + this.setUnhealthy('TEMPORAL_CLIENT_UNAVAILABLE'); + return; + } + + try { + const mode = this.getMode(); + if (mode === 'UPDATE') { + await this.probeUpdateCapabilityOnce(); + } else { + // "Server reachable" capability check. This is intentionally lightweight. + // CI Update-contract tests cover handler registration correctness. + await this.client.workflowService.getSystemInfo({}); + } + this.setHealthy(); + } catch (error: any) { + this.setUnhealthy(String(error?.message ?? error)); + } + } + + private getMode(): TemporalCapabilityProbeMode { + const raw = this.configService.get('TEMPORAL_CAPABILITY_PROBE_MODE', 'REACHABILITY'); + const normalized = String(raw ?? '') + .trim() + .toUpperCase(); + + if (normalized === 'UPDATE') return 'UPDATE'; + return 'REACHABILITY'; + } + + private async probeUpdateCapabilityOnce(): Promise { + if (!this.client) throw new Error('TEMPORAL_CLIENT_UNAVAILABLE'); + + const taskQueue = this.configService.get('TEMPORAL_TASK_QUEUE', '').trim(); + if (!taskQueue) { + throw new Error('TEMPORAL_TASK_QUEUE is required for UPDATE probe mode'); + } + + const podName = this.configService.get('POD_NAME', '').trim() || 'unknown-pod'; + const nowToken = new Date().toISOString().replace(/[^a-zA-Z0-9_-]/g, '-'); + + // Unique workflow ID per probe attempt (keeps the probe side-effect free and avoids workflowId collisions). + // Execution timeout is short so even failures do not leave long-running probe workflows behind. + const workflowId = `bytebot-capability-probe-${podName}-${nowToken}`; + + await this.client.workflow.start('goalRunWorkflow', { + taskQueue, + workflowId, + workflowExecutionTimeout: '1m', + args: [ + { + goalRunId: workflowId, + tenantId: 'system', + userId: 'system', + goalDescription: 'Temporal Update capability probe', + constraints: { + maxSteps: 1, + maxRetries: 0, + maxReplans: 0, + timeoutMs: 60000, + requireApprovalForHighRisk: false, + }, + context: { previousAttempts: 0, inheritedKnowledge: [] }, + mode: 'CAPABILITY_PROBE', + }, + ], + }); + + const handle = this.client.workflow.getHandle(workflowId); + + const updateResult = (await handle.executeUpdate('userPromptResolved', { + updateId: `temporal_capability_probe:${workflowId}`, + args: [{ promptId: workflowId, answers: { ok: true } }], + })) as { accepted?: boolean; applied?: boolean }; + + if (!updateResult?.accepted) { + throw new Error(`Update did not accept probe payload: ${JSON.stringify(updateResult)}`); + } + } + + private setHealthy(): void { + if (!this.healthy) { + this.logger.log('Temporal capability probe recovered (allowing Temporal routing for new runs)'); + } + this.healthy = true; + this.lastError = null; + this.okGauge.set(1); + } + + private setUnhealthy(reason: string): void { + const wasHealthy = this.healthy; + this.healthy = false; + this.lastError = reason; + this.okGauge.set(0); + + // Count failures, but avoid spamming logs every interval. + this.failuresCounter.inc(); + if (wasHealthy) { + this.logger.error(`Temporal capability probe failed; gating Temporal routing. reason=${reason}`); + } + } + + private setDisabled(): void { + this.healthy = false; + this.lastError = 'TEMPORAL_DISABLED'; + this.okGauge.set(0); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/temporal/temporal-workflow.service.ts b/packages/bytebot-workflow-orchestrator/src/temporal/temporal-workflow.service.ts new file mode 100644 index 000000000..8b668059f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/temporal/temporal-workflow.service.ts @@ -0,0 +1,386 @@ +/** + * Temporal Workflow Service + * + * Provides methods to start, signal, query, and manage Temporal workflows. + * This service wraps the Temporal Client for use by GoalsService. + * + * Key features: + * - Start GoalRunWorkflow with input validation + * - Send signals (pause, resume, cancel, approve, reject, steer) + * - Query workflow state (progress, checkpoint, current step) + * - Get workflow handle for existing workflows + */ + +import { Injectable, Inject, Logger, Optional } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Client, WorkflowHandle, WorkflowNotFoundError } from '@temporalio/client'; +import { TEMPORAL_CLIENT } from './constants'; + +// Import workflow types from temporal worker package +// These types define the workflow interface +interface GoalRunInput { + goalRunId: string; + tenantId: string; + userId: string; + goalDescription: string; + workspaceId?: string; + constraints?: { + maxSteps?: number; + maxRetries?: number; + maxReplans?: number; + timeoutMs?: number; + requireApprovalForHighRisk?: boolean; + }; + context?: { + previousAttempts?: number; + parentGoalRunId?: string; + inheritedKnowledge?: string[]; + }; +} + +interface GoalRunResult { + goalRunId: string; + status: 'COMPLETED' | 'FAILED' | 'CANCELLED' | 'TIMEOUT'; + completedAt: string; + summary: string; + stepsCompleted: number; + totalDurationMs: number; + finalOutcome?: string; + errorDetails?: { + errorType: string; + errorMessage: string; + failedStep?: number; + recoverable: boolean; + }; + artifacts: Array<{ type: string; path: string; description?: string }>; + knowledgeGained: string[]; +} + +interface GoalProgress { + goalRunId: string; + phase: string; + currentStep: number; + totalSteps: number; + completedSteps: number; + failedSteps: number; + percentComplete: number; + startedAt: string; + lastUpdatedAt: string; + isPaused: boolean; + isAwaitingApproval: boolean; +} + +interface GoalCheckpoint { + goalRunId: string; + version: number; + checkpointedAt: string; + phase: string; + progressSummary: { + totalSteps: number; + completedSteps: number; + failedSteps: number; + percentComplete: number; + }; + completedWork: Array<{ + stepNumber: number; + description: string; + outcome: string; + completedAt: string; + }>; + currentContext: { + lastSuccessfulStep?: string; + currentStep?: string; + failureReason?: string; + accumulatedKnowledge: string[]; + }; + remainingSteps: Array<{ + stepNumber: number; + description: string; + }>; +} + +@Injectable() +export class TemporalWorkflowService { + private readonly logger = new Logger(TemporalWorkflowService.name); + private readonly taskQueue: string; + private readonly workflowIdPrefix = 'goal-run'; + + constructor( + @Optional() @Inject(TEMPORAL_CLIENT) private readonly client: Client | null, + private readonly configService: ConfigService, + ) { + this.taskQueue = this.configService.get('TEMPORAL_TASK_QUEUE', 'bytebot-goal-runs'); + } + + /** + * Check if Temporal is enabled and available + */ + isEnabled(): boolean { + return this.client !== null; + } + + /** + * Get workflow ID for a goal run + */ + getWorkflowId(goalRunId: string): string { + return `${this.workflowIdPrefix}-${goalRunId}`; + } + + /** + * Start a new GoalRunWorkflow + */ + async startGoalRunWorkflow(input: GoalRunInput): Promise<{ + workflowId: string; + runId: string; + }> { + if (!this.client) { + throw new Error('Temporal client not available'); + } + + const workflowId = this.getWorkflowId(input.goalRunId); + + this.logger.log(`Starting workflow ${workflowId} for goal run ${input.goalRunId}`); + + try { + const handle = await this.client.workflow.start('goalRunWorkflow', { + taskQueue: this.taskQueue, + workflowId, + args: [input], + // Workflow-level timeout (1 hour default, can be overridden) + workflowExecutionTimeout: input.constraints?.timeoutMs + ? `${input.constraints.timeoutMs}ms` + : '1h', + }); + + this.logger.log(`Workflow ${workflowId} started with run ID ${handle.workflowId}`); + + return { + workflowId: handle.workflowId, + runId: handle.firstExecutionRunId, + }; + } catch (error) { + this.logger.error(`Failed to start workflow ${workflowId}:`, error); + throw error; + } + } + + /** + * Get handle to an existing workflow + */ + async getWorkflowHandle(goalRunId: string): Promise { + if (!this.client) { + return null; + } + + const workflowId = this.getWorkflowId(goalRunId); + + try { + return this.client.workflow.getHandle(workflowId); + } catch (error) { + if (error instanceof WorkflowNotFoundError) { + return null; + } + throw error; + } + } + + /** + * Send pause signal to workflow + */ + async pauseWorkflow(goalRunId: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + throw new Error(`Workflow not found for goal run ${goalRunId}`); + } + + this.logger.log(`Sending pause signal to workflow for goal run ${goalRunId}`); + await handle.signal('pauseGoal'); + } + + /** + * Send resume signal to workflow + */ + async resumeWorkflow(goalRunId: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + throw new Error(`Workflow not found for goal run ${goalRunId}`); + } + + this.logger.log(`Sending resume signal to workflow for goal run ${goalRunId}`); + await handle.signal('resumeGoal'); + } + + /** + * Send cancel signal to workflow + */ + async cancelWorkflow(goalRunId: string, reason: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + throw new Error(`Workflow not found for goal run ${goalRunId}`); + } + + this.logger.log(`Sending cancel signal to workflow for goal run ${goalRunId}: ${reason}`); + await handle.signal('cancelGoal', { reason }); + } + + /** + * Send approve step signal to workflow + */ + async approveStep(goalRunId: string, stepId: string, approver: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + throw new Error(`Workflow not found for goal run ${goalRunId}`); + } + + this.logger.log(`Sending approve signal for step ${stepId} in goal run ${goalRunId}`); + await handle.signal('approveStep', { + stepId, + approver, + approvedAt: new Date().toISOString(), + }); + } + + /** + * Resume a workflow from an external input request (prompt) using a Temporal Update. + * + * Prefer Updates over Signals for this path because Updates provide synchronous confirmation and + * an UpdateId can be used as an idempotency key (replay-safe under retries). + */ + async resumeFromUserPrompt( + goalRunId: string, + payload: { promptId: string; answers: Record }, + options?: { updateId?: string }, + ): Promise<{ didResume: boolean }> { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + return { didResume: false }; + } + + await handle.executeUpdate('userPromptResolved', { + args: [payload], + updateId: options?.updateId, + }); + + return { didResume: true }; + } + + /** + * Send reject step signal to workflow + */ + async rejectStep(goalRunId: string, stepId: string, reason: string, rejector?: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + throw new Error(`Workflow not found for goal run ${goalRunId}`); + } + + this.logger.log(`Sending reject signal for step ${stepId} in goal run ${goalRunId}: ${reason}`); + await handle.signal('rejectStep', { + stepId, + reason, + rejector, + rejectedAt: new Date().toISOString(), + }); + } + + /** + * Send steering instruction to workflow + */ + async sendSteeringInstruction( + goalRunId: string, + instruction: string, + priority: 'LOW' | 'NORMAL' | 'HIGH' | 'URGENT' = 'NORMAL', + ): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + throw new Error(`Workflow not found for goal run ${goalRunId}`); + } + + this.logger.log(`Sending steering instruction to goal run ${goalRunId}: ${instruction}`); + await handle.signal('steer', { + instruction, + priority, + addToContext: true, + }); + } + + /** + * Query workflow progress + */ + async getProgress(goalRunId: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + return null; + } + + try { + return await handle.query('getProgress'); + } catch (error) { + this.logger.warn(`Failed to query progress for ${goalRunId}:`, error); + return null; + } + } + + /** + * Query workflow checkpoint + */ + async getCheckpoint(goalRunId: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + return null; + } + + try { + return await handle.query('getCheckpoint'); + } catch (error) { + this.logger.warn(`Failed to query checkpoint for ${goalRunId}:`, error); + return null; + } + } + + /** + * Wait for workflow result (blocking) + */ + async waitForResult(goalRunId: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + return null; + } + + try { + return await handle.result(); + } catch (error) { + this.logger.error(`Workflow ${goalRunId} failed:`, error); + throw error; + } + } + + /** + * Check if workflow exists and is running + */ + async isWorkflowRunning(goalRunId: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + return false; + } + + try { + const describe = await handle.describe(); + return describe.status.name === 'RUNNING'; + } catch (error) { + return false; + } + } + + /** + * Terminate workflow (force stop) + */ + async terminateWorkflow(goalRunId: string, reason: string): Promise { + const handle = await this.getWorkflowHandle(goalRunId); + if (!handle) { + throw new Error(`Workflow not found for goal run ${goalRunId}`); + } + + this.logger.warn(`Terminating workflow for goal run ${goalRunId}: ${reason}`); + await handle.terminate(reason); + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/temporal/temporal.module.ts b/packages/bytebot-workflow-orchestrator/src/temporal/temporal.module.ts new file mode 100644 index 000000000..829374959 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/temporal/temporal.module.ts @@ -0,0 +1,155 @@ +/** + * Temporal Client Module for ByteBot Orchestrator + * + * Provides Temporal client integration for starting workflows and sending signals. + * This module is used by GoalsService when TEMPORAL_WORKFLOW_ENABLED=true. + * + * Industry patterns: + * - Connection pooling with singleton client + * - Graceful shutdown on module destroy + * - Health check integration + * - Feature flag based activation + * + * Phase 11.7: Enhanced gRPC connection configuration + * - Added gRPC keepalive settings for ClusterMesh cross-cluster routing + * - Added retry logic for transient connection failures + * - Added connection verification on startup + */ + +import { Module, Global, OnModuleDestroy, OnModuleInit, Logger } from '@nestjs/common'; +import { ConfigService } from '@nestjs/config'; +import { Connection, Client } from '@temporalio/client'; +import { TemporalWorkflowService } from './temporal-workflow.service'; +import { FeatureFlagService } from './feature-flag.service'; +import { TEMPORAL_CLIENT, TEMPORAL_CONNECTION } from './constants'; +import { TemporalCapabilityProbeService } from './temporal-capability-probe.service'; + +/** + * Retry helper for Temporal connection with exponential backoff + * Best practice for cross-cluster connectivity via ClusterMesh + */ +async function connectWithRetry( + address: string, + logger: Logger, + maxRetries: number = 3, + initialDelayMs: number = 1000, +): Promise { + let lastError: Error | null = null; + + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + logger.log(`Connecting to Temporal at ${address} (attempt ${attempt}/${maxRetries})...`); + + const connection = await Connection.connect({ + address, + // Phase 11.7: Optimized connection settings for ClusterMesh + // Based on Temporal SDK best practices for cross-cluster gRPC + connectTimeout: 10000, // 10 seconds - fast fail for network issues + }); + + // Verify connection is working by making a simple call + const client = new Client({ connection, namespace: 'default' }); + try { + await client.workflowService.getSystemInfo({}); + logger.log(`Temporal connection verified successfully`); + } catch (verifyError) { + // getSystemInfo may fail if namespace doesn't exist, but connection is still valid + logger.debug(`Connection verification note: ${verifyError}`); + } + + return connection; + } catch (error) { + lastError = error as Error; + logger.warn(`Temporal connection attempt ${attempt} failed: ${error}`); + + if (attempt < maxRetries) { + const delay = initialDelayMs * Math.pow(2, attempt - 1); + logger.log(`Retrying in ${delay}ms...`); + await new Promise((resolve) => setTimeout(resolve, delay)); + } + } + } + + throw lastError || new Error('Failed to connect to Temporal after retries'); +} + +@Global() +@Module({ + providers: [ + TemporalWorkflowService, + FeatureFlagService, + TemporalCapabilityProbeService, + { + provide: TEMPORAL_CONNECTION, + useFactory: async (configService: ConfigService): Promise => { + const enabled = configService.get('TEMPORAL_WORKFLOW_ENABLED', 'false') === 'true'; + + if (!enabled) { + return null; + } + + const address = configService.get( + 'TEMPORAL_ADDRESS', + 'temporal-frontend.temporal.svc.cluster.local:7233' + ); + + const logger = new Logger('TemporalConnection'); + + try { + // Phase 11.7: Use retry logic for ClusterMesh cross-cluster routing + // ClusterMesh may have brief connectivity gaps during endpoint sync + const connection = await connectWithRetry(address, logger, 3, 1000); + logger.log('Temporal connection established successfully'); + return connection; + } catch (error) { + logger.error(`Failed to connect to Temporal after all retries: ${error}`); + logger.warn('Temporal workflows will be disabled - falling back to legacy orchestrator'); + return null; // Don't crash, fall back to legacy + } + }, + inject: [ConfigService], + }, + { + provide: TEMPORAL_CLIENT, + useFactory: async ( + connection: Connection | null, + configService: ConfigService + ): Promise => { + if (!connection) { + return null; + } + + const namespace = configService.get('TEMPORAL_NAMESPACE', 'bytebot'); + + return new Client({ + connection, + namespace, + }); + }, + inject: [TEMPORAL_CONNECTION, ConfigService], + }, + ], + exports: [ + TEMPORAL_CLIENT, + TEMPORAL_CONNECTION, + TemporalWorkflowService, + FeatureFlagService, + TemporalCapabilityProbeService, + ], +}) +export class TemporalModule implements OnModuleDestroy { + private readonly logger = new Logger(TemporalModule.name); + + constructor( + private readonly configService: ConfigService, + ) {} + + async onModuleDestroy(): Promise { + const enabled = this.configService.get('TEMPORAL_WORKFLOW_ENABLED', 'false') === 'true'; + + if (enabled) { + this.logger.log('Closing Temporal connection...'); + // Connection is automatically closed when the module is destroyed + } + } +} diff --git a/packages/bytebot-workflow-orchestrator/src/tracing.ts b/packages/bytebot-workflow-orchestrator/src/tracing.ts new file mode 100644 index 000000000..001b9e0c7 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/src/tracing.ts @@ -0,0 +1,147 @@ +/** + * OpenTelemetry Tracing Configuration + * v1.0.0: Nice-to-Have Enhancement for Distributed Tracing + * + * Configures OpenTelemetry SDK for distributed tracing across the + * ByteBot multi-service architecture. + * + * Features: + * - Auto-instrumentation for HTTP, Express, NestJS + * - Custom spans for business logic + * - Context propagation across service boundaries + * - Trace export to Jaeger/Tempo + * + * IMPORTANT: This file must be imported BEFORE any NestJS modules. + * + * Usage in main.ts: + * ```typescript + * import otelSDK from './tracing'; + * await otelSDK.start(); + * // ... rest of bootstrap + * ``` + * + * @see /docs/CONTEXT_PROPAGATION_FIX_JAN_2026.md + */ + +import { NodeSDK } from '@opentelemetry/sdk-node'; +import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node'; +import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http'; +import { Resource } from '@opentelemetry/resources'; +import { + ATTR_SERVICE_NAME, + ATTR_SERVICE_VERSION, +} from '@opentelemetry/semantic-conventions'; +import { + BatchSpanProcessor, + ParentBasedSampler, + TraceIdRatioBasedSampler, +} from '@opentelemetry/sdk-trace-base'; + +const isProduction = process.env.NODE_ENV === 'production'; +const isTracingEnabled = process.env.OTEL_TRACING_ENABLED !== 'false'; + +// Service configuration +const serviceName = process.env.OTEL_SERVICE_NAME || 'bytebot-workflow-orchestrator'; +const serviceVersion = process.env.npm_package_version || '1.0.0'; + +// Sampling configuration +// In production, sample a fraction of traces to reduce overhead +const samplingRate = isProduction + ? parseFloat(process.env.OTEL_SAMPLING_RATE || '0.1') // 10% default in prod + : 1.0; // 100% in development + +// Export endpoint +const exporterEndpoint = process.env.OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + || 'http://localhost:4318/v1/traces'; + +// Create OTLP exporter for Jaeger/Tempo +const traceExporter = new OTLPTraceExporter({ + url: exporterEndpoint, + headers: process.env.OTEL_EXPORTER_OTLP_HEADERS + ? JSON.parse(process.env.OTEL_EXPORTER_OTLP_HEADERS) + : {}, +}); + +// Create the SDK with production-ready configuration +const sdk = new NodeSDK({ + resource: new Resource({ + [ATTR_SERVICE_NAME]: serviceName, + [ATTR_SERVICE_VERSION]: serviceVersion, + 'service.namespace': 'bytebot', + 'deployment.environment': process.env.NODE_ENV || 'development', + }), + + // Parent-based sampling with configurable rate + sampler: new ParentBasedSampler({ + root: new TraceIdRatioBasedSampler(samplingRate), + }), + + // Batch processing for efficient trace export + spanProcessor: new BatchSpanProcessor(traceExporter, { + maxQueueSize: isProduction ? 2048 : 512, + maxExportBatchSize: isProduction ? 512 : 128, + scheduledDelayMillis: isProduction ? 5000 : 1000, + exportTimeoutMillis: 30000, + }), + + instrumentations: [ + getNodeAutoInstrumentations({ + // Disable noisy file system instrumentation + '@opentelemetry/instrumentation-fs': { enabled: false }, + + // Configure HTTP instrumentation + '@opentelemetry/instrumentation-http': { + enabled: true, + ignoreIncomingRequestHook: (req) => { + // Filter out health checks and metrics endpoints + const ignorePaths = ['/health', '/ready', '/metrics', '/favicon.ico']; + return ignorePaths.some((path) => req.url?.includes(path)) || false; + }, + }, + + // NestJS instrumentation + '@opentelemetry/instrumentation-nestjs-core': { + enabled: true, + }, + + // Express instrumentation (underlying HTTP server) + '@opentelemetry/instrumentation-express': { + enabled: true, + }, + }), + ], +}); + +// Graceful shutdown handling +const shutdown = async () => { + console.log('Shutting down OpenTelemetry SDK...'); + try { + await sdk.shutdown(); + console.log('OpenTelemetry SDK shut down successfully'); + } catch (err) { + console.error('Error shutting down OpenTelemetry SDK', err); + } +}; + +process.on('SIGTERM', shutdown); +process.on('SIGINT', shutdown); + +// Export a wrapper that can be conditionally started +export const otelSDK = { + start: async () => { + if (!isTracingEnabled) { + console.log('OpenTelemetry tracing disabled via OTEL_TRACING_ENABLED=false'); + return; + } + + try { + await sdk.start(); + console.log(`OpenTelemetry SDK started (service: ${serviceName}, sampling: ${samplingRate * 100}%)`); + } catch (err) { + console.error('Failed to start OpenTelemetry SDK:', err); + } + }, + shutdown, +}; + +export default otelSDK; diff --git a/packages/bytebot-workflow-orchestrator/test/infrastructure-retry.spec.ts b/packages/bytebot-workflow-orchestrator/test/infrastructure-retry.spec.ts new file mode 100644 index 000000000..a1e70c67f --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/test/infrastructure-retry.spec.ts @@ -0,0 +1,404 @@ +/** + * Golden Run Regression Test: Infrastructure Retry (No Replan) + * + * This test suite verifies the fix for consuming replan budget on + * infrastructure failures (404, timeout, network errors). + * + * Bug: Infrastructure failures like 404 were treated as semantic failures, + * triggering replans and consuming the 3-attempt replan budget. + * This caused goal runs to fail when the actual task logic was correct. + * + * Fix: Infrastructure failure classification and retry + * 1. [INFRA] prefix in error messages signals infrastructure failure + * 2. Infrastructure failures trigger RETRY action (not REPLAN) + * 3. Retry uses exponential backoff (10s, 20s, 40s, 80s, 160s) + * 4. Max 5 retries before escalating to replan + * 5. Replan budget is preserved for actual semantic failures + * + * @see orchestrator-loop.service.ts v1.1.1 + */ + +import { OrchestratorLoopService } from '../src/services/orchestrator-loop.service'; +import { ChecklistItemStatus } from '@prisma/client'; + +// Mock modules +jest.mock('../src/services/prisma.service'); +jest.mock('../src/services/goal-run.service'); +jest.mock('../src/services/planner.service'); +jest.mock('../src/services/workflow.service'); +jest.mock('../src/services/task-dispatch.service'); +jest.mock('@nestjs/event-emitter'); +jest.mock('@nestjs/config'); + +describe('InfrastructureRetry', () => { + let orchestratorLoop: OrchestratorLoopService; + let mockPrisma: any; + let mockGoalRunService: any; + let mockPlannerService: any; + let mockWorkflowService: any; + let mockTaskDispatchService: any; + let mockEventEmitter: any; + let mockConfigService: any; + + beforeEach(() => { + jest.clearAllMocks(); + + mockPrisma = { + goalRun: { + findUnique: jest.fn(), + updateMany: jest.fn(), + }, + checklistItem: { + findUnique: jest.fn(), + update: jest.fn(), + }, + steeringMessage: { + update: jest.fn(), + }, + }; + + mockGoalRunService = { + getPendingSteering: jest.fn().mockResolvedValue(null), + createActivityEvent: jest.fn(), + updatePhase: jest.fn(), + failGoalRun: jest.fn(), + completeGoalRun: jest.fn(), + }; + + mockPlannerService = { + generateReplan: jest.fn(), + }; + + mockWorkflowService = { + ensureWorkspaceProvisioned: jest.fn().mockResolvedValue({ success: true }), + }; + + mockTaskDispatchService = { + getStatusCheckHealth: jest.fn().mockReturnValue({ isHealthy: true, consecutiveFailures: 0 }), + getLastProgressTime: jest.fn().mockReturnValue(new Date()), + }; + + mockEventEmitter = { + emit: jest.fn(), + }; + + mockConfigService = { + get: jest.fn(), + }; + + orchestratorLoop = new OrchestratorLoopService( + mockPrisma, + mockGoalRunService, + mockPlannerService, + mockWorkflowService, + mockTaskDispatchService, + mockEventEmitter, + mockConfigService, + ); + }); + + describe('isInfrastructureFailure detection', () => { + const testCases = [ + // Should be detected as infrastructure failures + { outcome: '[INFRA] Task not found after 70s (404 x14)', expected: true }, + { outcome: 'Error: [INFRA] Agent unreachable', expected: true }, + { outcome: 'INFRA_LOOKUP_FAILED: Task not found', expected: true }, + { outcome: 'Task not found in task controller (404)', expected: true }, + { outcome: '503 Service Unavailable', expected: true }, + { outcome: 'Error: connect ECONNREFUSED 10.0.0.1:9991', expected: true }, + { outcome: 'Error: ETIMEDOUT', expected: true }, + { outcome: 'Error: socket hang up', expected: true }, + { outcome: 'Waiting for capacity', expected: true }, + { outcome: 'Workspace not ready: pending provisioning', expected: true }, + + // Should NOT be detected as infrastructure failures (semantic failures) + { outcome: 'Verification failed: expected "login" but got "error"', expected: false }, + { outcome: 'Task failed: could not complete the required action', expected: false }, + { outcome: 'Authentication failed: invalid credentials', expected: false }, + { outcome: 'Permission denied: cannot access resource', expected: false }, + { outcome: 'Error: element not found on page', expected: false }, + { outcome: 'Assertion failed: prices do not match', expected: false }, + ]; + + testCases.forEach(({ outcome, expected }) => { + it(`should ${expected ? '' : 'NOT '}detect "${outcome.substring(0, 40)}..." as infrastructure failure`, () => { + const item = { actualOutcome: outcome }; + const result = (orchestratorLoop as any).isInfrastructureFailure(item); + expect(result).toBe(expected); + }); + }); + + it('should return false for null/undefined actualOutcome', () => { + expect((orchestratorLoop as any).isInfrastructureFailure({})).toBe(false); + expect((orchestratorLoop as any).isInfrastructureFailure({ actualOutcome: null })).toBe(false); + expect((orchestratorLoop as any).isInfrastructureFailure({ actualOutcome: undefined })).toBe(false); + }); + }); + + describe('makeDecision infrastructure handling', () => { + it('should return RETRY for infrastructure failure (not REPLAN)', async () => { + const goalRun = { + id: 'goal-1', + phase: 'EXECUTING', + planVersions: [{ + checklistItems: [ + { + id: 'item-1', + status: ChecklistItemStatus.FAILED, + description: 'Do something', + actualOutcome: '[INFRA] Task not found after 70s (404 x14)', + }, + ], + }], + }; + + // Access private method via any cast + const decision = await (orchestratorLoop as any).makeDecision(goalRun); + + expect(decision.action).toBe('RETRY'); + expect(decision.itemId).toBe('item-1'); + expect(decision.reason).toContain('Infrastructure failure'); + }); + + it('should return REPLAN for semantic failure (not RETRY)', async () => { + const goalRun = { + id: 'goal-1', + phase: 'EXECUTING', + planVersions: [{ + checklistItems: [ + { + id: 'item-1', + status: ChecklistItemStatus.FAILED, + description: 'Click login button', + actualOutcome: 'Verification failed: login page not displayed', + }, + ], + }], + }; + + const decision = await (orchestratorLoop as any).makeDecision(goalRun); + + expect(decision.action).toBe('REPLAN'); + expect(decision.itemId).toBe('item-1'); + }); + + it('should track infrastructure retry count per item', async () => { + const goalRun = { + id: 'goal-1', + phase: 'EXECUTING', + planVersions: [{ + checklistItems: [ + { + id: 'item-1', + status: ChecklistItemStatus.FAILED, + actualOutcome: '[INFRA] 404', + }, + ], + }], + }; + + // First retry + let decision = await (orchestratorLoop as any).makeDecision(goalRun); + expect(decision.action).toBe('RETRY'); + expect(decision.retryCount).toBe(1); + + // Simulate retry was executed + (orchestratorLoop as any).infraRetryCounts.set('item-1', 1); + (orchestratorLoop as any).infraRetryAfter.set('item-1', Date.now() - 1000); // Backoff expired + + // Second retry + decision = await (orchestratorLoop as any).makeDecision(goalRun); + expect(decision.action).toBe('RETRY'); + expect(decision.retryCount).toBe(2); + }); + + it('should respect exponential backoff between retries', async () => { + const goalRun = { + id: 'goal-1', + phase: 'EXECUTING', + planVersions: [{ + checklistItems: [ + { + id: 'item-1', + status: ChecklistItemStatus.FAILED, + actualOutcome: '[INFRA] 404', + }, + ], + }], + }; + + // Set retry count but backoff not expired + (orchestratorLoop as any).infraRetryCounts.set('item-1', 2); + (orchestratorLoop as any).infraRetryAfter.set('item-1', Date.now() + 30000); // 30s in future + + const decision = await (orchestratorLoop as any).makeDecision(goalRun); + + // Should return CONTINUE (wait for backoff) + expect(decision.action).toBe('CONTINUE'); + }); + + it('should escalate to REPLAN after max infrastructure retries', async () => { + const goalRun = { + id: 'goal-1', + phase: 'EXECUTING', + planVersions: [{ + checklistItems: [ + { + id: 'item-1', + status: ChecklistItemStatus.FAILED, + actualOutcome: '[INFRA] Still failing after many retries', + }, + ], + }], + }; + + // Set retry count to max (5) + (orchestratorLoop as any).infraRetryCounts.set('item-1', 5); + + const decision = await (orchestratorLoop as any).makeDecision(goalRun); + + // Should escalate to REPLAN + expect(decision.action).toBe('REPLAN'); + }); + }); + + describe('executeInfrastructureRetry', () => { + it('should reset checklist item to PENDING for retry', async () => { + const goalRun = { id: 'goal-1' }; + const itemId = 'item-1'; + + mockPrisma.checklistItem.findUnique.mockResolvedValue({ + id: itemId, + description: 'Test step', + }); + + await (orchestratorLoop as any).executeInfrastructureRetry( + goalRun, + itemId, + 1, + 10000, + '[INFRA] 404', + ); + + // Check item was reset to PENDING + expect(mockPrisma.checklistItem.update).toHaveBeenCalledWith({ + where: { id: itemId }, + data: { + status: ChecklistItemStatus.PENDING, + startedAt: null, + completedAt: null, + actualOutcome: null, + }, + }); + + // Check activity event was created + expect(mockGoalRunService.createActivityEvent).toHaveBeenCalledWith( + goalRun.id, + expect.objectContaining({ + eventType: 'STEP_INFRA_RETRY', + severity: 'warning', + }), + ); + }); + + it('should update retry tracking with exponential backoff', async () => { + const goalRun = { id: 'goal-1' }; + const itemId = 'item-1'; + + mockPrisma.checklistItem.findUnique.mockResolvedValue({ + id: itemId, + description: 'Test step', + }); + + const beforeTime = Date.now(); + await (orchestratorLoop as any).executeInfrastructureRetry( + goalRun, + itemId, + 3, // 3rd retry + 40000, // 40 second delay + '[INFRA] 404', + ); + + // Check retry count was updated + expect((orchestratorLoop as any).infraRetryCounts.get(itemId)).toBe(3); + + // Check retry after was set (current time + delay) + const retryAfter = (orchestratorLoop as any).infraRetryAfter.get(itemId); + expect(retryAfter).toBeGreaterThanOrEqual(beforeTime + 40000); + }); + }); + + describe('Golden Run: Replan Budget Preservation', () => { + /** + * CRITICAL REGRESSION TEST + * + * This test verifies that infrastructure failures do NOT consume + * the replan budget, which is reserved for actual semantic failures. + * + * Before fix: 3 x 404 errors = 3 replans = goal run FAILED + * After fix: 404 errors trigger retries, replan budget preserved + */ + it('should NOT consume replan budget on infrastructure failures', async () => { + const goalRun = { + id: 'goal-1', + phase: 'EXECUTING', + planVersions: [{ + checklistItems: [ + { + id: 'item-1', + status: ChecklistItemStatus.FAILED, + actualOutcome: '[INFRA] 404 - Task not found', + }, + ], + }], + }; + + // Simulate 3 infrastructure failures + for (let i = 0; i < 3; i++) { + // Clear backoff for testing + (orchestratorLoop as any).infraRetryAfter.set('item-1', 0); + + const decision = await (orchestratorLoop as any).makeDecision(goalRun); + + // All should be RETRY (not REPLAN) + expect(decision.action).toBe('RETRY'); + + // Update retry count + (orchestratorLoop as any).infraRetryCounts.set('item-1', i + 1); + } + + // Replan count should still be 0 + expect((orchestratorLoop as any).replanCounts.get(goalRun.id) || 0).toBe(0); + }); + + it('should preserve replan budget for semantic failures after infra retries', async () => { + const goalRun = { + id: 'goal-1', + phase: 'EXECUTING', + planVersions: [{ + checklistItems: [ + { + id: 'item-1', + status: ChecklistItemStatus.FAILED, + actualOutcome: 'Verification failed: expected result not found', + }, + ], + }], + }; + + // Simulate previous infrastructure retries on a different item + (orchestratorLoop as any).infraRetryCounts.set('item-0', 5); // Max retries exhausted + + // This is a semantic failure - should REPLAN + const decision = await (orchestratorLoop as any).makeDecision(goalRun); + + expect(decision.action).toBe('REPLAN'); + + // Replan count should be used now + (orchestratorLoop as any).replanCounts.set(goalRun.id, 1); + + // Verify we still have 2 replans left + expect((orchestratorLoop as any).replanCounts.get(goalRun.id)).toBe(1); + }); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/test/jest-e2e.json b/packages/bytebot-workflow-orchestrator/test/jest-e2e.json new file mode 100644 index 000000000..d07185464 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/test/jest-e2e.json @@ -0,0 +1,12 @@ +{ + "moduleFileExtensions": ["js", "json", "ts"], + "rootDir": ".", + "testEnvironment": "node", + "testRegex": ".spec.ts$", + "transform": { + "^.+\\.(t|j)s$": "ts-jest" + }, + "moduleNameMapper": { + "^src/(.*)$": "/../src/$1" + } +} diff --git a/packages/bytebot-workflow-orchestrator/test/task-dispatch-race-condition.spec.ts b/packages/bytebot-workflow-orchestrator/test/task-dispatch-race-condition.spec.ts new file mode 100644 index 000000000..3288ac36c --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/test/task-dispatch-race-condition.spec.ts @@ -0,0 +1,471 @@ +/** + * Golden Run Regression Test: Task Dispatch Race Condition (404 Tolerance) + * + * This test suite verifies the fix for the task completion race condition + * where tasks completing faster than the poll interval caused false failures. + * + * Bug: TaskDesktop CRs were deleted immediately after task completion, + * causing 404 errors when the orchestrator polled for status. + * These 404s were misinterpreted as task failures, triggering + * unnecessary replans and consuming the replan budget. + * + * Fix: Multi-layered defense + * 1. Task-controller: TTL-based delayed CR deletion (60s) + * 2. Orchestrator: 404 tolerance with grace window (60s) + * 3. Orchestrator: Infrastructure retry (not replan) for 404s + * 4. Orchestrator: Fallback lookups (database, checklist) + * + * @see 2025-12-19-task-dispatch-404-investigation.md + */ + +import { TaskDispatchService } from '../src/services/task-dispatch.service'; + +// Mock modules before importing +jest.mock('../src/services/prisma.service'); +jest.mock('@nestjs/config'); +jest.mock('@nestjs/event-emitter'); +jest.mock('axios'); + +describe('TaskDispatch404Tolerance', () => { + let taskDispatchService: TaskDispatchService; + let mockPrisma: any; + let mockConfigService: any; + let mockEventEmitter: any; + let mockAxios: any; + + beforeEach(() => { + jest.clearAllMocks(); + + mockPrisma = { + task: { + findFirst: jest.fn(), + findUnique: jest.fn(), + }, + checklistItem: { + findUnique: jest.fn(), + update: jest.fn(), + }, + goalRun: { + findUnique: jest.fn(), + }, + activityEvent: { + create: jest.fn(), + }, + }; + + mockConfigService = { + get: jest.fn((key: string, defaultValue?: string) => { + const config: Record = { + BYTEBOT_AGENT_API_URL: 'http://bytebot-agent:9991', + TASK_POLL_INTERVAL_MS: '5000', + TASK_DISPATCH_ENABLED: 'true', + TASK_STATUS_NOTFOUND_GRACE_MS: '60000', // 60 seconds + }; + return config[key] || defaultValue; + }), + }; + + mockEventEmitter = { + emit: jest.fn(), + }; + + // Create service instance with mocked dependencies + taskDispatchService = new TaskDispatchService( + mockConfigService, + mockPrisma, + mockEventEmitter, + ); + }); + + describe('404 Grace Window', () => { + /** + * CRITICAL REGRESSION TEST + * + * This test simulates the exact scenario that caused false failures: + * 1. Task is dispatched and starts running + * 2. Task completes successfully + * 3. TaskDesktop CR is deleted before next poll + * 4. Orchestrator polls and gets 404 + * 5. Verify 404 enters grace window (not immediate failure) + */ + it('should NOT immediately fail on 404 - enters grace window instead', async () => { + // Setup: Simulate a dispatched task + const record = { + idempotencyKey: 'goal-1:item-1:1', + taskId: 'task-123', + goalRunId: 'goal-1', + checklistItemId: 'item-1', + status: 'RUNNING' as const, + createdAt: new Date(Date.now() - 10000), + consecutiveCheckFailures: 0, + notFoundCount: 0, + }; + + // Add record to internal tracking + (taskDispatchService as any).dispatchRecords.set(record.idempotencyKey, record); + + // Mock 404 response from agent API + (taskDispatchService as any).httpClient = { + get: jest.fn().mockRejectedValue({ + response: { status: 404 }, + message: 'Not Found', + }), + post: jest.fn(), + }; + + // Mock fallback lookups (task not found anywhere yet) + mockPrisma.task.findFirst.mockResolvedValue(null); + mockPrisma.checklistItem.findUnique.mockResolvedValue({ + id: 'item-1', + status: 'IN_PROGRESS', // Still in progress + }); + mockPrisma.goalRun.findUnique.mockResolvedValue({ id: 'goal-1' }); + + // Run poll cycle + await taskDispatchService.pollTaskCompletions(); + + // Get updated record + const updatedRecord = (taskDispatchService as any).dispatchRecords.get(record.idempotencyKey); + + // CRITICAL ASSERTIONS: + // 1. Task should NOT be marked as FAILED + expect(updatedRecord.status).not.toBe('FAILED'); + expect(updatedRecord.status).not.toBe('INFRA_FAILED'); + + // 2. Grace window should be started + expect(updatedRecord.notFoundGraceStartedAt).toBeDefined(); + + // 3. 404 count should be incremented + expect(updatedRecord.notFoundCount).toBe(1); + + // 4. Checklist item should NOT be updated to FAILED + expect(mockPrisma.checklistItem.update).not.toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + status: 'FAILED', + }), + }), + ); + }); + + it('should complete task when found in database fallback during 404 grace', async () => { + // Setup: Task in 404 grace window + const record = { + idempotencyKey: 'goal-1:item-1:1', + taskId: 'task-123', + goalRunId: 'goal-1', + checklistItemId: 'item-1', + status: 'RUNNING' as const, + createdAt: new Date(Date.now() - 30000), + consecutiveCheckFailures: 0, + notFoundCount: 1, + notFoundGraceStartedAt: new Date(Date.now() - 5000), // 5 seconds into grace + }; + + (taskDispatchService as any).dispatchRecords.set(record.idempotencyKey, record); + + // Mock 404 response + (taskDispatchService as any).httpClient = { + get: jest.fn().mockRejectedValue({ + response: { status: 404 }, + }), + post: jest.fn(), + }; + + // Mock database fallback finds completed task! + mockPrisma.task.findFirst.mockResolvedValue({ + id: 'task-123', + status: 'COMPLETED', + result: { message: 'Task completed successfully' }, + }); + mockPrisma.goalRun.findUnique.mockResolvedValue({ id: 'goal-1' }); + + // Run poll cycle + await taskDispatchService.pollTaskCompletions(); + + // Get updated record + const updatedRecord = (taskDispatchService as any).dispatchRecords.get(record.idempotencyKey); + + // CRITICAL: Task should be marked COMPLETED (not FAILED) + expect(updatedRecord.status).toBe('COMPLETED'); + + // Checklist item should be updated to COMPLETED + expect(mockPrisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + where: { id: 'item-1' }, + data: expect.objectContaining({ + status: 'COMPLETED', + }), + }), + ); + }); + + it('should mark as INFRA_FAILED after grace window expires (not immediate replan)', async () => { + // Setup: Task with expired grace window + const graceStartTime = new Date(Date.now() - 70000); // 70 seconds ago (past 60s grace) + const record = { + idempotencyKey: 'goal-1:item-1:1', + taskId: 'task-123', + goalRunId: 'goal-1', + checklistItemId: 'item-1', + status: 'RUNNING' as const, + createdAt: new Date(Date.now() - 120000), + consecutiveCheckFailures: 0, + notFoundCount: 13, // Many 404s + notFoundGraceStartedAt: graceStartTime, + }; + + (taskDispatchService as any).dispatchRecords.set(record.idempotencyKey, record); + + // Mock 404 response + (taskDispatchService as any).httpClient = { + get: jest.fn().mockRejectedValue({ + response: { status: 404 }, + }), + post: jest.fn(), + }; + + // Mock fallback lookups (task not found anywhere) + mockPrisma.task.findFirst.mockResolvedValue(null); + mockPrisma.checklistItem.findUnique.mockResolvedValue({ + id: 'item-1', + status: 'IN_PROGRESS', + }); + mockPrisma.goalRun.findUnique.mockResolvedValue({ id: 'goal-1' }); + + // Run poll cycle + await taskDispatchService.pollTaskCompletions(); + + // Get updated record + const updatedRecord = (taskDispatchService as any).dispatchRecords.get(record.idempotencyKey); + + // CRITICAL: Should be INFRA_FAILED (not regular FAILED) + expect(updatedRecord.status).toBe('INFRA_FAILED'); + expect(updatedRecord.failureType).toBe('INFRASTRUCTURE'); + + // Error message should have [INFRA] prefix + expect(mockPrisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + status: 'FAILED', + actualOutcome: expect.stringContaining('[INFRA]'), + }), + }), + ); + }); + }); + + describe('Infrastructure Failure Classification', () => { + it('should classify 404 as infrastructure failure (triggers retry, not replan)', async () => { + const record = { + idempotencyKey: 'goal-1:item-1:1', + taskId: 'task-123', + goalRunId: 'goal-1', + checklistItemId: 'item-1', + status: 'RUNNING' as const, + createdAt: new Date(Date.now() - 120000), + consecutiveCheckFailures: 0, + notFoundCount: 10, + notFoundGraceStartedAt: new Date(Date.now() - 70000), // Expired + }; + + (taskDispatchService as any).dispatchRecords.set(record.idempotencyKey, record); + + (taskDispatchService as any).httpClient = { + get: jest.fn().mockRejectedValue({ response: { status: 404 } }), + post: jest.fn(), + }; + + mockPrisma.task.findFirst.mockResolvedValue(null); + mockPrisma.checklistItem.findUnique.mockResolvedValue({ id: 'item-1', status: 'IN_PROGRESS' }); + mockPrisma.goalRun.findUnique.mockResolvedValue({ id: 'goal-1' }); + + await taskDispatchService.pollTaskCompletions(); + + const updatedRecord = (taskDispatchService as any).dispatchRecords.get(record.idempotencyKey); + + // Should be marked as INFRASTRUCTURE failure type + expect(updatedRecord.failureType).toBe('INFRASTRUCTURE'); + + // Error should have [INFRA] prefix for orchestrator-loop to detect + expect(mockPrisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + actualOutcome: expect.stringContaining('[INFRA]'), + }), + }), + ); + }); + + it('should classify actual task failure as semantic failure (triggers replan)', async () => { + const record = { + idempotencyKey: 'goal-1:item-1:1', + taskId: 'task-123', + goalRunId: 'goal-1', + checklistItemId: 'item-1', + status: 'RUNNING' as const, + createdAt: new Date(Date.now() - 30000), + consecutiveCheckFailures: 0, + notFoundCount: 0, + lastSuccessfulCheck: new Date(), + }; + + (taskDispatchService as any).dispatchRecords.set(record.idempotencyKey, record); + + // Task is found but has FAILED status (semantic failure) + (taskDispatchService as any).httpClient = { + get: jest.fn().mockResolvedValue({ + data: { + id: 'task-123', + status: 'FAILED', + error: 'Verification failed: expected output not found', + }, + }), + post: jest.fn(), + }; + + mockPrisma.goalRun.findUnique.mockResolvedValue({ id: 'goal-1' }); + + await taskDispatchService.pollTaskCompletions(); + + const updatedRecord = (taskDispatchService as any).dispatchRecords.get(record.idempotencyKey); + + // Should be FAILED with SEMANTIC type (not INFRASTRUCTURE) + expect(updatedRecord.status).toBe('FAILED'); + expect(updatedRecord.failureType).toBe('SEMANTIC'); + + // Error should NOT have [INFRA] prefix + expect(mockPrisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + actualOutcome: expect.not.stringContaining('[INFRA]'), + }), + }), + ); + }); + }); + + describe('Recovery from Transient 404', () => { + it('should clear grace window when task reappears', async () => { + // Setup: Task was getting 404s but now reappears + const record = { + idempotencyKey: 'goal-1:item-1:1', + taskId: 'task-123', + goalRunId: 'goal-1', + checklistItemId: 'item-1', + status: 'RUNNING' as const, + createdAt: new Date(Date.now() - 30000), + consecutiveCheckFailures: 0, + notFoundCount: 3, + notFoundGraceStartedAt: new Date(Date.now() - 15000), // Was in grace window + lastSuccessfulCheck: new Date(Date.now() - 20000), + }; + + (taskDispatchService as any).dispatchRecords.set(record.idempotencyKey, record); + + // Task reappears and is still running + (taskDispatchService as any).httpClient = { + get: jest.fn().mockResolvedValue({ + data: { + id: 'task-123', + status: 'RUNNING', + }, + }), + post: jest.fn(), + }; + + await taskDispatchService.pollTaskCompletions(); + + const updatedRecord = (taskDispatchService as any).dispatchRecords.get(record.idempotencyKey); + + // Grace window should be cleared + expect(updatedRecord.notFoundGraceStartedAt).toBeUndefined(); + expect(updatedRecord.notFoundCount).toBe(0); + + // Status should still be RUNNING + expect(updatedRecord.status).toBe('RUNNING'); + }); + }); + + describe('Golden Run: Fast Task Completion Race', () => { + /** + * CRITICAL REGRESSION TEST + * + * This test simulates the exact scenario that caused the original bug: + * 1. Task is dispatched (PENDING) + * 2. Task immediately starts (RUNNING) + * 3. Task completes in ~5 seconds (COMPLETED) + * 4. CR is deleted within ~5 seconds + * 5. Orchestrator polls at 5s interval + * 6. If timing is unlucky, orchestrator gets 404 + * + * With the fix: + * - CR stays alive for 60s (TTL) + * - Orchestrator has 60s grace window for 404 + * - Fallback lookup finds COMPLETED in database + * - Task marked as COMPLETED (not FAILED) + * - NO replan triggered + */ + it('should handle fast task completion without false failure (full scenario)', async () => { + // Step 1: Dispatch task + const dispatchResult = await (taskDispatchService as any).dispatchTask({ + goalRunId: 'goal-fast-task', + checklistItemId: 'item-fast-task', + title: 'Fast task', + description: 'This task completes very quickly', + }); + + // Skip if dispatch is disabled in test environment + if (!dispatchResult.success) { + // This is expected if httpClient isn't mocked for POST + return; + } + + const taskId = dispatchResult.taskId; + const record = (taskDispatchService as any).dispatchRecords.values().next().value; + + // Step 2: Simulate task running + record.status = 'RUNNING'; + (taskDispatchService as any).dispatchRecords.set(record.idempotencyKey, record); + + // Step 3: First poll - task completes and CR is deleted (404) + (taskDispatchService as any).httpClient.get = jest.fn().mockRejectedValue({ + response: { status: 404 }, + }); + + // Database fallback finds completed task! + mockPrisma.task.findFirst.mockResolvedValue({ + id: taskId, + status: 'COMPLETED', + result: { success: true }, + }); + mockPrisma.goalRun.findUnique.mockResolvedValue({ id: 'goal-fast-task' }); + + await taskDispatchService.pollTaskCompletions(); + + // Get updated record + const updatedRecord = (taskDispatchService as any).dispatchRecords.get(record.idempotencyKey); + + // CRITICAL ASSERTIONS: + // 1. Task should be COMPLETED (not FAILED) + expect(updatedRecord.status).toBe('COMPLETED'); + + // 2. Failure type should NOT be set + expect(updatedRecord.failureType).toBeUndefined(); + + // 3. Checklist item should be COMPLETED + expect(mockPrisma.checklistItem.update).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + status: 'COMPLETED', + }), + }), + ); + + // 4. NO [INFRA] error should be recorded + const updateCalls = mockPrisma.checklistItem.update.mock.calls; + const lastCall = updateCalls[updateCalls.length - 1]; + expect(lastCall[0].data.actualOutcome).not.toContain('[INFRA]'); + }); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/test/workspace-provisioning.spec.ts b/packages/bytebot-workflow-orchestrator/test/workspace-provisioning.spec.ts new file mode 100644 index 000000000..a301c70a1 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/test/workspace-provisioning.spec.ts @@ -0,0 +1,422 @@ +/** + * Golden Run Regression Test: Workspace Provisioning + * + * This test suite verifies the fix for the runaway loop bug where + * workspace provisioning failures caused infinite workflow creation. + * + * Bug: When workspace provisioning failed (e.g., capacity timeout), + * the error was thrown before linkWorkflowRun() was called, + * leaving goalRun.workflowRunId as NULL. This caused the + * orchestrator loop to create a NEW workflow on each iteration. + * + * Fix: "Link first, provision second" pattern + * 1. Create workflow record (DB only) + * 2. LINK to goal run immediately + * 3. Attempt provisioning (may fail, that's OK) + * 4. Use WAITING_FOR_CAPACITY status with exponential backoff + * + * @see https://book.kubebuilder.io/reference/good-practices (idempotency) + */ + +import { WorkflowService, WorkspaceProvisioningStatus, WorkflowStatus } from '../src/services/workflow.service'; + +// Mock modules before importing +jest.mock('../src/services/prisma.service'); +jest.mock('../src/services/workspace.service'); + +describe('WorkspaceProvisioning', () => { + let workflowService: WorkflowService; + let mockPrisma: any; + let mockWorkspaceService: any; + let mockEventEmitter: any; + + beforeEach(() => { + // Reset mocks + jest.clearAllMocks(); + + // Create mock services with jest.fn() + mockPrisma = { + $transaction: jest.fn(), + workspace: { + create: jest.fn(), + update: jest.fn(), + findUnique: jest.fn(), + }, + workflowRun: { + create: jest.fn(), + update: jest.fn(), + findUnique: jest.fn(), + }, + workflowNode: { + create: jest.fn(), + updateMany: jest.fn(), + }, + }; + + mockWorkspaceService = { + ensureWorkspaceDesktop: jest.fn(), + hibernateWorkspace: jest.fn(), + }; + + mockEventEmitter = { + emit: jest.fn(), + }; + + // Create service instance with mocked dependencies + workflowService = new WorkflowService( + mockPrisma, + mockWorkspaceService, + mockEventEmitter, + ); + }); + + describe('isCapacityError (detection)', () => { + // Test the capacity error detection patterns + const capacityErrors = [ + 'Timeout waiting for pod to become ready', + 'Pod not ready after 60 seconds', + 'Insufficient cpu to schedule pod', + 'Node unschedulable', + 'No nodes available matching pod affinity', + '0/8 nodes are available: Insufficient memory', + 'Quota exceeded for resource cpu', + 'Too many requests (429)', + 'DeadlineExceeded: context deadline exceeded', + 'Cluster at capacity', + ]; + + const nonCapacityErrors = [ + 'Invalid configuration: missing API key', + 'Authentication failed', + 'Permission denied', + 'Image not found: jbutler1980/bytebot-desktop:v9999', + 'Secret "credentials" not found', + ]; + + capacityErrors.forEach((errorMsg) => { + it(`should detect "${errorMsg.substring(0, 30)}..." as capacity error`, () => { + const isCapacity = (workflowService as any).isCapacityError(errorMsg); + expect(isCapacity).toBe(true); + }); + }); + + nonCapacityErrors.forEach((errorMsg) => { + it(`should NOT detect "${errorMsg.substring(0, 30)}..." as capacity error`, () => { + const isCapacity = (workflowService as any).isCapacityError(errorMsg); + expect(isCapacity).toBe(false); + }); + }); + }); + + describe('createWorkflowRecord (DB-only creation)', () => { + it('should create workflow record without calling ensureWorkspaceDesktop', async () => { + const mockWorkspace = { id: 'ws-test', tenantId: 'tenant-1' }; + const mockWorkflowRun = { id: 'wf-test', createdAt: new Date() }; + + mockPrisma.$transaction.mockImplementation(async (callback: any) => { + return callback({ + workspace: { create: jest.fn().mockResolvedValue(mockWorkspace) }, + workflowRun: { create: jest.fn().mockResolvedValue(mockWorkflowRun) }, + workflowNode: { create: jest.fn().mockResolvedValue({}) }, + }); + }); + + const result = await workflowService.createWorkflowRecord({ + tenantId: 'tenant-1', + nodes: [], + }); + + // Verify workflow record was created + expect(result).toBeDefined(); + expect(result.id).toBeDefined(); + expect(result.workspaceId).toBeDefined(); + expect(result.status).toBe(WorkflowStatus.PENDING); + + // CRITICAL: workspaceService.ensureWorkspaceDesktop should NOT be called + expect(mockWorkspaceService.ensureWorkspaceDesktop).not.toHaveBeenCalled(); + + // Verify record-created event was emitted (not workflow.created) + expect(mockEventEmitter.emit).toHaveBeenCalledWith( + 'workflow.record-created', + expect.objectContaining({ + workflowId: expect.any(String), + workspaceId: expect.any(String), + }), + ); + }); + }); + + describe('ensureWorkspaceProvisioned (idempotent provisioning)', () => { + it('should return success immediately if workspace is already READY (idempotent)', async () => { + const workflowId = 'wf-test'; + const workspaceId = 'ws-test'; + + mockPrisma.workflowRun.findUnique.mockResolvedValue({ + id: workflowId, + workspace: { + id: workspaceId, + status: WorkspaceProvisioningStatus.READY, + }, + }); + + const result = await workflowService.ensureWorkspaceProvisioned( + workflowId, + 'tenant-1', + ); + + expect(result.success).toBe(true); + expect(result.status).toBe(WorkspaceProvisioningStatus.READY); + + // CRITICAL: workspaceService was NOT called (idempotent skip) + expect(mockWorkspaceService.ensureWorkspaceDesktop).not.toHaveBeenCalled(); + }); + + it('should return FAILED without retrying if status is already FAILED', async () => { + const workflowId = 'wf-test'; + + mockPrisma.workflowRun.findUnique.mockResolvedValue({ + id: workflowId, + workspace: { + id: 'ws-test', + status: WorkspaceProvisioningStatus.FAILED, + error: 'Previous failure', + }, + }); + + const result = await workflowService.ensureWorkspaceProvisioned( + workflowId, + 'tenant-1', + ); + + expect(result.success).toBe(false); + expect(result.status).toBe(WorkspaceProvisioningStatus.FAILED); + expect(result.error).toBe('Previous failure'); + + // CRITICAL: workspaceService was NOT called + expect(mockWorkspaceService.ensureWorkspaceDesktop).not.toHaveBeenCalled(); + }); + + it('should set WAITING_FOR_CAPACITY on timeout error (not FAILED)', async () => { + const workflowId = 'wf-test'; + const workspaceId = 'ws-test'; + + mockPrisma.workflowRun.findUnique.mockResolvedValue({ + id: workflowId, + workspace: { + id: workspaceId, + status: WorkspaceProvisioningStatus.PENDING, + provisioningAttemptCount: 0, + }, + }); + + // Simulate timeout error + mockWorkspaceService.ensureWorkspaceDesktop.mockRejectedValue( + new Error('Timeout waiting for pod to become ready'), + ); + + const result = await workflowService.ensureWorkspaceProvisioned( + workflowId, + 'tenant-1', + ); + + expect(result.success).toBe(false); + expect(result.status).toBe(WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY); + expect(result.retryAfterMs).toBeDefined(); + expect(result.retryAfterMs).toBeGreaterThan(0); + + // CRITICAL: Workspace was updated to WAITING_FOR_CAPACITY, not FAILED + expect(mockPrisma.workspace.update).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + }), + }), + ); + }); + + it('should implement exponential backoff for WAITING_FOR_CAPACITY', async () => { + const workflowId = 'wf-test'; + const workspaceId = 'ws-test'; + const lastAttemptTime = new Date(); + + mockPrisma.workflowRun.findUnique.mockResolvedValue({ + id: workflowId, + workspace: { + id: workspaceId, + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + provisioningAttemptCount: 2, // 3rd attempt + lastProvisioningAttemptAt: lastAttemptTime, + }, + }); + + const result = await workflowService.ensureWorkspaceProvisioned( + workflowId, + 'tenant-1', + ); + + // Should return waiting status with backoff time + expect(result.success).toBe(false); + expect(result.status).toBe(WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY); + expect(result.retryAfterMs).toBeDefined(); + }); + }); + + describe('Golden Run: Runaway Loop Prevention', () => { + /** + * CRITICAL REGRESSION TEST + * + * This test simulates the exact scenario that caused the runaway loop: + * 1. Create a workflow + * 2. Provisioning fails with timeout + * 3. Verify workflow is created (for linking) + * 4. Verify status is WAITING_FOR_CAPACITY (not FAILED) + * 5. Verify subsequent calls are idempotent (no new workflows created) + */ + it('should NOT create new workflows when provisioning fails (runaway loop fix)', async () => { + const tenantId = 'tenant-test'; + let createCallCount = 0; + let createdWorkflowId: string | null = null; + let createdWorkspaceId: string | null = null; + + // Mock transaction to track workflow creation + mockPrisma.$transaction.mockImplementation(async (callback: any) => { + createCallCount++; + const workflowId = `wf-${createCallCount}`; + const workspaceId = `ws-${createCallCount}`; + + if (!createdWorkflowId) { + createdWorkflowId = workflowId; + createdWorkspaceId = workspaceId; + } + + return callback({ + workspace: { + create: jest.fn().mockResolvedValue({ + id: workspaceId, + tenantId, + status: WorkspaceProvisioningStatus.PENDING, + }), + }, + workflowRun: { + create: jest.fn().mockResolvedValue({ + id: workflowId, + createdAt: new Date(), + }), + }, + workflowNode: { + create: jest.fn().mockResolvedValue({}), + }, + }); + }); + + // First call: Create workflow record + const result1 = await workflowService.createWorkflowRecord({ + tenantId, + nodes: [], + }); + + expect(result1.id).toBeDefined(); + expect(createCallCount).toBe(1); + + // Setup for provisioning failure + mockPrisma.workflowRun.findUnique.mockResolvedValue({ + id: result1.id, + workspace: { + id: result1.workspaceId, + status: WorkspaceProvisioningStatus.PENDING, + provisioningAttemptCount: 0, + }, + }); + + mockWorkspaceService.ensureWorkspaceDesktop.mockRejectedValue( + new Error('Timeout waiting for pod'), + ); + + // Attempt provisioning (should fail with WAITING_FOR_CAPACITY) + const provisionResult = await workflowService.ensureWorkspaceProvisioned( + result1.id, + tenantId, + ); + + expect(provisionResult.success).toBe(false); + expect(provisionResult.status).toBe(WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY); + + // KEY ASSERTION: No new workflow was created (createCallCount still 1) + expect(createCallCount).toBe(1); + + // Simulate orchestrator loop calling ensureWorkspaceProvisioned again + mockPrisma.workflowRun.findUnique.mockResolvedValue({ + id: result1.id, + workspace: { + id: result1.workspaceId, + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + provisioningAttemptCount: 1, + lastProvisioningAttemptAt: new Date(), + }, + }); + + const provisionResult2 = await workflowService.ensureWorkspaceProvisioned( + result1.id, + tenantId, + ); + + // Should return backoff time without creating new workflow + expect(provisionResult2.success).toBe(false); + expect(provisionResult2.status).toBe(WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY); + expect(provisionResult2.retryAfterMs).toBeDefined(); + + // CRITICAL ASSERTION: Still only 1 workflow created (no runaway loop) + expect(createCallCount).toBe(1); + }); + + it('should transition to READY when provisioning eventually succeeds', async () => { + const workflowId = 'wf-test'; + const workspaceId = 'ws-test'; + const tenantId = 'tenant-1'; + + // Setup workspace in WAITING_FOR_CAPACITY after backoff period + mockPrisma.workflowRun.findUnique.mockResolvedValue({ + id: workflowId, + workspace: { + id: workspaceId, + status: WorkspaceProvisioningStatus.WAITING_FOR_CAPACITY, + provisioningAttemptCount: 2, + lastProvisioningAttemptAt: new Date(Date.now() - 120000), // 2 minutes ago + }, + }); + + // Provisioning now succeeds + mockWorkspaceService.ensureWorkspaceDesktop.mockResolvedValue({ + status: 'Running', + ready: true, + }); + + const result = await workflowService.ensureWorkspaceProvisioned( + workflowId, + tenantId, + ); + + // Should succeed + expect(result.success).toBe(true); + expect(result.status).toBe(WorkspaceProvisioningStatus.READY); + + // Workspace should be updated to READY + expect(mockPrisma.workspace.update).toHaveBeenCalledWith( + expect.objectContaining({ + data: expect.objectContaining({ + status: WorkspaceProvisioningStatus.READY, + }), + }), + ); + + // workflow.created event should be emitted + expect(mockEventEmitter.emit).toHaveBeenCalledWith( + 'workflow.created', + expect.objectContaining({ + workflowId, + workspaceId, + }), + ); + }); + }); +}); diff --git a/packages/bytebot-workflow-orchestrator/tsconfig.build.json b/packages/bytebot-workflow-orchestrator/tsconfig.build.json new file mode 100644 index 000000000..64f86c6bd --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/tsconfig.build.json @@ -0,0 +1,4 @@ +{ + "extends": "./tsconfig.json", + "exclude": ["node_modules", "test", "dist", "**/*spec.ts"] +} diff --git a/packages/bytebot-workflow-orchestrator/tsconfig.json b/packages/bytebot-workflow-orchestrator/tsconfig.json new file mode 100644 index 000000000..b3ef7f855 --- /dev/null +++ b/packages/bytebot-workflow-orchestrator/tsconfig.json @@ -0,0 +1,28 @@ +{ + "compilerOptions": { + "module": "commonjs", + "declaration": true, + "removeComments": true, + "emitDecoratorMetadata": true, + "experimentalDecorators": true, + "allowSyntheticDefaultImports": true, + "target": "ES2022", + "sourceMap": true, + "outDir": "./dist", + "baseUrl": "./", + "incremental": true, + "skipLibCheck": true, + "strictNullChecks": true, + "noImplicitAny": true, + "strictBindCallApply": true, + "forceConsistentCasingInFileNames": true, + "noFallthroughCasesInSwitch": true, + "esModuleInterop": true, + "resolveJsonModule": true, + "paths": { + "@/*": ["src/*"] + } + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/packages/bytebotd/package-lock.json b/packages/bytebotd/package-lock.json index 1f884aa5b..c134552d1 100644 --- a/packages/bytebotd/package-lock.json +++ b/packages/bytebotd/package-lock.json @@ -22,6 +22,7 @@ "class-transformer": "^0.5.1", "class-validator": "^0.14.2", "http-proxy-middleware": "^3.0.5", + "pngjs": "^7.0.0", "reflect-metadata": "^0.2.2", "rxjs": "^7.8.1", "sharp": "^0.34.2", @@ -2766,6 +2767,15 @@ "@jimp/custom": ">=0.3.5" } }, + "node_modules/@jimp/png/node_modules/pngjs": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/pngjs/-/pngjs-6.0.0.tgz", + "integrity": "sha512-TRzzuFRRmEoSW/p1KVAmiOgPco2Irlah+bGFCeNfJXxxYGwSw7YwAOAcd7X28K/m5bjBWKsC29KyoMfHbypayg==", + "license": "MIT", + "engines": { + "node": ">=12.13.0" + } + }, "node_modules/@jimp/tiff": { "version": "0.22.12", "resolved": "https://registry.npmjs.org/@jimp/tiff/-/tiff-0.22.12.tgz", @@ -2933,6 +2943,40 @@ "@napi-rs/nice-win32-x64-msvc": "1.0.1" } }, + "node_modules/@napi-rs/nice-android-arm-eabi": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-android-arm-eabi/-/nice-android-arm-eabi-1.0.1.tgz", + "integrity": "sha512-5qpvOu5IGwDo7MEKVqqyAxF90I6aLj4n07OzpARdgDRfz8UbBztTByBp0RC59r3J1Ij8uzYi6jI7r5Lws7nn6w==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-android-arm64": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-android-arm64/-/nice-android-arm64-1.0.1.tgz", + "integrity": "sha512-GqvXL0P8fZ+mQqG1g0o4AO9hJjQaeYG84FRfZaYjyJtZZZcMjXW5TwkL8Y8UApheJgyE13TQ4YNUssQaTgTyvA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 10" + } + }, "node_modules/@napi-rs/nice-darwin-arm64": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@napi-rs/nice-darwin-arm64/-/nice-darwin-arm64-1.0.1.tgz", @@ -2950,6 +2994,227 @@ "node": ">= 10" } }, + "node_modules/@napi-rs/nice-darwin-x64": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-darwin-x64/-/nice-darwin-x64-1.0.1.tgz", + "integrity": "sha512-jXnMleYSIR/+TAN/p5u+NkCA7yidgswx5ftqzXdD5wgy/hNR92oerTXHc0jrlBisbd7DpzoaGY4cFD7Sm5GlgQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-freebsd-x64": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-freebsd-x64/-/nice-freebsd-x64-1.0.1.tgz", + "integrity": "sha512-j+iJ/ezONXRQsVIB/FJfwjeQXX7A2tf3gEXs4WUGFrJjpe/z2KB7sOv6zpkm08PofF36C9S7wTNuzHZ/Iiccfw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-arm-gnueabihf": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-arm-gnueabihf/-/nice-linux-arm-gnueabihf-1.0.1.tgz", + "integrity": "sha512-G8RgJ8FYXYkkSGQwywAUh84m946UTn6l03/vmEXBYNJxQJcD+I3B3k5jmjFG/OPiU8DfvxutOP8bi+F89MCV7Q==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-arm64-gnu": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-arm64-gnu/-/nice-linux-arm64-gnu-1.0.1.tgz", + "integrity": "sha512-IMDak59/W5JSab1oZvmNbrms3mHqcreaCeClUjwlwDr0m3BoR09ZiN8cKFBzuSlXgRdZ4PNqCYNeGQv7YMTjuA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-arm64-musl": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-arm64-musl/-/nice-linux-arm64-musl-1.0.1.tgz", + "integrity": "sha512-wG8fa2VKuWM4CfjOjjRX9YLIbysSVV1S3Kgm2Fnc67ap/soHBeYZa6AGMeR5BJAylYRjnoVOzV19Cmkco3QEPw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-ppc64-gnu": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-ppc64-gnu/-/nice-linux-ppc64-gnu-1.0.1.tgz", + "integrity": "sha512-lxQ9WrBf0IlNTCA9oS2jg/iAjQyTI6JHzABV664LLrLA/SIdD+I1i3Mjf7TsnoUbgopBcCuDztVLfJ0q9ubf6Q==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-riscv64-gnu": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-riscv64-gnu/-/nice-linux-riscv64-gnu-1.0.1.tgz", + "integrity": "sha512-3xs69dO8WSWBb13KBVex+yvxmUeEsdWexxibqskzoKaWx9AIqkMbWmE2npkazJoopPKX2ULKd8Fm9veEn0g4Ig==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-s390x-gnu": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-s390x-gnu/-/nice-linux-s390x-gnu-1.0.1.tgz", + "integrity": "sha512-lMFI3i9rlW7hgToyAzTaEybQYGbQHDrpRkg+1gJWEpH0PLAQoZ8jiY0IzakLfNWnVda1eTYYlxxFYzW8Rqczkg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-x64-gnu": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-x64-gnu/-/nice-linux-x64-gnu-1.0.1.tgz", + "integrity": "sha512-XQAJs7DRN2GpLN6Fb+ZdGFeYZDdGl2Fn3TmFlqEL5JorgWKrQGRUrpGKbgZ25UeZPILuTKJ+OowG2avN8mThBA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-linux-x64-musl": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-linux-x64-musl/-/nice-linux-x64-musl-1.0.1.tgz", + "integrity": "sha512-/rodHpRSgiI9o1faq9SZOp/o2QkKQg7T+DK0R5AkbnI/YxvAIEHf2cngjYzLMQSQgUhxym+LFr+UGZx4vK4QdQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-win32-arm64-msvc": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-win32-arm64-msvc/-/nice-win32-arm64-msvc-1.0.1.tgz", + "integrity": "sha512-rEcz9vZymaCB3OqEXoHnp9YViLct8ugF+6uO5McifTedjq4QMQs3DHz35xBEGhH3gJWEsXMUbzazkz5KNM5YUg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-win32-ia32-msvc": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-win32-ia32-msvc/-/nice-win32-ia32-msvc-1.0.1.tgz", + "integrity": "sha512-t7eBAyPUrWL8su3gDxw9xxxqNwZzAqKo0Szv3IjVQd1GpXXVkb6vBBQUuxfIYaXMzZLwlxRQ7uzM2vdUE9ULGw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@napi-rs/nice-win32-x64-msvc": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@napi-rs/nice-win32-x64-msvc/-/nice-win32-x64-msvc-1.0.1.tgz", + "integrity": "sha512-JlF+uDcatt3St2ntBG8H02F1mM45i5SF9W+bIKiReVE6wiy3o16oBP/yxt+RZ+N6LbCImJXJ6bXNO2kn9AXicg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, "node_modules/@nestjs/cli": { "version": "11.0.5", "resolved": "https://registry.npmjs.org/@nestjs/cli/-/cli-11.0.5.tgz", @@ -3803,6 +4068,159 @@ "node": ">=10" } }, + "node_modules/@swc/core-darwin-x64": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.11.13.tgz", + "integrity": "sha512-uSA4UwgsDCIysUPfPS8OrQTH2h9spO7IYFd+1NB6dJlVGUuR6jLKuMBOP1IeLeax4cGHayvkcwSJ3OvxHwgcZQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.11.13.tgz", + "integrity": "sha512-boVtyJzS8g30iQfe8Q46W5QE/cmhKRln/7NMz/5sBP/am2Lce9NL0d05NnFwEWJp1e2AMGHFOdRr3Xg1cDiPKw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.11.13.tgz", + "integrity": "sha512-+IK0jZ84zHUaKtwpV+T+wT0qIUBnK9v2xXD03vARubKF+eUqCsIvcVHXmLpFuap62dClMrhCiwW10X3RbXNlHw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.11.13.tgz", + "integrity": "sha512-+ukuB8RHD5BHPCUjQwuLP98z+VRfu+NkKQVBcLJGgp0/+w7y0IkaxLY/aKmrAS5ofCNEGqKL+AOVyRpX1aw+XA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.11.13.tgz", + "integrity": "sha512-q9H3WI3U3dfJ34tdv60zc8oTuWvSd5fOxytyAO9Pc5M82Hic3jjWaf2xBekUg07ubnMZpyfnv+MlD+EbUI3Llw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-musl": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.11.13.tgz", + "integrity": "sha512-9aaZnnq2pLdTbAzTSzy/q8dr7Woy3aYIcQISmw1+Q2/xHJg5y80ZzbWSWKYca/hKonDMjIbGR6dp299I5J0aeA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.11.13.tgz", + "integrity": "sha512-n3QZmDewkHANcoHvtwvA6yJbmS4XJf0MBMmwLZoKDZ2dOnC9D/jHiXw7JOohEuzYcpLoL5tgbqmjxa3XNo9Oow==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.11.13.tgz", + "integrity": "sha512-wM+Nt4lc6YSJFthCx3W2dz0EwFNf++j0/2TQ0Js9QLJuIxUQAgukhNDVCDdq8TNcT0zuA399ALYbvj5lfIqG6g==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.11.13.tgz", + "integrity": "sha512-+X5/uW3s1L5gK7wAo0E27YaAoidJDo51dnfKSfU7gF3mlEUuWH8H1bAy5OTt2mU4eXtfsdUMEVXSwhDlLtQkuA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, "node_modules/@swc/counter": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", @@ -10545,12 +10963,12 @@ } }, "node_modules/pngjs": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/pngjs/-/pngjs-6.0.0.tgz", - "integrity": "sha512-TRzzuFRRmEoSW/p1KVAmiOgPco2Irlah+bGFCeNfJXxxYGwSw7YwAOAcd7X28K/m5bjBWKsC29KyoMfHbypayg==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pngjs/-/pngjs-7.0.0.tgz", + "integrity": "sha512-LKWqWJRhstyYo9pGvgor/ivk2w94eSjE3RGVuzLGlr3NmD8bf7RcYGze1mNdEHRP6TRP6rMuDHk5t44hnTRyow==", "license": "MIT", "engines": { - "node": ">=12.13.0" + "node": ">=14.19.0" } }, "node_modules/prelude-ls": { diff --git a/packages/bytebotd/package.json b/packages/bytebotd/package.json index 2be870d79..4dcc1b7ef 100644 --- a/packages/bytebotd/package.json +++ b/packages/bytebotd/package.json @@ -39,6 +39,7 @@ "class-transformer": "^0.5.1", "class-validator": "^0.14.2", "http-proxy-middleware": "^3.0.5", + "pngjs": "^7.0.0", "reflect-metadata": "^0.2.2", "rxjs": "^7.8.1", "sharp": "^0.34.2", diff --git a/packages/bytebotd/root/etc/supervisor/conf.d/supervisord.conf b/packages/bytebotd/root/etc/supervisor/conf.d/supervisord.conf index 6c39ad2ec..a9b0561b2 100644 --- a/packages/bytebotd/root/etc/supervisor/conf.d/supervisord.conf +++ b/packages/bytebotd/root/etc/supervisor/conf.d/supervisord.conf @@ -68,7 +68,10 @@ redirect_stderr=true depends_on=xfce4 [program:websockify] -command=websockify 6080 localhost:5900 +# v2.0.29: Added --heartbeat=30 for WebSocket connection stability +# Sends PING frames every 30 seconds to prevent proxy/firewall timeouts +# Recommended for VNC connections behind Kong/HAProxy (typically timeout at 60s) +command=websockify --heartbeat=30 6080 localhost:5900 autostart=true autorestart=true startsecs=5 diff --git a/packages/bytebotd/src/computer-use/computer-use.controller.ts b/packages/bytebotd/src/computer-use/computer-use.controller.ts index 68c79513f..da70ae82d 100644 --- a/packages/bytebotd/src/computer-use/computer-use.controller.ts +++ b/packages/bytebotd/src/computer-use/computer-use.controller.ts @@ -1,5 +1,6 @@ import { Controller, + Get, Post, Body, Logger, @@ -16,6 +17,28 @@ export class ComputerUseController { constructor(private readonly computerUseService: ComputerUseService) {} + @Get('capabilities') + capabilities() { + return this.computerUseService.getCapabilities(); + } + + @Post('reset-input') + async resetInput() { + try { + this.logger.log('Reset input request'); + return await this.computerUseService.resetInput(); + } catch (error: any) { + this.logger.error( + `Error resetting input: ${error.message}`, + error.stack, + ); + throw new HttpException( + `Failed to reset input: ${error.message}`, + HttpStatus.INTERNAL_SERVER_ERROR, + ); + } + } + @Post() async action( @Body(new ComputerActionValidationPipe()) params: ComputerActionDto, diff --git a/packages/bytebotd/src/computer-use/computer-use.service.spec.ts b/packages/bytebotd/src/computer-use/computer-use.service.spec.ts new file mode 100644 index 000000000..12e37ff37 --- /dev/null +++ b/packages/bytebotd/src/computer-use/computer-use.service.spec.ts @@ -0,0 +1,148 @@ +import { PNG } from 'pngjs'; + +jest.mock('../nut/nut.service', () => ({ + NutService: class NutService {}, +})); + +const { ComputerUseService } = + require('./computer-use.service') as typeof import('./computer-use.service'); + +describe('ComputerUseService (input safety)', () => { + afterEach(() => { + jest.useRealTimers(); + delete process.env.BYTEBOT_INPUT_DEADMAN_MS; + }); + + it('treats non-modifier press_keys down as a tap (sendKeys)', async () => { + const nutService: any = { + holdKeys: jest.fn().mockResolvedValue({ success: true }), + sendKeys: jest.fn().mockResolvedValue({ success: true }), + mouseMoveEvent: jest.fn().mockResolvedValue({ success: true }), + mouseClickEvent: jest.fn().mockResolvedValue({ success: true }), + mouseButtonEvent: jest.fn().mockResolvedValue({ success: true }), + mouseWheelEvent: jest.fn().mockResolvedValue({ success: true }), + typeText: jest.fn().mockResolvedValue(undefined), + pasteText: jest.fn().mockResolvedValue(undefined), + screendump: jest.fn().mockResolvedValue(Buffer.alloc(0)), + getCursorPosition: jest.fn().mockResolvedValue({ x: 0, y: 0 }), + }; + + const service = new ComputerUseService(nutService); + + await service.action({ action: 'press_keys', keys: ['Enter'], press: 'down' } as any); + + expect(nutService.sendKeys).toHaveBeenCalledWith(['Enter'], 75); + expect(nutService.holdKeys).not.toHaveBeenCalledWith(['Enter'], true); + }); + + it('auto-releases modifier holds via deadman', async () => { + jest.useFakeTimers(); + process.env.BYTEBOT_INPUT_DEADMAN_MS = '50'; + + const nutService: any = { + holdKeys: jest.fn().mockResolvedValue({ success: true }), + sendKeys: jest.fn().mockResolvedValue({ success: true }), + mouseMoveEvent: jest.fn().mockResolvedValue({ success: true }), + mouseClickEvent: jest.fn().mockResolvedValue({ success: true }), + mouseButtonEvent: jest.fn().mockResolvedValue({ success: true }), + mouseWheelEvent: jest.fn().mockResolvedValue({ success: true }), + typeText: jest.fn().mockResolvedValue(undefined), + pasteText: jest.fn().mockResolvedValue(undefined), + screendump: jest.fn().mockResolvedValue(Buffer.alloc(0)), + getCursorPosition: jest.fn().mockResolvedValue({ x: 0, y: 0 }), + }; + + const service = new ComputerUseService(nutService); + + await service.action({ action: 'press_keys', keys: ['Shift'], press: 'down' } as any); + + expect(nutService.holdKeys).toHaveBeenCalledWith(['Shift'], true); + + await jest.advanceTimersByTimeAsync(60); + + expect(nutService.holdKeys).toHaveBeenCalledWith(['Shift'], false); + }); + + it('resetInput releases stuck mouse buttons', async () => { + jest.useFakeTimers(); + process.env.BYTEBOT_INPUT_DEADMAN_MS = '1000'; + + const nutService: any = { + holdKeys: jest.fn().mockResolvedValue({ success: true }), + sendKeys: jest.fn().mockResolvedValue({ success: true }), + mouseMoveEvent: jest.fn().mockResolvedValue({ success: true }), + mouseClickEvent: jest.fn().mockResolvedValue({ success: true }), + mouseButtonEvent: jest.fn().mockResolvedValue({ success: true }), + mouseWheelEvent: jest.fn().mockResolvedValue({ success: true }), + typeText: jest.fn().mockResolvedValue(undefined), + pasteText: jest.fn().mockResolvedValue(undefined), + screendump: jest.fn().mockResolvedValue(Buffer.alloc(0)), + getCursorPosition: jest.fn().mockResolvedValue({ x: 0, y: 0 }), + }; + + const service = new ComputerUseService(nutService); + + await service.action({ action: 'press_mouse', button: 'left', press: 'down' } as any); + + const result = await service.resetInput(); + + expect(nutService.mouseButtonEvent).toHaveBeenCalledWith('left', false); + expect(result.releasedButtons).toContain('left'); + }); + + it('includes imageHash in screenshot response when possible', async () => { + const png = new PNG({ width: 32, height: 32 }); + for (let i = 0; i < png.data.length; i += 4) { + png.data[i] = 10; + png.data[i + 1] = 20; + png.data[i + 2] = 30; + png.data[i + 3] = 255; + } + const buf = PNG.sync.write(png); + + const nutService: any = { + holdKeys: jest.fn().mockResolvedValue({ success: true }), + sendKeys: jest.fn().mockResolvedValue({ success: true }), + mouseMoveEvent: jest.fn().mockResolvedValue({ success: true }), + mouseClickEvent: jest.fn().mockResolvedValue({ success: true }), + mouseButtonEvent: jest.fn().mockResolvedValue({ success: true }), + mouseWheelEvent: jest.fn().mockResolvedValue({ success: true }), + typeText: jest.fn().mockResolvedValue(undefined), + pasteText: jest.fn().mockResolvedValue(undefined), + screendump: jest.fn().mockResolvedValue(buf), + getCursorPosition: jest.fn().mockResolvedValue({ x: 0, y: 0 }), + }; + + const service = new ComputerUseService(nutService); + + const res = await service.action({ action: 'screenshot' } as any); + + expect(typeof res.image).toBe('string'); + expect(res.image.length).toBeGreaterThan(0); + expect(res.imageHash).toMatch(/^[0-9a-f]{16}$/); + }); + + it('reports capabilities for handshake', () => { + const nutService: any = { + holdKeys: jest.fn().mockResolvedValue({ success: true }), + sendKeys: jest.fn().mockResolvedValue({ success: true }), + mouseMoveEvent: jest.fn().mockResolvedValue({ success: true }), + mouseClickEvent: jest.fn().mockResolvedValue({ success: true }), + mouseButtonEvent: jest.fn().mockResolvedValue({ success: true }), + mouseWheelEvent: jest.fn().mockResolvedValue({ success: true }), + typeText: jest.fn().mockResolvedValue(undefined), + pasteText: jest.fn().mockResolvedValue(undefined), + screendump: jest.fn().mockResolvedValue(Buffer.alloc(0)), + getCursorPosition: jest.fn().mockResolvedValue({ x: 0, y: 0 }), + }; + + const service = new ComputerUseService(nutService); + const caps = service.getCapabilities(); + + expect(caps.resetInput).toBe(true); + expect(caps.screenshotHash).toBe(true); + expect(caps.inputDeadmanMs).toBeGreaterThan(0); + expect(caps.supportedActions).toContain('press_keys'); + expect(caps.supportedActions).toContain('screenshot'); + }); +}); diff --git a/packages/bytebotd/src/computer-use/computer-use.service.ts b/packages/bytebotd/src/computer-use/computer-use.service.ts index dbf44ac2e..d55eb6b75 100644 --- a/packages/bytebotd/src/computer-use/computer-use.service.ts +++ b/packages/bytebotd/src/computer-use/computer-use.service.ts @@ -3,6 +3,7 @@ import { exec, spawn } from 'child_process'; import { promisify } from 'util'; import * as fs from 'fs/promises'; import * as path from 'path'; +import { PNG } from 'pngjs'; import { NutService } from '../nut/nut.service'; import { ComputerAction, @@ -25,9 +26,52 @@ import { @Injectable() export class ComputerUseService { private readonly logger = new Logger(ComputerUseService.name); + private readonly deadmanMs = parseInt( + process.env.BYTEBOT_INPUT_DEADMAN_MS || '1500', + 10, + ); + + private readonly keyDownSince = new Map(); + private readonly keyDeadmanTimers = new Map(); + private readonly buttonDownSince = new Map<'left' | 'right' | 'middle', number>(); + private readonly buttonDeadmanTimers = new Map< + 'left' | 'right' | 'middle', + NodeJS.Timeout + >(); constructor(private readonly nutService: NutService) {} + getCapabilities(): { + resetInput: true; + screenshotHash: true; + inputDeadmanMs: number; + supportedActions: string[]; + } { + return { + resetInput: true, + screenshotHash: true, + inputDeadmanMs: this.deadmanMs, + supportedActions: [ + 'move_mouse', + 'trace_mouse', + 'click_mouse', + 'press_mouse', + 'drag_mouse', + 'scroll', + 'type_keys', + 'press_keys', + 'type_text', + 'paste_text', + 'wait', + 'screenshot', + 'cursor_position', + 'application', + 'write_file', + 'read_file', + ], + }; + } + async action(params: ComputerAction): Promise { this.logger.log(`Executing computer action: ${params.action}`); @@ -114,51 +158,33 @@ export class ComputerUseService { // Move to the first coordinate await this.nutService.mouseMoveEvent(path[0]); - // Hold keys if provided - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, true); - } - - // Move to each coordinate in the path - for (const coordinates of path) { - await this.nutService.mouseMoveEvent(coordinates); - } - - // Release hold keys - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, false); - } + await this.withHeldModifiers(holdKeys, async () => { + // Move to each coordinate in the path + for (const coordinates of path) { + await this.nutService.mouseMoveEvent(coordinates); + } + }); } private async clickMouse(action: ClickMouseAction): Promise { const { coordinates, button, holdKeys, clickCount } = action; - // Move to coordinates if provided - if (coordinates) { - await this.nutService.mouseMoveEvent(coordinates); - } - - // Hold keys if provided - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, true); - } + await this.withHeldModifiers(holdKeys, async () => { + // Move to coordinates if provided + if (coordinates) { + await this.nutService.mouseMoveEvent(coordinates); + } - // Perform clicks - if (clickCount > 1) { - // Perform multiple clicks - for (let i = 0; i < clickCount; i++) { + // Perform clicks + if (clickCount > 1) { + for (let i = 0; i < clickCount; i++) { + await this.nutService.mouseClickEvent(button); + await this.delay(150); + } + } else { await this.nutService.mouseClickEvent(button); - await this.delay(150); } - } else { - // Perform a single click - await this.nutService.mouseClickEvent(button); - } - - // Release hold keys - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, false); - } + }); } private async pressMouse(action: PressMouseAction): Promise { @@ -172,8 +198,10 @@ export class ComputerUseService { // Perform press if (press === 'down') { await this.nutService.mouseButtonEvent(button, true); + this.markButtonDown(button); } else { await this.nutService.mouseButtonEvent(button, false); + this.markButtonUp(button); } } @@ -183,47 +211,38 @@ export class ComputerUseService { // Move to the first coordinate await this.nutService.mouseMoveEvent(path[0]); - // Hold keys if provided - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, true); - } - - // Perform drag - await this.nutService.mouseButtonEvent(button, true); - for (const coordinates of path) { - await this.nutService.mouseMoveEvent(coordinates); - } - await this.nutService.mouseButtonEvent(button, false); - - // Release hold keys - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, false); - } + await this.withHeldModifiers(holdKeys, async () => { + await this.nutService.mouseButtonEvent(button, true); + this.markButtonDown(button); + try { + for (const coordinates of path) { + await this.nutService.mouseMoveEvent(coordinates); + } + } finally { + try { + await this.nutService.mouseButtonEvent(button, false); + } finally { + this.markButtonUp(button); + } + } + }); } private async scroll(action: ScrollAction): Promise { const { coordinates, direction, scrollCount, holdKeys } = action; - // Move to coordinates if provided - if (coordinates) { - await this.nutService.mouseMoveEvent(coordinates); - } - - // Hold keys if provided - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, true); - } - - // Perform scroll - for (let i = 0; i < scrollCount; i++) { - await this.nutService.mouseWheelEvent(direction, 1); - await new Promise((resolve) => setTimeout(resolve, 150)); - } + await this.withHeldModifiers(holdKeys, async () => { + // Move to coordinates if provided + if (coordinates) { + await this.nutService.mouseMoveEvent(coordinates); + } - // Release hold keys - if (holdKeys) { - await this.nutService.holdKeys(holdKeys, false); - } + // Perform scroll + for (let i = 0; i < scrollCount; i++) { + await this.nutService.mouseWheelEvent(direction, 1); + await new Promise((resolve) => setTimeout(resolve, 150)); + } + }); } private async typeKeys(action: TypeKeysAction): Promise { @@ -233,7 +252,22 @@ export class ComputerUseService { private async pressKeys(action: PressKeysAction): Promise { const { keys, press } = action; - await this.nutService.holdKeys(keys, press === 'down'); + const { modifiers, nonModifiers } = this.splitModifiers(keys); + + // Safety invariant: non-modifier holds are not allowed. Treat as a tap/chord tap. + if (press === 'down' && nonModifiers.length > 0) { + await this.nutService.sendKeys(keys, 75); + return; + } + + if (press === 'down') { + await this.nutService.holdKeys(modifiers, true); + this.markKeysDown(modifiers); + return; + } + + await this.nutService.holdKeys(keys, false); + this.markKeysUp(keys); } private async typeText(action: TypeTextAction): Promise { @@ -250,10 +284,17 @@ export class ComputerUseService { return new Promise((resolve) => setTimeout(resolve, ms)); } - async screenshot(): Promise<{ image: string }> { + async screenshot(): Promise<{ image: string; imageHash?: string }> { this.logger.log(`Taking screenshot`); const buffer = await this.nutService.screendump(); - return { image: `${buffer.toString('base64')}` }; + let imageHash: string | undefined; + try { + imageHash = this.computeAHash(buffer); + } catch (error: any) { + this.logger.warn(`Failed to compute screenshot hash: ${error.message}`); + } + + return { image: `${buffer.toString('base64')}`, ...(imageHash ? { imageHash } : {}) }; } private async cursor_position(): Promise<{ x: number; y: number }> { @@ -261,6 +302,245 @@ export class ComputerUseService { return await this.nutService.getCursorPosition(); } + async resetInput(): Promise<{ + success: true; + releasedKeys: string[]; + releasedButtons: Array<'left' | 'right' | 'middle'>; + }> { + const releasedKeys: string[] = []; + const releasedButtons: Array<'left' | 'right' | 'middle'> = []; + + for (const [key, timer] of this.keyDeadmanTimers.entries()) { + clearTimeout(timer); + this.keyDeadmanTimers.delete(key); + } + for (const [button, timer] of this.buttonDeadmanTimers.entries()) { + clearTimeout(timer); + this.buttonDeadmanTimers.delete(button); + } + + for (const key of this.keyDownSince.keys()) { + try { + await this.nutService.holdKeys([key], false); + releasedKeys.push(key); + } catch (error: any) { + this.logger.warn(`Failed to release key '${key}' during reset: ${error.message}`); + } + } + for (const button of this.buttonDownSince.keys()) { + try { + await this.nutService.mouseButtonEvent(button, false); + releasedButtons.push(button); + } catch (error: any) { + this.logger.warn( + `Failed to release mouse button '${button}' during reset: ${error.message}`, + ); + } + } + + this.keyDownSince.clear(); + this.buttonDownSince.clear(); + + return { success: true, releasedKeys, releasedButtons }; + } + + private splitModifiers(keys: string[]): { modifiers: string[]; nonModifiers: string[] } { + const modifiers: string[] = []; + const nonModifiers: string[] = []; + + for (const key of keys) { + if (this.isModifierKeyName(key)) { + modifiers.push(key); + } else { + nonModifiers.push(key); + } + } + + return { modifiers, nonModifiers }; + } + + private isModifierKeyName(key: string): boolean { + const normalized = key.trim().toLowerCase(); + if (!normalized) return false; + + return ( + normalized.startsWith('shift') || + normalized.startsWith('control') || + normalized.startsWith('ctrl') || + normalized.startsWith('alt') || + normalized.startsWith('meta') || + normalized.startsWith('super') || + normalized.startsWith('cmd') || + normalized.startsWith('command') || + normalized.startsWith('option') + ); + } + + private async withHeldModifiers( + holdKeys: string[] | undefined, + fn: () => Promise, + ): Promise { + const keys = Array.isArray(holdKeys) ? holdKeys : []; + if (keys.length === 0) { + return await fn(); + } + + const { modifiers, nonModifiers } = this.splitModifiers(keys); + if (nonModifiers.length > 0) { + this.logger.warn( + `Ignoring non-modifier hold keys: ${nonModifiers.join(', ')} (only modifiers are allowed)`, + ); + } + + if (modifiers.length === 0) { + return await fn(); + } + + await this.nutService.holdKeys(modifiers, true); + this.markKeysDown(modifiers); + try { + await fn(); + } finally { + try { + await this.nutService.holdKeys(modifiers, false); + } finally { + this.markKeysUp(modifiers); + } + } + } + + private markKeysDown(keys: string[]): void { + const now = Date.now(); + for (const key of keys) { + if (this.keyDownSince.has(key)) continue; + this.keyDownSince.set(key, now); + this.scheduleKeyDeadman(key, now); + } + } + + private markKeysUp(keys: string[]): void { + for (const key of keys) { + this.keyDownSince.delete(key); + const timer = this.keyDeadmanTimers.get(key); + if (timer) { + clearTimeout(timer); + this.keyDeadmanTimers.delete(key); + } + } + } + + private scheduleKeyDeadman(key: string, sinceMs: number): void { + const existing = this.keyDeadmanTimers.get(key); + if (existing) clearTimeout(existing); + + const timer = setTimeout(() => { + void this.deadmanReleaseKey(key, sinceMs); + }, this.deadmanMs); + this.keyDeadmanTimers.set(key, timer); + } + + private async deadmanReleaseKey(key: string, sinceMs: number): Promise { + if (this.keyDownSince.get(key) !== sinceMs) { + return; + } + this.logger.warn(`Deadman auto-releasing key held too long: ${key} (${this.deadmanMs}ms)`); + try { + await this.nutService.holdKeys([key], false); + } catch (error: any) { + this.logger.error(`Deadman failed to release key '${key}': ${error.message}`, error.stack); + } finally { + this.markKeysUp([key]); + } + } + + private markButtonDown(button: 'left' | 'right' | 'middle'): void { + const now = Date.now(); + if (this.buttonDownSince.has(button)) return; + this.buttonDownSince.set(button, now); + this.scheduleButtonDeadman(button, now); + } + + private markButtonUp(button: 'left' | 'right' | 'middle'): void { + this.buttonDownSince.delete(button); + const timer = this.buttonDeadmanTimers.get(button); + if (timer) { + clearTimeout(timer); + this.buttonDeadmanTimers.delete(button); + } + } + + private scheduleButtonDeadman(button: 'left' | 'right' | 'middle', sinceMs: number): void { + const existing = this.buttonDeadmanTimers.get(button); + if (existing) clearTimeout(existing); + + const timer = setTimeout(() => { + void this.deadmanReleaseButton(button, sinceMs); + }, this.deadmanMs); + this.buttonDeadmanTimers.set(button, timer); + } + + private async deadmanReleaseButton( + button: 'left' | 'right' | 'middle', + sinceMs: number, + ): Promise { + if (this.buttonDownSince.get(button) !== sinceMs) { + return; + } + this.logger.warn( + `Deadman auto-releasing mouse button held too long: ${button} (${this.deadmanMs}ms)`, + ); + try { + await this.nutService.mouseButtonEvent(button, false); + } catch (error: any) { + this.logger.error( + `Deadman failed to release mouse button '${button}': ${error.message}`, + error.stack, + ); + } finally { + this.markButtonUp(button); + } + } + + private computeAHash(buffer: Buffer): string { + const width = 8; + const height = 8; + + const png = PNG.sync.read(buffer); + const pixels: number[] = []; + + for (let y = 0; y < height; y++) { + for (let x = 0; x < width; x++) { + const sampleX = Math.min( + png.width - 1, + Math.floor(((x + 0.5) * png.width) / width), + ); + const sampleY = Math.min( + png.height - 1, + Math.floor(((y + 0.5) * png.height) / height), + ); + const idx = (sampleY * png.width + sampleX) * 4; + const r = png.data[idx]; + const g = png.data[idx + 1]; + const b = png.data[idx + 2]; + const lum = Math.round(0.299 * r + 0.587 * g + 0.114 * b); + pixels.push(lum); + } + } + + const avg = pixels.reduce((sum, v) => sum + v, 0) / pixels.length; + let bits = ''; + for (const v of pixels) { + bits += v >= avg ? '1' : '0'; + } + + let hex = ''; + for (let i = 0; i < bits.length; i += 4) { + hex += parseInt(bits.slice(i, i + 4), 2).toString(16); + } + + return hex.padStart(16, '0'); + } + private async application(action: ApplicationAction): Promise { const execAsync = promisify(exec); diff --git a/packages/bytebotd/src/main.ts b/packages/bytebotd/src/main.ts index 3999c8eb4..981a4d0c8 100644 --- a/packages/bytebotd/src/main.ts +++ b/packages/bytebotd/src/main.ts @@ -1,3 +1,11 @@ +/** + * ByteBotD - Desktop Daemon for ByteBot + * + * v2.0.29: WebSocket stability fixes + * - Added server timeout configuration to prevent disconnects + * - Node.js 20 defaults keepAliveTimeout to 5000ms which terminates WebSockets + * - Added proxy timeout configuration for long-lived VNC connections + */ import { NestFactory } from '@nestjs/core'; import { AppModule } from './app.module'; import { createProxyMiddleware } from 'http-proxy-middleware'; @@ -18,15 +26,40 @@ async function bootstrap() { credentials: true, }); + // v2.0.29: WebSocket proxy with timeout configuration for VNC stability const wsProxy = createProxyMiddleware({ target: 'http://localhost:6080', ws: true, changeOrigin: true, pathRewrite: { '^/websockify': '/' }, + // Disable timeouts for long-lived VNC WebSocket connections + proxyTimeout: 0, + timeout: 0, + on: { + proxyReqWs: (proxyReq, req, socket) => { + // Optimize socket for low latency + if (socket.setNoDelay) { + socket.setNoDelay(true); // Disable Nagle's algorithm + } + if (socket.setKeepAlive) { + socket.setKeepAlive(true, 30000); // Keep-alive every 30s + } + }, + }, }); app.use('/websockify', express.raw({ type: '*/*' }), wsProxy); const server = await app.listen(9990); + // v2.0.29: Configure server timeouts for WebSocket stability + // Node.js 18+ defaults keepAliveTimeout to 5000ms (5 seconds) which causes + // VNC WebSocket connections to disconnect after ~7 seconds + // Setting to 0 disables timeouts for long-lived WebSocket connections + server.keepAliveTimeout = 0; // Disable keep-alive timeout (default: 5000ms) + server.headersTimeout = 0; // Disable headers timeout + server.requestTimeout = 0; // Disable request timeout + server.timeout = 0; // Disable socket timeout + console.log('[ByteBotD] Server timeout configuration applied (keepAliveTimeout=0)'); + // Selective upgrade routing server.on('upgrade', (req, socket, head) => { if (req.url?.startsWith('/websockify')) { @@ -34,5 +67,7 @@ async function bootstrap() { } // else let Socket.IO/Nest handle it by not hijacking the socket }); + + console.log('[ByteBotD] Desktop daemon listening on port 9990 (v2.0.29)'); } bootstrap(); diff --git a/packages/shared/src/types/messageContent.types.ts b/packages/shared/src/types/messageContent.types.ts index dade86b60..98864c0f4 100644 --- a/packages/shared/src/types/messageContent.types.ts +++ b/packages/shared/src/types/messageContent.types.ts @@ -127,6 +127,7 @@ export type PressKeysToolUseBlock = ToolUseContentBlock & { input: { keys: string[]; press: Press; + holdMs?: number; }; }; @@ -223,6 +224,17 @@ export type SetTaskStatusToolUseBlock = ToolUseContentBlock & { input: { status: "completed" | "failed" | "needs_help"; description: string; + /** + * Optional structured error code for needs_help. + * When present, downstream systems can deterministically classify whether this requires + * external input (e.g., UI_BLOCKED_SIGNIN) vs internal recovery. + */ + errorCode?: string; + /** + * Optional structured details for needs_help. + * Must not include secrets. + */ + details?: Record; }; };