diff --git a/docs/development/backend/database.mdx b/docs/development/backend/database.mdx
index 37942bf..378c6ea 100644
--- a/docs/development/backend/database.mdx
+++ b/docs/development/backend/database.mdx
@@ -292,9 +292,10 @@ When writing database operations:
The database schema is defined in `src/db/schema.sqlite.ts`. This is the **single source of truth** for all database schema definitions and works across all supported database types.
The schema contains:
-1. Core application tables
-2. Plugin table definitions (populated dynamically)
-3. Proper foreign key relationships and constraints
+1. Core application tables (users, teams, MCP configurations, etc.)
+2. Background job queue tables (`queueJobs` and `queueJobBatches`) - see [Background Job Queue](/development/backend/job-queue)
+3. Plugin table definitions (populated dynamically)
+4. Proper foreign key relationships and constraints
**Important**: Only `schema.sqlite.ts` should be edited for schema changes. All databases use SQLite syntax.
diff --git a/docs/development/backend/index.mdx b/docs/development/backend/index.mdx
index 427d73d..123a370 100644
--- a/docs/development/backend/index.mdx
+++ b/docs/development/backend/index.mdx
@@ -5,7 +5,7 @@ sidebar: Getting Started
---
import { Card, Cards } from 'fumadocs-ui/components/card';
-import { Database, Shield, Plug, Settings, Mail, TestTube, Wrench, BookOpen, Terminal } from 'lucide-react';
+import { Database, Shield, Plug, Settings, Mail, TestTube, Wrench, BookOpen, Terminal, ListTodo } from 'lucide-react';
# DeployStack Backend Development
@@ -104,6 +104,14 @@ The development server starts at `http://localhost:3000` with API documentation
>
API endpoints for satellite registration, configuration management, and command orchestration with polling-based communication.
+
+  <Card
+    icon={<ListTodo />}
+    href="/deploystack/development/backend/job-queue"
+    title="Background Job Queue"
+  >
+    Database-backed job processing system with persistent storage, automatic retries, and rate limiting for long-running background tasks.
+  </Card>
## Project Structure
diff --git a/docs/development/backend/job-queue.mdx b/docs/development/backend/job-queue.mdx
new file mode 100644
index 0000000..0b00d3a
--- /dev/null
+++ b/docs/development/backend/job-queue.mdx
@@ -0,0 +1,533 @@
+---
+title: Background Job Queue
+description: Database-backed job processing system with persistent storage, automatic retries, and rate limiting for long-running background tasks in DeployStack.
+---
+
+import { Callout } from 'fumadocs-ui/components/callout';
+
+# Background Job Queue System
+
+The DeployStack backend includes a custom background job queue system for processing long-running tasks that cannot be completed within a typical HTTP request/response cycle. The system uses database-backed persistence with automatic retries and rate limiting.
+
+## Overview
+
+The job queue system solves the challenge of processing tasks that require:
+
+- **Extended execution time** - Operations taking minutes to hours
+- **External API dependencies** - Calls to third-party services with rate limits
+- **Large-scale batch operations** - Processing hundreds or thousands of items
+- **Failure resilience** - Automatic retry logic for transient errors
+- **Progress tracking** - Monitor task completion status
+
+## Architecture
+
+The system consists of four core components working together:
+
+### JobQueueService
+CRUD operations for managing jobs in the database. Handles job creation, status updates, and retrieval of pending jobs.
+
+### JobProcessorService
+Background worker loop that polls the database every second, processes jobs sequentially, respects rate limits via `scheduled_for` timestamps, and implements exponential backoff for retries.
+
+### Worker Registry
+Plugin-style pattern where each worker implements the `Worker` interface. Workers are registered by type (e.g., `send_email`, `process_csv`, `sync_registry`) and execute specific job types.
+
+### Database Tables
+Two tables provide persistence: `queueJobs` stores individual jobs with payload and status, while `queueJobBatches` tracks groups of related jobs for progress monitoring.
+
+## Core Concepts
+
+### Jobs
+Individual units of work with a specific type, JSON payload, and status tracking. Jobs flow through states: `pending` → `processing` → `completed` or `failed`.
+
+### Workers
+Execution handlers that implement the `Worker` interface. Each worker is responsible for one job type and receives payload data plus context (database, logger).
+
+### Batches
+Logical grouping of related jobs that enables tracking progress across multiple jobs. Useful for operations like sending 1,000 emails or processing a large CSV file.
+
+### Rate Limiting
+Built-in support through the `scheduled_for` field. Jobs scheduled for future execution remain in `pending` state until their scheduled time, preventing API rate limit violations.
+
+## When to Use Job Queue vs Events
+
+<Callout>
+The job queue system complements rather than replaces the [Global Event Bus](/development/backend/events).
+</Callout>
+
+**Use Job Queue for:**
+- Long-running operations (>30 seconds)
+- Tasks requiring retry logic
+- Operations that must survive server restarts
+- Batch processing with progress tracking
+- Rate-limited external API calls
+
+**Use Event Bus for:**
+- Fire-and-forget notifications
+- Real-time event distribution
+- Triggering multiple actions from one event
+- Low-latency intra-application messaging
+
+**Combining Both:**
+Event listeners can create jobs for heavy processing:
+
+```typescript
+// Event listener creates job for heavy work
+eventBus.on('user.registered', async (eventData, context) => {
+ await jobQueueService.createJob('send_welcome_email', {
+ userId: eventData.userId,
+ email: eventData.data.email
+ });
+});
+```
+
+## Creating Workers
+
+Workers live in `services/backend/src/workers/` and implement the `Worker` interface from `workers/types.ts`.
+
+### Worker Interface
+
+```typescript
+interface Worker {
+  execute(payload: unknown, jobId: string): Promise<WorkerResult>;
+}
+
+interface WorkerResult {
+ success: boolean;
+ message?: string;
+ data?: any;
+}
+```
+
+### Basic Worker Pattern
+
+```typescript
+import type { AnyDatabase } from '../db';
+import type { FastifyBaseLogger } from 'fastify';
+import type { Worker, WorkerResult } from './types';
+
+interface EmailPayload {
+ to: string;
+ subject: string;
+ body: string;
+}
+
+export class EmailWorker implements Worker {
+ constructor(
+ private readonly db: AnyDatabase,
+ private readonly logger: FastifyBaseLogger
+ ) {}
+
+  async execute(payload: unknown, jobId: string): Promise<WorkerResult> {
+ if (!this.isValidPayload(payload)) {
+ return {
+ success: false,
+ message: 'Invalid payload format'
+ };
+ }
+
+ const emailPayload = payload as EmailPayload;
+
+ try {
+ await this.sendEmail(emailPayload);
+
+ this.logger.info({
+ jobId,
+ to: emailPayload.to,
+ operation: 'send_email'
+ }, 'Email sent successfully');
+
+ return {
+ success: true,
+ message: 'Email sent successfully'
+ };
+ } catch (error) {
+ this.logger.error({
+ jobId,
+ error,
+ operation: 'send_email'
+ }, 'Failed to send email');
+
+ throw error; // Triggers retry logic
+ }
+ }
+
+ private isValidPayload(payload: unknown): payload is EmailPayload {
+ if (typeof payload !== 'object' || payload === null) return false;
+ const p = payload as any;
+ return typeof p.to === 'string' &&
+ typeof p.subject === 'string' &&
+ typeof p.body === 'string';
+ }
+
+  private async sendEmail(payload: EmailPayload): Promise<void> {
+ // Implementation
+ }
+}
+```
+
+### Worker Registration
+
+Register workers in `workers/index.ts`:
+
+```typescript
+import { EmailWorker } from './emailWorker';
+
+export function registerWorkers(
+ processor: JobProcessorService,
+ db: AnyDatabase,
+ logger: FastifyBaseLogger
+) {
+ processor.registerWorker('send_email', new EmailWorker(db, logger));
+
+ logger.info('Workers registered successfully');
+}
+```
+
+### Error Handling Strategies
+
+**Retriable Errors** - Throw errors for temporary failures that might succeed on retry:
+
+```typescript
+async execute(payload: unknown): Promise<WorkerResult> {
+ try {
+ await this.callExternalApi(payload);
+ return { success: true };
+ } catch (error) {
+ throw error; // JobProcessor handles exponential backoff
+ }
+}
+```
+
+**Non-Retriable Errors** - Return failure for permanent errors that won't be fixed by retrying:
+
+```typescript
+async execute(payload: unknown): Promise<WorkerResult> {
+ if (!this.isValid(payload)) {
+ return {
+ success: false,
+ message: 'Invalid data format - will not retry'
+ };
+ }
+}
+```
+
+### Worker Best Practices
+
+1. **Keep Workers Stateless** - No state between executions
+2. **Inject Dependencies** - Database, logger, and services through constructor
+3. **Validate Payloads** - Always validate before processing
+4. **Use Structured Logging** - Include context objects with operation, jobId, and relevant data
+5. **Single Responsibility** - One worker per job type
+6. **Make Testable** - Design for easy unit testing with mocked dependencies
+
+## Creating Jobs
+
+### From API Routes
+
+```typescript
+server.post('/api/users/:id/send-welcome', async (request, reply) => {
+ const { id } = request.params;
+
+ const job = await jobQueueService.createJob('send_welcome_email', {
+ userId: id,
+ email: user.email
+ });
+
+ return { jobId: job.id, message: 'Email queued' };
+});
+```
+
+### With Rate Limiting
+
+```typescript
+// Schedule jobs 1 second apart to respect API limits
+for (let i = 0; i < users.length; i++) {
+ await jobQueueService.createJob('sync_user_data', {
+ userId: users[i].id
+ }, {
+ scheduledFor: new Date(Date.now() + (i * 1000))
+ });
+}
+```
+
+### Batch Operations
+
+```typescript
+const batch = await jobQueueService.createBatch(
+ 'process_users',
+ userIds.length,
+ { source: 'admin_action', requestedBy: adminId }
+);
+
+for (const userId of userIds) {
+ await jobQueueService.createJob('process_user',
+ { userId },
+ { batchId: batch.id }
+ );
+}
+```
+
+## Server Integration
+
+The job queue integrates with the backend server lifecycle in `server.ts` within the `initializeDatabaseDependentServices` function.
+
+### Initialization
+
+The job queue system is initialized automatically after the database is ready:
+
+```typescript
+// In initializeDatabaseDependentServices function
+try {
+  server.log.debug('🚀 Initializing Job Queue System...');
+ const { JobQueueService } = await import('./services/jobQueueService');
+ const { JobProcessorService } = await import('./services/jobProcessorService');
+ const { registerWorkers } = await import('./workers');
+
+ // Initialize JobQueueService
+ const jobQueueService = new JobQueueService(dbInstance, server.log);
+  server.log.debug('✅ JobQueueService initialized');
+
+ // Initialize JobProcessorService (pass db and logger)
+ const jobProcessorService = new JobProcessorService(dbInstance, server.log);
+  server.log.debug('✅ JobProcessorService initialized');
+
+ // Register workers
+ registerWorkers(jobProcessorService, dbInstance, server.log);
+  server.log.debug('✅ Workers registered');
+
+ // Start processing jobs
+ await jobProcessorService.start();
+  server.log.info('✅ Job Queue System started and processing jobs');
+
+ // Decorate server with job services for use in routes
+ if (!server.hasDecorator('jobQueueService')) {
+ server.decorate('jobQueueService', jobQueueService);
+ } else {
+ (server as any).jobQueueService = jobQueueService;
+ }
+
+ if (!server.hasDecorator('jobProcessorService')) {
+ server.decorate('jobProcessorService', jobProcessorService);
+ } else {
+ (server as any).jobProcessorService = jobProcessorService;
+ }
+
+} catch (jobQueueError) {
+ server.log.error({
+ error: jobQueueError,
+ message: jobQueueError instanceof Error ? jobQueueError.message : 'Unknown error',
+ stack: jobQueueError instanceof Error ? jobQueueError.stack : 'No stack trace'
+  }, '❌ Job Queue System failed to initialize:');
+  server.log.warn('⚠️ Continuing without Job Queue System due to error');
+}
+```
+
+### Graceful Shutdown
+
+Graceful shutdown is handled in the `onClose` hook to ensure current jobs complete before server shutdown:
+
+```typescript
+server.addHook('onClose', async () => {
+ // Stop job processor first to gracefully finish current jobs
+ if ((server as any).jobProcessorService) {
+ server.log.info('Stopping job processor...');
+ await (server as any).jobProcessorService.stop();
+ server.log.info('Job processor stopped.');
+ }
+
+ await pluginManager.shutdownPlugins();
+ const rawConn = server.rawDbConnection;
+ if (rawConn) {
+ const status = getDbStatus();
+ if (status.dialect === 'sqlite' && 'close' in rawConn) {
+ (rawConn as SqliteDriver.Database).close();
+ server.log.info('SQLite connection closed.');
+ }
+ }
+});
+```
+
+The job processor's `stop()` method will:
+1. Stop accepting new jobs from the queue
+2. Wait for the current job to complete (with 30-second timeout)
+3. Clean up resources
+
+This ensures no jobs are interrupted mid-execution during server shutdown.
+
+## Job Monitoring
+
+### Job Status Lifecycle
+
+Jobs transition through these states:
+
+1. **pending** - Job created, waiting to be processed
+2. **processing** - Currently executing
+3. **completed** - Executed successfully
+4. **failed** - Execution failed after all retries
+
+### Database Queries
+
+Check job status:
+
+```sql
+SELECT id, type, status, created_at, started_at, completed_at, attempts
+FROM queue_jobs
+WHERE id = ?;
+```
+
+Monitor batch progress:
+
+```sql
+SELECT
+ b.id,
+ b.total_jobs,
+ COUNT(CASE WHEN j.status = 'completed' THEN 1 END) as completed,
+ COUNT(CASE WHEN j.status = 'failed' THEN 1 END) as failed,
+ COUNT(CASE WHEN j.status = 'processing' THEN 1 END) as processing
+FROM queue_job_batches b
+LEFT JOIN queue_jobs j ON j.batch_id = b.id
+WHERE b.id = ?
+GROUP BY b.id;
+```
+
+### Common Issues
+
+**Jobs Not Processing:**
+- Verify JobProcessorService is started
+- Check worker is registered for job type
+- Review server logs for errors
+
+**Jobs Stuck in Processing:**
+- Server crashed during job execution
+- Manual intervention: Set status back to `pending`
+- System will retry after exponential backoff
+
+**High Retry Count:**
+- Worker throwing errors unnecessarily
+- External service temporarily unavailable
+- Review worker error handling logic
+
+## Database Schema
+
+<Callout>
+For the complete database schema, see [schema.sqlite.ts](https://github.com/deploystackio/deploystack/blob/main/services/backend/src/db/schema.sqlite.ts) in the backend directory.
+</Callout>
+
+### Jobs Table
+
+```sql
+CREATE TABLE queue_jobs (
+ id TEXT PRIMARY KEY,
+ type TEXT NOT NULL,
+ payload TEXT,
+ status TEXT DEFAULT 'pending',
+ attempts INTEGER DEFAULT 0,
+ max_attempts INTEGER DEFAULT 3,
+ scheduled_for INTEGER,
+ created_at INTEGER DEFAULT (unixepoch()),
+ started_at INTEGER,
+ completed_at INTEGER,
+ last_error TEXT,
+ batch_id TEXT,
+ FOREIGN KEY (batch_id) REFERENCES queue_job_batches(id)
+);
+```
+
+### Batches Table
+
+```sql
+CREATE TABLE queue_job_batches (
+ id TEXT PRIMARY KEY,
+ name TEXT NOT NULL,
+ total_jobs INTEGER NOT NULL,
+ created_at INTEGER DEFAULT (unixepoch()),
+ metadata TEXT
+);
+```
+
+## System Behavior
+
+### Processing Loop
+
+1. Poll database every 1 second for pending jobs
+2. Check if job's `scheduled_for` time has passed
+3. Lock job by setting status to `processing`
+4. Execute worker for job type
+5. Update status based on result
+6. Implement exponential backoff for retries (1s, 2s, 4s, etc.)
+
+### Resource Usage
+
+- **CPU**: \<5% during normal operation (mostly waiting)
+- **Memory**: Minimal footprint, jobs processed sequentially
+- **Database**: Single query per second, additional writes during processing
+- **Non-blocking**: Async processing doesn't block main event loop
+
+### Performance Characteristics
+
+- **Throughput**: 1-60 jobs per minute depending on job duration
+- **Latency**: 1-second maximum delay before job starts
+- **Concurrency**: Sequential processing prevents resource overload
+- **Scalability**: Suitable for small to medium deployments
+
+## Design Decisions
+
+### Why Database-Backed?
+
+No additional infrastructure required (Redis, message queues). Uses existing SQLite/Turso database, and jobs persist across server restarts.
+
+### Why Sequential Processing?
+
+Prevents server resource exhaustion from hundreds of concurrent operations. Simplifies rate limiting for external APIs. Adequate for most use cases (1,000 items = 15-30 minutes).
+
+### Why 1-Second Polling?
+
+Balance between responsiveness and database load. Adequate latency for background tasks. Can be adjusted if needed.
+
+### Limitations
+
+- Not suitable for sub-second latency requirements
+- Single-server deployment (no distributed workers)
+- No built-in job scheduling (cron-like patterns)
+- Sequential processing limits throughput
+
+## Migration Path
+
+If scaling beyond single-server becomes necessary, clear upgrade paths exist:
+
+- **Redis Backend**: Migrate to BullMQ for distributed processing
+- **PostgreSQL**: Switch to pg-boss or Graphile Worker
+- **Cloud Queues**: Move to AWS SQS, Google Cloud Tasks, etc.
+
+Worker interface remains compatible, simplifying migration.
+
+## Related Documentation
+
+- [Database Management](/development/backend/database) - Database configuration and schema
+- [Global Event Bus](/development/backend/events) - Event system for real-time notifications
+- [Logging](/development/backend/logging) - Logging best practices and patterns
+- [API Documentation](/development/backend/api) - REST API endpoints and patterns
+
+## Common Use Cases
+
+### Batch Email Sending
+Send emails to hundreds of users with rate limiting to avoid SMTP throttling.
+
+### CSV File Processing
+Process uploaded files row-by-row, validate data, and store results.
+
+### External API Synchronization
+Fetch data from third-party APIs respecting rate limits (e.g., 1 request per second).
+
+### Database Backups
+Schedule periodic database backups and upload to cloud storage.
+
+### Report Generation
+Generate complex reports from large datasets without blocking API requests.
+
+## Summary
+
+The background job queue system provides a simple, reliable way to process long-running tasks in DeployStack. Built on familiar SQLite/Turso infrastructure, it requires no additional services while providing persistence, retry logic, and rate limiting. Workers follow a straightforward pattern making them easy to implement and test.
+
+For routine operations, the system handles thousands of jobs efficiently. For specialized needs requiring higher throughput or distributed processing, the architecture supports clear migration paths to more advanced solutions.
diff --git a/docs/development/backend/mcp-configuration-architecture.mdx b/docs/development/backend/mcp-configuration-architecture.mdx
index a52e75b..289b973 100644
--- a/docs/development/backend/mcp-configuration-architecture.mdx
+++ b/docs/development/backend/mcp-configuration-architecture.mdx
@@ -8,6 +8,28 @@ sidebar: MCP Configuration Architecture
DeployStack implements a sophisticated three-tier configuration architecture for managing MCP server command line arguments and environment variables. This system supports multi-user teams while maintaining clean separation between fixed template parameters, shared team settings, and individual user configurations.
+## Server Sources
+
+MCP servers in the catalog come from two sources, both using the same three-tier configuration architecture:
+
+### Official Registry Servers (Automatic)
+
+- **Source**: Synced automatically from registry.modelcontextprotocol.io
+- **Schema Creation**: Automatic transformation via `RegistrySyncService` and background jobs
+- **Configuration Mapping**: Environment variables from registry automatically categorized into three tiers
+- **Transport Detection**: Automatically derived from `packages` (stdio) or `remotes` (HTTP/SSE)
+- **Maintenance**: Updates sync automatically from official registry
+
+### Manual Servers (Custom)
+
+- **Source**: Created manually by global administrators
+- **Schema Creation**: Manual categorization through Configuration Schema Step
+- **Configuration Mapping**: Precise admin control over every element
+- **Transport**: Explicitly configured by administrator
+- **Maintenance**: Manual updates as needed
+
+**Key Point**: Both server types use the identical three-tier configuration system at runtime. The difference is in how schemas are initially created.
+
## Architecture Overview
The three-tier system separates MCP server configuration into distinct layers:
@@ -66,18 +88,49 @@ The three-tier system addresses this by:
The catalog defines the configuration structure for each MCP server type:
+**Core Configuration Fields:**
```sql
-- Template Level (with lock controls)
template_args: text('template_args') -- [{value, locked, description}]
template_env: text('template_env') -- Fixed environment variables
+template_headers: text('template_headers') -- Fixed HTTP headers (for remotes)
-- Team Schema (with lock/visibility controls)
team_args_schema: text('team_args_schema') -- Schema with lock controls
team_env_schema: text('team_env_schema') -- [{name, type, required, default_team_locked, visible_to_users}]
+team_headers_schema: text('team_headers_schema') -- HTTP headers schema
-- User Schema
user_args_schema: text('user_args_schema') -- User-configurable argument schema
user_env_schema: text('user_env_schema') -- User-configurable environment schema
+user_headers_schema: text('user_headers_schema') -- User HTTP headers schema
+```
+
+**Transport Configuration:**
+```sql
+transport_type: text('transport_type') -- 'stdio' | 'http' | 'sse'
+packages: text('packages') -- JSON: npm/pip/docker packages (stdio)
+remotes: text('remotes') -- JSON: HTTP/SSE endpoints
+```
+
+**Official Registry Tracking Fields:**
+```sql
+official_name: text('official_name') -- Reverse-DNS name from registry
+synced_from_official_registry: boolean -- True if synced from registry
+official_registry_server_id: text -- Registry's server identifier
+official_registry_version_id: text -- Registry's version identifier
+official_registry_published_at: timestamp -- Original publication date
+official_registry_updated_at: timestamp -- Last update in registry
+```
+
+**GitHub Enhancement Fields:**
+```sql
+repository_source: text -- 'github' | 'gitlab' | 'bitbucket'
+repository_id: text -- Platform-specific repo ID
+repository_subfolder: text -- Monorepo subfolder path
+github_account_id: text -- For avatar URLs
+github_readme_base64: text -- Cached README content
+github_stars: integer -- Star count for social proof
```
### Tier 2: Team Installation (`mcpServerInstallations`)
@@ -139,6 +192,31 @@ const assembleConfiguration = (server, teamInstallation, userConfig) => {
## Service Layer
+### RegistrySyncService
+
+Manages automatic synchronization with the official MCP Registry:
+
+**Key Responsibilities:**
+- Fetches server list from registry.modelcontextprotocol.io
+- Creates job queue batches for progress tracking
+- Schedules individual server sync jobs with rate limiting
+- Coordinates with `McpServerSyncWorker` for transformation
+
+**Sync Process:**
+1. Fetch servers from official registry (with pagination)
+2. Filter out existing servers (if `skipExisting` enabled)
+3. Create job batch for tracking
+4. Create individual jobs with scheduled delays (rate limiting)
+5. Job queue processes sequentially via `McpServerSyncWorker`
+
+**Configuration Options:**
+- `maxServers`: Limit number of servers to sync (for testing)
+- `skipExisting`: Skip servers already in database
+- `forceRefresh`: Force refresh of existing servers
+- `rateLimitDelay`: Seconds between jobs (default: 2)
+
+For complete job queue details, see [Job Queue System](/development/backend/job-queue).
+
### McpUserConfigurationService
The service layer provides complete CRUD operations for user configurations:
@@ -157,6 +235,46 @@ The service layer provides complete CRUD operations for user configurations:
- Schema validation against server-defined schemas
- Input sanitization and type checking
+## Automatic Schema Transformation
+
+When servers are synced from the official MCP Registry, their configurations are automatically transformed to the three-tier system:
+
+### Environment Variable Mapping
+
+**Registry Format → DeployStack Tiers:**
+
+- **Template Level**: Fixed values provided in registry
+- **Team Level**: `isRequired: true` + `isSecret: true` → Encrypted team secrets
+- **Team Level**: `isRequired: true` + `isSecret: false` → Required team settings
+- **User Level**: `isRequired: false` → Optional personal preferences
+
+**Example Transformation:**
+
+Official registry environment variables:
+```json
+[
+ {"name": "API_KEY", "isRequired": true, "isSecret": true},
+ {"name": "DEBUG", "isRequired": false, "default": "false"}
+]
+```
+
+Automatically mapped to:
+- `API_KEY` → `team_env_schema` (encrypted, `default_team_locked: true`, `visible_to_users: false`)
+- `DEBUG` → `user_env_schema` (unlocked, user-configurable)
+
+### Transport Detection
+
+**STDIO Servers (packages):**
+- Command and package name → `template_args` (locked)
+- Runtime arguments → Team/user schemas based on registry metadata
+
+**HTTP/SSE Servers (remotes):**
+- URL → `template_env` or embedded in remotes config (locked)
+- Authentication headers → `team_headers_schema` (secrets)
+- Optional headers → `user_headers_schema` (personal preferences)
+
+The transformation layer (`officialRegistryTransforms.ts`) handles all automatic mapping without admin intervention.
+
## API Endpoints
@@ -167,36 +285,121 @@ Configuration management through REST API:
- User configurations: `/api/teams/{teamId}/mcp/installations/{installationId}/user-configs/`
- Schema validation: Built into all endpoints
-## Schema Example
+## Schema Examples
+
+### Manual Server Schema
-Configuration schema with lock/unlock controls:
+Configuration schema created manually by global administrator:
```json
{
+ "name": "Custom Company API",
+ "transport_type": "stdio",
+ "synced_from_official_registry": false,
"template_args": [
{"value": "-y", "locked": true, "description": ""},
- {"value": "@modelcontextprotocol/server-memory", "locked": true, "description": ""}
+ {"value": "@company/api-server", "locked": true, "description": ""}
],
"team_env_schema": [
{
- "name": "MEMORY_FILE_PATH",
- "type": "string",
+ "name": "COMPANY_API_KEY",
+ "type": "secret",
"required": true,
"default_team_locked": true,
- "visible_to_users": false
+ "visible_to_users": false,
+ "description": "Company API authentication key"
}
],
"user_env_schema": [
{
"name": "DEBUG_MODE",
- "type": "string",
+ "type": "boolean",
"required": false,
- "locked": false
+ "default": "false",
+ "description": "Enable debug logging"
}
]
}
```
+### Synced Server Schema
+
+Configuration schema automatically transformed from official registry:
+
+```json
+{
+ "name": "Context7",
+ "official_name": "io.github.upstash/context7",
+ "transport_type": "stdio",
+ "synced_from_official_registry": true,
+ "official_registry_server_id": "srv_abc123",
+ "packages": [
+ {
+ "registryType": "npm",
+ "identifier": "@upstash/context7",
+ "transport": {
+ "type": "stdio",
+ "command": "npx",
+ "args": ["-y", "@upstash/context7"]
+ },
+ "environmentVariables": [
+ {
+ "name": "UPSTASH_REDIS_URL",
+ "isRequired": true,
+ "isSecret": false
+ },
+ {
+ "name": "UPSTASH_REDIS_TOKEN",
+ "isRequired": true,
+ "isSecret": true
+ },
+ {
+ "name": "DEBUG",
+ "isRequired": false,
+ "default": "false"
+ }
+ ]
+ }
+ ],
+ "template_args": [
+ {"value": "-y", "locked": true},
+ {"value": "@upstash/context7", "locked": true}
+ ],
+ "team_env_schema": [
+ {
+ "name": "UPSTASH_REDIS_URL",
+ "type": "string",
+ "required": true,
+ "default_team_locked": true,
+ "visible_to_users": true
+ },
+ {
+ "name": "UPSTASH_REDIS_TOKEN",
+ "type": "secret",
+ "required": true,
+ "default_team_locked": true,
+ "visible_to_users": false
+ }
+ ],
+ "user_env_schema": [
+ {
+ "name": "DEBUG",
+ "type": "boolean",
+ "required": false,
+ "default": "false"
+ }
+ ],
+ "github_stars": 142,
+ "github_account_id": "12345678"
+}
+```
+
+**Key Differences:**
+- Synced servers include `official_name` and registry tracking fields
+- Synced servers have `packages` array with original registry format preserved
+- Schema transformation is automatic based on `isRequired` and `isSecret` properties
+- GitHub metadata automatically populated during sync
+
## Related Documentation
@@ -205,8 +408,10 @@ For specific implementation details:
- [Backend API](/development/backend/api) - Complete API endpoint documentation
- [Database Schema](/development/backend/database) - Database structure and relationships
+- [Job Queue System](/development/backend/job-queue) - Background job processing for registry sync
- [Teams](/teams) - Team management and structure
- [MCP Configuration System](/mcp-configuration) - User-facing configuration guide
- [MCP Installation](/mcp-installation) - Installation and team setup
+- [MCP Catalog](/mcp-catalog) - Official registry integration details
-The three-tier configuration architecture provides a robust foundation for managing complex MCP server configurations in multi-user team environments while maintaining security, flexibility, and ease of use.
+The three-tier configuration architecture provides a robust foundation for managing complex MCP server configurations in multi-user team environments while maintaining security, flexibility, and ease of use. The system seamlessly handles both manually created custom servers and automatically synced official registry servers.
diff --git a/docs/development/satellite/architecture.mdx b/docs/development/satellite/architecture.mdx
index fc07643..9237bb4 100644
--- a/docs/development/satellite/architecture.mdx
+++ b/docs/development/satellite/architecture.mdx
@@ -17,11 +17,11 @@ DeployStack Satellite is an edge worker service that manages MCP servers with du
Satellites operate as edge workers similar to GitHub Actions runners, providing:
- **MCP Transport Protocols**: SSE, Streamable HTTP, Direct HTTP communication
-- **Dual MCP Server Management**: HTTP proxy + stdio subprocess support (planned)
-- **Team Isolation**: Linux namespaces, cgroups v2, resource jailing (planned)
-- **OAuth 2.1 Resource Server**: Token introspection with Backend (planned)
+- **Dual MCP Server Management**: HTTP proxy + stdio subprocess support (ready for implementation)
+- **Team Isolation**: nsjail sandboxing with built-in resource limits (ready for implementation)
+- **OAuth 2.1 Resource Server**: Token introspection with Backend (implemented)
- **Backend Polling Communication**: Outbound-only, firewall-friendly (implemented)
-- **Process Lifecycle Management**: Spawn, monitor, terminate MCP servers (planned)
+- **Process Lifecycle Management**: Spawn, monitor, terminate MCP servers (ready for implementation)
## Current Implementation Architecture
@@ -315,21 +315,24 @@ For complete implementation details, see [Backend Polling Implementation](/devel
- **Scope-Based Access**: Fine-grained permissions
- **Team Context**: Automatic team resolution from tokens
-## MCP Server Management (Planned)
+## MCP Server Management
### Dual MCP Server Support
**stdio Subprocess Servers:**
-- **Local Execution**: MCP servers as child processes
-- **JSON-RPC Communication**: Standard MCP protocol
-- **Process Lifecycle**: Spawn, monitor, terminate
-- **Team Isolation**: Processes isolated per team
+- **Local Execution**: MCP servers as Node.js child processes
+- **JSON-RPC Communication**: Full MCP protocol 2025-11-05 over stdin/stdout
+- **Process Lifecycle**: Spawn, monitor, auto-restart (max 3 attempts), terminate
+- **Team Isolation**: Processes tracked by team_id with environment-based security
+- **Tool Discovery**: Automatic tool caching with namespacing
+- **Resource Limits**: nsjail in production (100MB RAM, 60s CPU, 50 processes)
+- **Development Mode**: Plain spawn() on all platforms for easy debugging
**HTTP Proxy Servers:**
- **External Endpoints**: Proxy to remote MCP servers
- **Load Balancing**: Distribute requests across instances
- **Health Monitoring**: Endpoint availability checks
-- **Caching**: Response caching for performance
+- **Tool Discovery**: Automatic at startup from remote endpoints
### Process Management
@@ -357,11 +360,13 @@ Configuration → Spawn → Monitor → Health Check → Restart/Terminate
- **Session Management**: Cryptographically secure session handling
- **JSON-RPC 2.0**: Full protocol compliance with error handling
-### Phase 2: MCP Server Process Management (Next)
-- **Process Lifecycle**: Spawn, monitor, terminate MCP servers
-- **stdio Communication**: JSON-RPC with local processes
-- **Basic Health Monitoring**: Process health checks
-- **Simple Configuration**: Static MCP server definitions
+### Phase 2: MCP Server Process Management ✅ COMPLETED
+- **Process Lifecycle**: Spawn, monitor, terminate MCP servers with auto-restart
+- **stdio Communication**: JSON-RPC 2.0 over stdin/stdout with buffer-based parsing
+- **Tool Discovery**: Discover and cache tools from stdio MCP servers
+- **Health Monitoring**: Process health checks and crash detection
+- **Auto-Restart**: Max 3 attempts with exponential backoff, then permanently_failed status
+- **Team-Aware Reporting**: processes_by_team in heartbeat every 30 seconds
### Phase 3: Team Isolation
- **Resource Boundaries**: CPU and memory limits
@@ -469,9 +474,3 @@ The satellite service has completed **Phase 1: MCP Transport Implementation** an
- **Logging System**: Pino with structured logging
- **Build Pipeline**: TypeScript compilation and bundling
- **Development Workflow**: Hot reload and code quality tools
-
-Next milestone: **Phase 2 - MCP Server Process Management** with stdio JSON-RPC communication.
-
-
-**Current Status**: The satellite service has completed Phase 1 (MCP Transport Implementation) and Phase 4 (Backend Integration). It provides full external client interface support and complete backend communication including command orchestration, configuration management, and status reporting. The next major milestone is implementing MCP server process management (Phase 2) to enable actual MCP server hosting.
-
diff --git a/docs/development/satellite/commands.mdx b/docs/development/satellite/commands.mdx
index 29b25b3..5132848 100644
--- a/docs/development/satellite/commands.mdx
+++ b/docs/development/satellite/commands.mdx
@@ -55,10 +55,14 @@ Each satellite command contains:
**Satellite Actions**:
1. Fetch updated MCP server configurations from backend
2. Compare with existing configurations using hash-based change detection
-3. Spawn new MCP server processes for added/modified servers
-4. Terminate MCP server processes for deleted installations
-5. Update HTTP proxy routes for new/removed MCP servers
-6. Perform tool discovery on newly spawned servers
+3. Spawn new MCP servers based on transport type:
+ - **stdio transport**: Spawn Node.js subprocess via ProcessManager
+ - **HTTP transport**: Configure HTTP proxy routes
+4. Terminate MCP servers for deleted installations:
+ - **stdio transport**: Graceful process termination (SIGTERM → SIGKILL)
+ - **HTTP transport**: Remove proxy routes
+5. Perform tool discovery on newly spawned/configured servers
+6. Report process status to backend via team-grouped heartbeat
### restart
diff --git a/docs/development/satellite/index.mdx b/docs/development/satellite/index.mdx
index 3c48e48..290feab 100644
--- a/docs/development/satellite/index.mdx
+++ b/docs/development/satellite/index.mdx
@@ -13,7 +13,7 @@ DeployStack Satellites are **edge workers** (similar to GitHub Actions runners)
## Current Implementation Status
-The satellite service has completed **Phase 1: MCP Transport Implementation** with working external client interfaces:
+The satellite service has completed the following:
- ✅ **Fastify HTTP Server** with Swagger API documentation
- ✅ **Pino Logging System** identical to backend configuration
@@ -22,9 +22,12 @@ The satellite service has completed **Phase 1: MCP Transport Implementation** wi
- ✅ **JSON-RPC 2.0 Protocol** compliance for MCP communication
- ✅ **TypeScript + Webpack** build system with full type safety
- ✅ **Development Workflow** with hot reload and linting
-- π§ **MCP Server Process Management** (planned)
-- π§ **Team Isolation** (planned)
-- π§ **Backend Communication** (planned)
+- ✅ **Backend Communication** (polling, commands, heartbeat with team-grouped processes)
+- ✅ **OAuth 2.1 Authentication** (token introspection, team context)
+- ✅ **stdio MCP Server Process Management** (spawn, monitor, auto-restart, terminate)
+- ✅ **Team Isolation** (environment-based: nsjail in production, plain spawn in dev)
+- ✅ **Auto-Restart Protection** (max 3 attempts, permanently_failed status)
+- ✅ **Tool Discovery** (HTTP and stdio MCP servers)
## Architecture Vision
@@ -217,28 +220,31 @@ curl -X POST http://localhost:3001/mcp \
- **Build System**: TypeScript compilation with Webpack bundling
- **Release Management**: Conventional changelog with release-it
-## Planned Features (Roadmap)
+## Implemented Features
-### Phase 2: MCP Server Process Management
-- **Process Lifecycle**: Spawn, monitor, and terminate MCP server processes
-- **stdio Communication**: JSON-RPC communication with local MCP servers
-- **HTTP Proxy**: Reverse proxy for external MCP server endpoints
-- **Health Monitoring**: Process health checks and automatic restart
+### Phase 2: MCP Server Process Management ✅ COMPLETED
+- **Process Lifecycle**: Spawn, monitor, auto-restart (max 3), and terminate MCP servers
+- **stdio Communication**: Full JSON-RPC 2.0 protocol over stdin/stdout
+- **HTTP Proxy**: Reverse proxy for external MCP server endpoints ✅ working
+- **Health Monitoring**: Process crash detection with auto-restart
+- **Resource Limits**: nsjail with 100MB RAM, 60s CPU, 50 processes (production Linux)
+- **Tool Discovery**: Automatic tool caching from both HTTP and stdio servers
+- **Team-Grouped Heartbeat**: processes_by_team reporting every 30 seconds
-### Phase 3: Team Isolation
-- **Resource Boundaries**: CPU and memory limits per team
-- **Process Isolation**: Separate process groups and namespaces
-- **Filesystem Isolation**: Team-specific directories and permissions
-- **Credential Management**: Secure team credential injection
+### Phase 3: Team Isolation (Infrastructure Ready)
+- **nsjail Sandboxing**: Complete process isolation with built-in resource limits
+- **Namespace Isolation**: PID, mount, UTS, IPC namespaces per team
+- **Filesystem Isolation**: Team-specific read-only and writable directories
+- **Credential Management**: Secure environment injection via nsjail
-### Phase 4: Backend Integration
+### Phase 4: Backend Integration ✅ COMPLETED
- **HTTP Polling**: Outbound communication with DeployStack Backend
- **Configuration Sync**: Dynamic configuration updates from Backend
- **Status Reporting**: Real-time satellite health and usage metrics
- **Command Processing**: Execute Backend commands with acknowledgment
### Phase 5: Enterprise Features
-- **OAuth 2.1 Authentication**: Resource server with token introspection
+- **OAuth 2.1 Authentication**: Resource server with token introspection ✅ COMPLETED
- **Audit Logging**: Complete audit trails for compliance
- **Multi-Region Support**: Global satellite deployment
- **Auto-Scaling**: Dynamic resource allocation based on demand
@@ -296,8 +302,3 @@ When contributing to satellite development:
5. **Consider Enterprise**: Design features with team isolation and security in mind
6. **MCP Compliance**: Ensure JSON-RPC 2.0 protocol compliance
-## Next Steps
-
-The satellite service has completed Phase 1 (MCP Transport Implementation) and is ready for Phase 2 development. The next major milestone is implementing MCP server process management, which will enable the core satellite functionality of managing MCP servers on behalf of teams.
-
-For detailed implementation guidance, see the architecture and MCP transport documentation linked above.
diff --git a/docs/development/satellite/tool-discovery.mdx b/docs/development/satellite/tool-discovery.mdx
index 686ce93..ee055dd 100644
--- a/docs/development/satellite/tool-discovery.mdx
+++ b/docs/development/satellite/tool-discovery.mdx
@@ -8,10 +8,18 @@ import { Callout } from 'fumadocs-ui/components/callout';
# Tool Discovery Implementation
-DeployStack Satellite implements automatic tool discovery from remote HTTP MCP servers, providing dynamic tool availability without manual configuration. This system enables MCP clients to discover and execute tools from external MCP servers through the satellite's proxy layer.
+DeployStack Satellite implements automatic tool discovery from MCP servers, providing dynamic tool availability without manual configuration. This system enables MCP clients to discover and execute tools through the satellite's unified interface.
+
+
+**Current Implementation**: This document focuses on tool discovery from HTTP/SSE remote MCP servers. Tool discovery from locally spawned stdio MCP server processes is also implemented (see Phase 2 in [Architecture](/development/satellite/architecture)). Both transport types use the same caching and namespacing approach.
+
For information about the overall satellite architecture, see [Satellite Architecture Design](/development/satellite/architecture). For details about the MCP transport protocols that expose discovered tools, see [MCP Transport Protocols](/development/satellite/mcp-transport).
+## Current Implementation: HTTP Tool Discovery
+
+This document describes the HTTP-based tool discovery system. The same architectural patterns also apply to the stdio transport.
+
## Technical Overview
### Discovery Architecture
diff --git a/docs/mcp-admin-schema-workflow.mdx b/docs/mcp-admin-schema-workflow.mdx
index 4b7bbc2..fc0cb46 100644
--- a/docs/mcp-admin-schema-workflow.mdx
+++ b/docs/mcp-admin-schema-workflow.mdx
@@ -6,40 +6,83 @@ sidebar: Admin Schema Workflow
# MCP Schema Creation Workflow for Global Administrators
-Global administrators transform raw MCP server configurations into structured schemas that enable teams and users to safely configure MCP servers. This workflow creates the foundation for DeployStack's three-tier configuration system.
+Global administrators manage MCP server schemas through two distinct paths: automatic synchronization from the official MCP Registry and manual creation for custom integrations. This workflow creates the foundation for DeployStack's three-tier configuration system.
+
+## Two Paths to Catalog Servers
+
+DeployStack supports adding MCP servers to the catalog through two methods:
+
+### Path 1: Official Registry Sync (Automatic)
+
+**Recommended for public MCP servers**
+
+- **Source**: Synced automatically from registry.modelcontextprotocol.io
+- **Schema Creation**: Automatic transformation via background jobs
+- **Configuration Mapping**: Environment variables automatically categorized into three tiers
+- **Maintenance**: Updates sync automatically from registry
+- **Effort**: One-click sync operation
+
+**How it works**: When you trigger a registry sync, the system automatically:
+1. Fetches server data from the official registry
+2. Transforms package/remote configurations to DeployStack format
+3. Maps environment variables to appropriate tiers (template/team/user)
+4. Enriches with GitHub metadata (README, stars, topics)
+5. Stores in catalog ready for team installation
+
+For details on the sync process, see [MCP Catalog - Official Registry Integration](/mcp-catalog#official-mcp-registry-integration).
+
+### Path 2: Manual Creation (Custom)
+
+**Required for custom integrations not in official registry**
+
+- **Source**: Manually created by global administrators
+- **Schema Creation**: Sophisticated manual categorization process (this document)
+- **Configuration Mapping**: Precise control over every configuration element
+- **Maintenance**: Manual updates as needed
+- **Effort**: Complete four-step workflow with detailed categorization
+
+**When to use**: Custom company integrations, private MCP servers, proprietary tools, or servers not yet in the official registry.
+
+---
+
+**The rest of this document describes Path 2: Manual Creation** for custom MCP servers that require the sophisticated schema categorization process.
## Overview
-When you add new MCP servers to the catalog, you design the entire configuration experience by precisely categorizing every configuration element and setting sophisticated lock/unlock controls:
+When you manually add custom MCP servers to the catalog, you design the entire configuration experience by precisely categorizing every configuration element and setting sophisticated lock/unlock controls:
- **What stays locked forever** (template elements like system commands, package names)
- **What teams can configure and control** (team-level settings like API keys, shared credentials)
- **What users can always customize** (user-level settings like local paths, personal preferences)
- **Lock/unlock defaults and visibility controls** for each configurable element
-For an overview of how the three-tier system works, see [MCP Configuration System](/mcp-configuration).
+For an overview of how the three-tier system works, see [MCP Configuration System](/mcp-configuration). To understand how official registry servers are automatically transformed, see [MCP Catalog - Official Registry Integration](/mcp-catalog#official-mcp-registry-integration).
-## The Four-Step Admin Workflow
+## The Four-Step Manual Creation Workflow
-Adding an MCP server to the catalog follows this process:
+Manually adding a custom MCP server to the catalog follows this process:
```
-Step 1: GitHub Repository βββ Link to source repository
+Step 1: GitHub Repository βββ Link to source repository (optional for custom servers)
Step 2: Claude Desktop Config βββ Input raw configuration JSON
Step 3: Configuration Schema βββ **Categorize every element into three tiers with lock/unlock controls**
Step 4: Basic Info βββ Set name, description, category
```
+**Note**: This manual workflow is for custom servers. Official registry servers are synced automatically with schema transformation handled by the system.
+
**Step 3** is the sophisticated categorization process where you transform raw configuration into the three-tier system with precise lock/unlock controls.
-### Step 1: GitHub Repository
+### Step 1: GitHub Repository (Optional)
-- **Repository URL** - GitHub repository containing the MCP server
+- **Repository URL** - GitHub repository containing the MCP server (optional for custom servers)
- **Branch** - Usually `main` or `master`
- **Sync Settings** - Automatic vs manual synchronization
+**Note**: For custom company integrations without public repositories, this step can be skipped.
+
### Step 2: Claude Desktop Configuration
-Input the raw Claude Desktop configuration:
+Input the raw Claude Desktop configuration for your custom MCP server:
```json
{
@@ -58,11 +101,23 @@ Input the raw Claude Desktop configuration:
}
}
-The system extracts all arguments and environment variables for categorization.
+The system extracts all arguments and environment variables for manual categorization.
+
+**For Official Registry Servers**: This extraction and categorization happens automatically during the sync process. The system reads `environmentVariables[]` from the registry format and maps them to appropriate tiers based on `isRequired` and `isSecret` properties.
+
+### Step 3: Configuration Schema Definition (Manual)
+
+For custom servers, you manually categorize every argument and environment variable from the Claude Desktop config into one of three tiers with sophisticated lock/unlock controls - this determines exactly what teams and users can configure.
-### Step 3: Configuration Schema Definition
+**Comparison to Official Registry Servers**:
-Categorize every argument and environment variable from the Claude Desktop config into one of three tiers with sophisticated lock/unlock controls - this determines exactly what teams and users can configure.
+| Aspect | Manual Creation (Custom) | Official Registry (Synced) |
+|--------|-------------------------|---------------------------|
+| Schema Creation | Manual categorization by admin | Automatic transformation |
+| Environment Variables | Admin decides tier placement | Auto-mapped by `isRequired`/`isSecret` |
+| Arguments | Admin categorizes each arg | Auto-extracted from packages/remotes |
+| Lock/Unlock Defaults | Admin sets defaults | Intelligent defaults based on registry |
+| Effort | 15-30 minutes per server | Instant (background job) |
### Step 4: Basic Information
@@ -70,11 +125,80 @@ Categorize every argument and environment variable from the Claude Desktop confi
- **Description** - Clear explanation of functionality
- **Category** - Organizational classification
-## Step 3: Configuration Schema Definition (Detailed)
+## Automatic Registry Server Transformation
-### The Sophisticated Categorization Process
+When servers are synced from the official MCP Registry, the transformation happens automatically:
-For every argument and environment variable extracted from the Claude Desktop config, you make precise categorization decisions with lock/unlock controls:
+### Automatic Environment Variable Mapping
+
+**Registry Format:**
+```json
+{
+ "environmentVariables": [
+ {
+ "name": "UPSTASH_REDIS_URL",
+ "isRequired": true,
+ "isSecret": false
+ },
+ {
+ "name": "UPSTASH_REDIS_TOKEN",
+ "isRequired": true,
+ "isSecret": true
+ },
+ {
+ "name": "DEBUG",
+ "isRequired": false,
+ "default": "false"
+ }
+ ]
+}
+```
+
+**Automatic Tier Mapping:**
+- **Template Level**: Fixed values (if provided in registry)
+- **Team Level**: `isRequired: true` + `isSecret: true` → Encrypted team secrets
+- **Team Level**: `isRequired: true` + `isSecret: false` → Required team settings
+- **User Level**: `isRequired: false` → Optional personal preferences
+
+**Result**: The synced server works immediately with the three-tier system without any manual schema work.
+
+### Automatic Transport Detection
+
+**STDIO Servers (packages):**
+```json
+{
+ "packages": [{
+ "transport": {
+ "type": "stdio",
+ "command": "npx",
+ "args": ["-y", "@upstash/context7"]
+ }
+ }]
+}
+```
+- Command and package name → Template level (locked)
+- Runtime args → Team/user level based on registry metadata
+
+**HTTP/SSE Servers (remotes):**
+```json
+{
+ "remotes": [{
+ "type": "sse",
+ "url": "https://api.example.com/mcp",
+ "headers": [...]
+ }]
+}
+```
+- URL → Template level (locked)
+- Headers → Team/user level based on authentication requirements
+
+For complete details on automatic transformation, see the [Official Registry Integration documentation](/mcp-catalog#official-mcp-registry-integration).
+
+## Step 3: Configuration Schema Definition for Manual Servers (Detailed)
+
+### The Sophisticated Categorization Process (Manual Servers Only)
+
+For custom MCP servers created manually, you make precise categorization decisions with lock/unlock controls for every argument and environment variable extracted from the Claude Desktop config:
**Categorization Options:**
- **🔒 Template (Static)** - Locked forever, never changes (system commands, package names)
@@ -88,6 +212,8 @@ For every argument and environment variable extracted from the Claude Desktop co
This sophisticated system determines the exact configuration experience for teams and users.
+**Note**: Official registry servers receive automatic categorization based on their registry metadata, eliminating this manual process.
+
### Example: Web Search MCP Server
**Raw Configuration:**
@@ -110,11 +236,16 @@ This sophisticated system determines the exact configuration experience for team
}
```
-**Your Categorization:**
+**Your Manual Categorization:**
- **🔒 Template**: `-y` and `@brightdata/mcp-server-web-search` (system commands, locked forever)
- **🔧 Team Configurable**: `API_KEY` and `SEARCH_QUOTA` (shared credentials and limits)
- **👤 User Configurable**: `DEFAULT_ENGINE` (personal search preference, default unlocked)
+**If This Were an Official Registry Server**: The system would automatically categorize based on registry metadata:
+- `API_KEY` with `isRequired: true, isSecret: true` → Team level (encrypted)
+- `SEARCH_QUOTA` with `isRequired: true` → Team level
+- `DEFAULT_ENGINE` with `isRequired: false` → User level
+
**Lock/Unlock Decisions:**
- API credentials: Default locked for users, hidden from users (security)
- Search quota: Default locked for users, visible to users (team resource management)
@@ -141,11 +272,15 @@ This sophisticated system determines the exact configuration experience for team
}
```
-**Your Categorization:**
+**Your Manual Categorization:**
- **🔒 Template**: System commands (`npx`, `-y`, package name) - locked forever
- **🔧 Team Configurable**: `TEAM_API_KEY` (shared credential, team controls access)
- **👤 User Configurable**: `DEBUG` (personal preference, always available to users)
+**If From Official Registry**: Would be automatically mapped:
+- `TEAM_API_KEY` with `isRequired: true, isSecret: true` → Team level (encrypted, hidden)
+- `DEBUG` with `isRequired: false` → User level (unlocked)
+
**Lock/Unlock Decisions:**
- `TEAM_API_KEY`: Default locked for users, hidden from users (security)
- `DEBUG`: Default unlocked for users, visible to users (personal preference)
@@ -172,9 +307,9 @@ DeployStack provides suggestions to help with categorization:
- Cache and performance settings
- Personal API preferences
-### Configuration Schema Step Interface
+### Configuration Schema Step Interface (Manual Creation)
-The Configuration Schema Step presents a sophisticated interface for categorizing and controlling every configuration element:
+The Configuration Schema Step presents a sophisticated interface for manually categorizing and controlling every configuration element:
#### Arguments Categorization Interface
```
@@ -244,18 +379,20 @@ This matrix shows how your schema categorization creates precise boundaries for
## What Happens Next
-After you complete the sophisticated schema categorization:
+After you complete the sophisticated schema categorization for manual servers:
1. **Schema Generation** - System creates the complete three-tier schema structure with lock/unlock metadata
2. **Catalog Addition** - MCP server is added to the global catalog with precise configuration boundaries
3. **Team Access** - Teams can install and configure only the elements you designated as team-configurable
4. **User Experience** - Users see only the elements you made available, with lock/unlock states controlled by teams
+**For Official Registry Servers**: This process happens automatically during sync, with schemas generated from registry metadata.
+
Your categorization and lock/unlock decisions directly shape how teams and users interact with the MCP server across the entire three-tier system.
## Security Validation
-The system automatically validates your sophisticated schema categorization:
+The system automatically validates schema categorization for both manual and synced servers:
**Security Checks:**
- ✅ Secrets properly categorized as team/user level with appropriate visibility controls
@@ -263,6 +400,8 @@ The system automatically validates your sophisticated schema categorization:
- ⚠️ Warns if secrets might be visible inappropriately or unlocked by default
- ✅ Validates lock inheritance logic across all three tiers
+**For Official Registry Servers**: Security validation happens automatically during the transformation process, ensuring registry metadata is properly interpreted.
+
**Schema Validation:**
- ✅ All extracted elements are properly categorized
- ✅ Required fields have appropriate defaults and validation rules
@@ -277,21 +416,23 @@ The system automatically validates your sophisticated schema categorization:
## Key Benefits
-Your sophisticated schema categorization provides:
+The schema system (whether manual or automatic) provides:
**Security** - Sensitive data properly protected at the right tier with appropriate visibility controls
**Precision** - Users see only what they can configure, teams control exactly what they need
**Flexibility** - Teams can lock/unlock elements based on their specific organizational needs
**Consistency** - Predictable configuration experience across all MCP servers in the catalog
**Governance** - Complete audit trail and control over configuration inheritance across all tiers
+**Automation** - Official registry servers get instant schema transformation without manual work
## Related Documentation
-For understanding how your sophisticated schemas are used across the three-tier system:
+For understanding how schemas work across the system:
-- [MCP Configuration System](/mcp-configuration) - Overview of the three-tier system your schemas enable
-- [Team Installation](/mcp-team-installation) - How teams use your schemas to configure installations
-- [User Configuration](/mcp-user-configuration) - How users interact with the boundaries you define
-- [MCP Catalog](/mcp-catalog) - Where your categorized schemas are stored and managed
+- [MCP Configuration System](/mcp-configuration) - Overview of the three-tier system schemas enable
+- [MCP Catalog - Official Registry Integration](/mcp-catalog#official-mcp-registry-integration) - How automatic schema transformation works
+- [Team Installation](/mcp-team-installation) - How teams use schemas to configure installations
+- [User Configuration](/mcp-user-configuration) - How users interact with schema boundaries
+- [MCP Catalog](/mcp-catalog) - Where schemas are stored and managed
-The sophisticated schema creation workflow is the foundation that enables secure, flexible MCP server configuration with precise control over configuration inheritance across all teams and users.
+The schema creation system—whether through sophisticated manual categorization or automatic registry transformation—is the foundation that enables secure, flexible MCP server configuration with precise control over configuration inheritance across all teams and users.
diff --git a/docs/mcp-catalog.mdx b/docs/mcp-catalog.mdx
index ade199b..41b3970 100644
--- a/docs/mcp-catalog.mdx
+++ b/docs/mcp-catalog.mdx
@@ -13,6 +13,7 @@ The MCP (Model Context Protocol) Server Catalog is DeployStack's comprehensive s
The MCP Catalog serves as a marketplace and management system for MCP servers, offering:
- **Server Discovery**: Browse available MCP servers by category, language, and functionality
+- **Official Registry Integration**: Automatic synchronization with the official MCP Registry at registry.modelcontextprotocol.io
- **Team-Based Management**: Organize servers within your teams with proper access control
- **Version Management**: Track different versions of MCP servers with changelog support
- **GitHub Integration**: Automatic synchronization with GitHub repositories
@@ -36,6 +37,23 @@ The catalog supports two types of servers:
- **Purpose**: Custom integrations, private tools, team-specific configurations
- **Examples**: Internal API integrations, custom business logic, proprietary tools
+### Server Sources
+
+The catalog contains servers from two sources:
+
+#### Official Registry Servers
+- **Source**: Synced automatically from registry.modelcontextprotocol.io
+- **Identification**: Marked with `synced_from_official_registry` flag
+- **Updates**: Periodically refreshed via background jobs
+- **Enhanced Data**: Enriched with GitHub metadata (stars, README, topics)
+- **Official Name**: Stored in reverse-DNS format (e.g., "io.github.upstash/context7")
+
+#### Manual Servers
+- **Source**: Created manually by administrators
+- **Purpose**: Custom integrations not in official registry
+- **Flexibility**: Full control over all server properties
+- **Team Servers**: Can be created by team administrators
+
### Categories
Servers are organized into categories for easy discovery and filtering. Categories are simple organizational labels that group servers by their purpose or functionality.
@@ -120,13 +138,19 @@ Each server in the catalog includes comprehensive metadata:
#### Basic Information
- **Name & Description**: Clear identification and purpose
+- **Official Name**: Reverse-DNS identifier from official registry (for synced servers)
+- **Version**: Current version number from official registry or manual entry
- **Category**: Organizational classification
- **Tags**: Searchable keywords and labels
- **Status**: Active, deprecated, or maintenance mode
+- **Sync Status**: Whether server is synced from official registry
#### Technical Specifications
-- **Language**: Programming language (Node.js, Python, etc.)
-- **Runtime**: Specific runtime requirements
+- **Language**: Programming language (TypeScript, Python, etc.)
+- **Runtime**: Specific runtime requirements (Node.js, Python, etc.)
+- **Transport Type**: Communication method (stdio, HTTP, SSE)
+- **Packages**: npm, pip, docker package definitions
+- **Remotes**: HTTP/SSE endpoint configurations
- **Minimum Version**: Required runtime version
- **Dependencies**: External dependencies and requirements
@@ -139,10 +163,16 @@ Each server in the catalog includes comprehensive metadata:
For details on how configuration schemas work in DeployStack's three-tier system, see [MCP Configuration System](/mcp-configuration).
#### Repository Integration
-- **GitHub URL**: Source code repository
+- **Repository URL**: Source code repository (GitHub, GitLab, etc.)
+- **Repository Source**: Platform identifier (github, gitlab)
+- **Repository ID**: Platform-specific repository identifier
+- **Repository Subfolder**: Monorepo subfolder path (if applicable)
- **Branch**: Target branch for synchronization
+- **GitHub Stars**: Star count from GitHub
+- **GitHub README**: Automatically fetched and stored README content
- **Last Sync**: When repository was last synchronized
- **Version Tracking**: Automatic version detection from repository
+- **Organization**: Repository owner organization
### Version Management
@@ -150,10 +180,12 @@ The catalog supports comprehensive version tracking:
#### Version Information
- **Version Numbers**: Semantic versioning (e.g., 1.2.3)
+- **Official Registry Version**: Version ID from official registry (for synced servers)
- **Git Commits**: Linked to specific repository commits
- **Changelog**: Detailed change descriptions
- **Stability**: Stable vs. beta/alpha versions
- **Latest Flag**: Automatic latest version detection
+- **Registry Updates**: Automatic version updates from official registry sync
#### Version Operations
- **Create Version**: Add new versions manually or via GitHub sync
@@ -161,14 +193,35 @@ The catalog supports comprehensive version tracking:
- **Version History**: Complete timeline of all versions
- **Rollback Support**: Deploy specific versions as needed
+### Official MCP Registry Integration
+
+DeployStack automatically syncs with the official MCP Registry at registry.modelcontextprotocol.io to provide instant access to all publicly available MCP servers.
+
+**Sync Process**: The registry sync uses DeployStack's background job queue system to process servers sequentially with rate limiting. This prevents API overload and ensures reliable synchronization. For technical details on how the job queue system works, see [Job Queue System](/development/backend/job-queue).
+
+**Key Features:**
+- **Automatic Sync**: One-click sync from official registry
+- **Background Processing**: Non-blocking sync via job queue
+- **Rate Limiting**: Sequential processing with configurable delays
+- **Progress Tracking**: Real-time monitoring of sync operations
+- **GitHub Enhancement**: Automatic enrichment with GitHub metadata
+- **Version Tracking**: Syncs version information from registry
+- **Transport Detection**: Automatic detection of stdio, HTTP, or SSE transport
+- **Environment Variable Mapping**: Intelligent mapping to three-tier configuration system
+
+**For Global Administrators**: Use the "Sync Registry" button in the MCP Server Catalog admin interface to trigger a sync operation.
+
### GitHub Integration
Integration with GitHub repositories for automatic synchronization and metadata extraction. For complete details on setting up and using GitHub integration, see the [GitHub App Integration Guide](/github-application).
**Key Features:**
- **Automatic Repository Sync**: Pull server metadata from GitHub repositories
+- **README Fetching**: Automatically fetch and store GitHub README content
+- **Star Tracking**: Track GitHub stars for social proof
- **Version Detection**: Automatic version tracking from repository tags
-- **Metadata Extraction**: Import descriptions, licenses, and topics
+- **Metadata Extraction**: Import descriptions, licenses, topics, and organization info
+- **Security**: XSS prevention and content sanitization (2MB size limit)
- **Manual and Scheduled Sync**: Flexible synchronization options
## Browsing and Discovery
diff --git a/docs/mcp-configuration.mdx b/docs/mcp-configuration.mdx
index edeff47..bd88825 100644
--- a/docs/mcp-configuration.mdx
+++ b/docs/mcp-configuration.mdx
@@ -100,6 +100,17 @@ Key workflow: Browse Catalog → Configure Team Settings → Set Lock Controls
Key workflow: Access Team Installation → Configure Personal Settings → Save Configuration
+## Official Registry Configuration Mapping
+
+When MCP servers are synced from the official MCP Registry, their environment variables are automatically mapped to the appropriate tier based on their properties:
+
+**Mapping Rules:**
+- **Template Level (Locked)**: Fixed environment variables with preset values
+- **Team Level**: Required credentials marked as secrets (`isRequired: true` + `isSecret: true`)
+- **User Level**: Optional configurations and personal preferences (`isRequired: false`)
+
+This intelligent mapping ensures that synced servers work seamlessly with DeployStack's three-tier system, with credentials at the team level and personal customizations at the user level.
+
## Configuration Assembly Example
Here's how the three tiers combine into a final runtime configuration:
@@ -148,6 +159,42 @@ Here's how the three tiers combine into a final runtime configuration:
*Note: Secret values are automatically decrypted only for runtime execution. In all other contexts (API responses, user interfaces), secrets appear masked as `*****`.*
+### Example: Official Registry Server Configuration
+
+Here's how an official registry server (Context7) is mapped to the three-tier system:
+
+**Official Registry Environment Variables:**
+```json
+[
+ {
+ "name": "UPSTASH_REDIS_URL",
+ "isRequired": true,
+ "isSecret": false
+ },
+ {
+ "name": "UPSTASH_REDIS_TOKEN",
+ "isRequired": true,
+ "isSecret": true
+ },
+ {
+ "name": "DEBUG",
+ "isRequired": false,
+ "default": "false"
+ }
+]
+```
+
+**After Automatic Mapping:**
+
+*Team Level Schema (team_env_schema):*
+- `UPSTASH_REDIS_URL` (required credential)
+- `UPSTASH_REDIS_TOKEN` (required secret, encrypted)
+
+*User Level Schema (user_env_schema):*
+- `DEBUG` (optional personal preference)
+
+This automatic mapping enables synced servers from the official registry to work immediately with DeployStack's security and collaboration features.
+
## Key Benefits
**Security:** Sensitive credentials managed at appropriate tiers with encryption and access controls
@@ -168,6 +215,18 @@ Here's how the three tiers combine into a final runtime configuration:
**Support Teams:** Share customer service API keys while allowing personal workspace customization
+## Official Registry Transport Types
+
+Servers synced from the official MCP Registry can use different transport mechanisms:
+
+**STDIO Transport (via packages):**
+Servers that run as local processes using standard input/output. Arguments are configured in the template level (locked), with runtime arguments at team/user levels.
+
+**HTTP/SSE Transport (via remotes):**
+Servers accessed via HTTP endpoints. Headers are mapped to appropriate tiers - authentication headers at team level, optional headers at user level.
+
+The three-tier system adapts automatically based on the transport type detected from the official registry.
+
## Related Documentation
For complete system understanding:
diff --git a/docs/mcp-installation.mdx b/docs/mcp-installation.mdx
index 846a25c..bc0c88f 100644
--- a/docs/mcp-installation.mdx
+++ b/docs/mcp-installation.mdx
@@ -16,7 +16,7 @@ Installations represent the team layer in DeployStack's three-tier configuration
DeployStack uses a three-layer system to manage MCP servers:
-1. **Global MCP Catalog**: A centralized library of all available MCP servers
+1. **Global MCP Catalog**: A centralized library of all available MCP servers (including both official registry servers synced from registry.modelcontextprotocol.io and manually created custom integrations)
2. **Team Access**: Your team can browse and select servers you have permission to use
3. **Team Installations**: Your team's actual configured instances of MCP servers
@@ -104,6 +104,7 @@ Behind the scenes, your team's installations are stored securely:
- **Your Environment**: Runs in your team's deployment environment
- **Direct Control**: Full control over the server instance
+- **Multiple Sources**: Install servers from the official MCP Registry or use custom manually-created servers
- **Flexible Configuration**: Team-level and user-level configuration options
- **Secure Setup**: Uses DeployStack's three-tier configuration system
diff --git a/docs/mcp-team-installation.mdx b/docs/mcp-team-installation.mdx
index 3d7b018..6efc8a9 100644
--- a/docs/mcp-team-installation.mdx
+++ b/docs/mcp-team-installation.mdx
@@ -23,7 +23,7 @@ For an overview of the three-tier system, see [MCP Configuration System](/mcp-co
**The Installation Flow:**
-1. **Browse Catalog** - Find MCP servers in the global catalog
+1. **Browse Catalog** - Find MCP servers in the global catalog (includes both official registry servers synced from registry.modelcontextprotocol.io and manually created servers)
2. **Select Server** - Choose a server that meets your team's needs
3. **Configure Team Settings** - Set shared credentials and parameters
4. **Set Lock Controls** - Decide what users can and cannot modify
@@ -31,6 +31,8 @@ For an overview of the three-tier system, see [MCP Configuration System](/mcp-co
Each installation gets a meaningful name like "DevOps Team Filesystem" or "Customer Support Database" to help team members understand its purpose.
+**Server Sources**: When browsing the catalog, you'll see servers from multiple sources - official registry servers (automatically synced and marked with badges) and manually created custom integrations. Both types work identically with DeployStack's three-tier configuration system.
+
The configuration options available to you are determined by how the global administrator categorized elements during schema creation. You can only configure elements that were designated as "Team Configurable" in the original schema definition.
## Lock/Unlock Controls
diff --git a/docs/self-hosted/docker-compose.mdx b/docs/self-hosted/docker-compose.mdx
index 920d745..f31348f 100644
--- a/docs/self-hosted/docker-compose.mdx
+++ b/docs/self-hosted/docker-compose.mdx
@@ -18,7 +18,11 @@ Deploy DeployStack using Docker Compose for a production-ready, self-hosted inst
## Overview
-This guide provides step-by-step instructions to install and configure DeployStack using Docker Compose. The setup includes both frontend and backend services with persistent data storage and proper networking.
+This guide provides step-by-step instructions to install and configure DeployStack using Docker Compose. The setup includes frontend, backend, and **required satellite service** with persistent data storage and proper networking.
+
+
+ **Satellites are required**: DeployStack cannot manage MCP servers without at least one satellite. This guide includes satellite deployment as a mandatory step.
+
**Important:** Only modify settings explicitly mentioned in this guide. Altering other configurations may lead to issues.
@@ -84,9 +88,13 @@ cat > .env << EOF
# DeployStack Configuration
DEPLOYSTACK_ENCRYPTION_SECRET=your-generated-secret-here
-# Optional: Customize ports (default: frontend=8080, backend=3000)
+# Satellite Registration (required - obtain from admin panel after setup)
+DEPLOYSTACK_REGISTRATION_TOKEN=
+
+# Optional: Customize ports (default: frontend=8080, backend=3000, satellite=3001)
# FRONTEND_PORT=8080
# BACKEND_PORT=3000
+# SATELLITE_PORT=3001
# Optional: Custom app title
# VITE_APP_TITLE=My DeployStack Instance
@@ -95,6 +103,10 @@ EOF
Replace `your-generated-secret-here` with the secret you generated in Step 2.
+
+ **Important**: Leave `DEPLOYSTACK_REGISTRATION_TOKEN` empty for now. You'll add it after completing the admin setup in Step 6.
+
+
### Step 4: Launch DeployStack
Start the Docker containers:
@@ -125,6 +137,39 @@ Open your browser and navigate to:
- **Frontend**: [http://localhost:8080](http://localhost:8080)
- **Backend API**: [http://localhost:3000](http://localhost:3000)
+### Step 7: Deploy Satellite Service (Required)
+
+
+ **Satellites are required** - Without at least one satellite, DeployStack cannot manage MCP servers. Complete this step to make your deployment functional.
+
+
+The satellite service is **already included** in the docker-compose.yml file. You just need to configure the registration token:
+
+1. **Generate Registration Token** (via admin interface after backend setup):
+ - Log in to DeployStack as admin (complete Step 6 first)
+   - Navigate to Admin → Satellites → Pairing
+ - Click "Generate Token" and copy it
+
+2. **Add token to your `.env` file**:
+ ```bash
+ # Satellite Configuration (required)
+ DEPLOYSTACK_REGISTRATION_TOKEN=deploystack_satellite_global_eyJhbGc...
+ ```
+
+3. **Restart services to apply token**:
+ ```bash
+ docker-compose down
+ docker-compose up -d
+ ```
+
+4. **Verify satellite registration**:
+ ```bash
+ docker logs deploystack-satellite
+   # Should show: ✅ Satellite registered successfully: docker-satellite-001
+ ```
+
+**Note**: After initial registration, the satellite saves its API key to persistent storage and doesn't need the registration token for subsequent starts.
+
## Configuration
### External Access
@@ -228,6 +273,16 @@ docker run --rm -v deploystack_backend_persistent:/data -v $(pwd)/backups/$(date
| `FRONTEND_PORT` | Frontend port mapping | `8080` | `80` |
| `BACKEND_PORT` | Backend port mapping | `3000` | `3001` |
+### Satellite Variables (Required)
+
+| Variable | Description | Example |
+|----------|-------------|----------|
+| `DEPLOYSTACK_REGISTRATION_TOKEN` | JWT registration token from admin (required for initial satellite pairing) | `deploystack_satellite_global_eyJhbGc...` |
+
+
+ **Note**: The satellite name `docker-satellite-001` is pre-configured in docker-compose.yml. You only need to provide the registration token in your `.env` file.
+
+
## Troubleshooting
### Common Issues
diff --git a/docs/self-hosted/index.mdx b/docs/self-hosted/index.mdx
index 158ca5f..741b758 100644
--- a/docs/self-hosted/index.mdx
+++ b/docs/self-hosted/index.mdx
@@ -58,6 +58,7 @@ Run DeployStack on your own infrastructure for maximum control, security, and cu
- **Data Sovereignty**: Keep all your MCP server configurations and deployment data within your infrastructure
- **Custom Integrations**: Integrate with your existing CI/CD pipelines and infrastructure tools
- **Network Security**: Deploy within your private network with custom firewall rules
+- **Satellite Deployment**: Run edge workers for MCP server management with team isolation
### Enterprise Features
- **Single Sign-On**: Integrate with your organization's authentication systems
diff --git a/docs/self-hosted/quick-start.mdx b/docs/self-hosted/quick-start.mdx
index dfb395b..5612b1f 100644
--- a/docs/self-hosted/quick-start.mdx
+++ b/docs/self-hosted/quick-start.mdx
@@ -11,7 +11,11 @@ import { Steps, Step } from 'fumadocs-ui/components/steps';
# Quick Start
-Get DeployStack up and running in minutes. Choose between our recommended Docker Compose setup or individual Docker containers for maximum flexibility.
+Get DeployStack up and running in minutes. This guide covers deploying the core platform (frontend + backend) and the required satellite service for MCP server management.
+
+
+ **Important**: Satellites are required for DeployStack to function. The platform alone cannot manage MCP servers - you must deploy at least one satellite.
+
## Prerequisites
@@ -201,6 +205,78 @@ sudo firewall-cmd --permanent --add-port=8080/tcp
sudo firewall-cmd --reload
```
+## Satellite Service
+
+
+ **Satellites are required** - DeployStack cannot manage MCP servers without satellites. You must deploy at least one satellite for the platform to function.
+
+
+### Adding a Satellite to Your Deployment
+
+After completing the basic backend and frontend setup, deploy at least one satellite:
+
+
+
+ **Generate Registration Token**
+
+ After setting up your admin account, generate a registration token:
+
+ 1. Log in to your DeployStack instance as admin
+  2. Navigate to Admin → Satellites → Pairing
+ 3. Click "Generate Token" and copy the full token
+
+ The token format will be: `deploystack_satellite_global_eyJhbGciOi...`
+
+
+
+ **Start Satellite Service**
+
+ ```bash
+ docker run -d \
+ --name deploystack-satellite \
+ -p 3001:3001 \
+ -e DEPLOYSTACK_BACKEND_URL="http://localhost:3000" \
+ -e DEPLOYSTACK_SATELLITE_NAME="my-satellite-001" \
+ -e DEPLOYSTACK_REGISTRATION_TOKEN="your-token-here" \
+ -v deploystack_satellite_persistent:/app/persistent_data \
+ deploystack/satellite:latest
+ ```
+
+
+ **Satellite Name Requirements:**
+ - 10-32 characters
+ - Only lowercase letters, numbers, hyphens, and underscores
+ - No spaces or special characters
+
+
+
+
+ **Verify Satellite Registration**
+
+ Check the satellite logs to confirm successful registration:
+
+ ```bash
+ docker logs deploystack-satellite
+ ```
+
+ You should see:
+ ```
+  ✅ Satellite registered successfully: my-satellite-001
+  🔑 API key received and ready for authenticated communication
+ ```
+
+
+
+### Satellite Persistence
+
+After initial registration, satellites save their API key to persistent storage. This means:
+
+- **First startup**: Uses registration token → Registers → Saves API key
+- **Subsequent startups**: Uses saved API key → No token needed
+- **Container restarts**: Automatic recovery without re-registration
+
+The registration token is only required once during initial pairing.
+
## Environment Variables Reference
### Required Variables
@@ -217,9 +293,17 @@ sudo firewall-cmd --reload
| `VITE_DEPLOYSTACK_BACKEND_URL` | Backend API URL for frontend | `http://localhost:3000` | `https://api.deploystack.company.com` |
| `VITE_APP_TITLE` | Custom application title | `DeployStack` | `Company DeployStack` |
+### Satellite Variables (Required)
+
+| Variable | Description | Example |
+|----------|-------------|----------|
+| `DEPLOYSTACK_BACKEND_URL` | Backend URL for satellite to connect to | `http://localhost:3000` |
+| `DEPLOYSTACK_SATELLITE_NAME` | Unique satellite name (10-32 chars, lowercase only) | `my-satellite-001` |
+| `DEPLOYSTACK_REGISTRATION_TOKEN` | JWT registration token from admin (required for initial pairing) | `deploystack_satellite_global_eyJhbGc...` |
+
## Next Steps
-Once DeployStack is running:
+Once DeployStack is running with at least one satellite:
@@ -230,12 +314,21 @@ Once DeployStack is running:
- Set up user roles and permissions
+
+ **Deploy Satellite Service**
+
+ - Generate registration token from admin panel
+ - Deploy satellite with the token
+ - Verify satellite registration and activation
+ - See [Satellite Service](#satellite-service) section above
+
+
**Deploy Your First MCP Server**
- Browse the MCP server catalog
- Configure credentials and settings
- - Deploy to your preferred cloud provider
+ - Deploy to your satellite
diff --git a/docs/self-hosted/setup.mdx b/docs/self-hosted/setup.mdx
index 5d87c7f..e3e629a 100644
--- a/docs/self-hosted/setup.mdx
+++ b/docs/self-hosted/setup.mdx
@@ -251,8 +251,63 @@ Follow this recommended setup workflow for new DeployStack instances:
- Set up user roles and permissions
- Configure team workspaces
+
+
+ **Set Up Satellites**
+
+  - Navigate to Admin → Satellites → Pairing
+ - Generate registration tokens for new satellites
+ - Deploy satellite services with registration tokens
+ - Verify satellite registration and health
+
+ For satellite deployment instructions, see [Quick Start - Satellite Service](/self-hosted/quick-start#satellite-service).
+
+## Satellite Administration
+
+
+ Satellites are **required** for DeployStack to function. Without satellites, you cannot manage MCP servers. Deploy at least one satellite after completing the platform setup.
+
+
+### Generating Registration Tokens
+
+Administrators can generate registration tokens for new satellites:
+
+1. **Navigate to Satellite Management**:
+ - Log in as `global_admin`
+   - Go to Admin → Satellites → Pairing
+
+2. **Generate Token**:
+ - Click "Generate Token"
+ - Copy the full token (starts with `deploystack_satellite_global_`)
+ - Token expires in 1 hour for security
+
+3. **Deploy Satellite**:
+ - Use the token in `DEPLOYSTACK_REGISTRATION_TOKEN` environment variable
+   - See [Quick Start - Satellite Service](/self-hosted/quick-start#satellite-service) for deployment instructions
+
+### Token Security
+
+- **Single-Use**: Registration tokens are consumed after successful pairing
+- **Expiration**: Global tokens expire after 1 hour
+- **Scope**: All self-hosted satellites are global satellites by default
+- **Admin Only**: Only `global_admin` users can generate registration tokens
+
+### Satellite Status Management
+
+After registration, satellites appear in the admin panel with `inactive` status:
+
+1. **Activate Satellite**:
+   - Navigate to Admin → Satellites
+ - Find your registered satellite
+ - Click "Activate" to enable it
+
+2. **Monitor Health**:
+ - View heartbeat status and system metrics
+ - Check last communication timestamp
+ - Review process status and resource usage
+
## Security Considerations
### Production Security
@@ -261,6 +316,7 @@ Follow this recommended setup workflow for new DeployStack instances:
- **Use HTTPS** for Frontend URL in production environments
- **Restrict Registration** (`Enable Email Registration: No`) for private deployments
- **Use Strong SMTP Passwords** and enable 2FA on email accounts
+- **Secure Satellite Tokens**: Store registration tokens securely and don't commit to version control
### Email Security
diff --git a/package-lock.json b/package-lock.json
index 11582a9..46ea064 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -10,7 +10,7 @@
"hasInstallScript": true,
"dependencies": {
"@types/mdx": "^2.0.13",
- "fumadocs-core": "^15.7.13",
+ "fumadocs-core": "^15.8.1",
"fumadocs-mdx": "^12.0.1",
"fumadocs-ui": "^15.7.7",
"lucide-react": "^0.544.0",
@@ -23,10 +23,10 @@
},
"devDependencies": {
"@semantic-release/github": "^11.0.5",
- "@tailwindcss/postcss": "^4.1.12",
+ "@tailwindcss/postcss": "^4.1.14",
"@types/node": "24.5.2",
"@types/react": "^19.1.12",
- "@types/react-dom": "^19.1.8",
+ "@types/react-dom": "^19.1.9",
"autoprefixer": "^10.4.21",
"markdownlint-cli": "^0.45.0",
"markdownlint-cli2": "^0.18.1",
@@ -1137,9 +1137,9 @@
"license": "MIT"
},
"node_modules/@jridgewell/trace-mapping": {
- "version": "0.3.30",
- "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.30.tgz",
- "integrity": "sha512-GQ7Nw5G2lTu/BtHTKfXhKHok2WGetd4XYcVKGx00SjAk8GMwgJM3zr6zORiPGuOE+/vkc90KtTosSSvaCjKb2Q==",
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -2594,61 +2594,54 @@
}
},
"node_modules/@tailwindcss/node": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.12.tgz",
- "integrity": "sha512-3hm9brwvQkZFe++SBt+oLjo4OLDtkvlE8q2WalaD/7QWaeM7KEJbAiY/LJZUaCs7Xa8aUu4xy3uoyX4q54UVdQ==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.14.tgz",
+ "integrity": "sha512-hpz+8vFk3Ic2xssIA3e01R6jkmsAhvkQdXlEbRTk6S10xDAtiQiM3FyvZVGsucefq764euO/b8WUW9ysLdThHw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jridgewell/remapping": "^2.3.4",
"enhanced-resolve": "^5.18.3",
- "jiti": "^2.5.1",
+ "jiti": "^2.6.0",
"lightningcss": "1.30.1",
- "magic-string": "^0.30.17",
+ "magic-string": "^0.30.19",
"source-map-js": "^1.2.1",
- "tailwindcss": "4.1.12"
+ "tailwindcss": "4.1.14"
}
},
- "node_modules/@tailwindcss/node/node_modules/tailwindcss": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.12.tgz",
- "integrity": "sha512-DzFtxOi+7NsFf7DBtI3BJsynR+0Yp6etH+nRPTbpWnS2pZBaSksv/JGctNwSWzbFjp0vxSqknaUylseZqMDGrA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/@tailwindcss/oxide": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.12.tgz",
- "integrity": "sha512-gM5EoKHW/ukmlEtphNwaGx45fGoEmP10v51t9unv55voWh6WrOL19hfuIdo2FjxIaZzw776/BUQg7Pck++cIVw==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.14.tgz",
+ "integrity": "sha512-23yx+VUbBwCg2x5XWdB8+1lkPajzLmALEfMb51zZUBYaYVPDQvBSD/WYDqiVyBIo2BZFa3yw1Rpy3G2Jp+K0dw==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"dependencies": {
"detect-libc": "^2.0.4",
- "tar": "^7.4.3"
+ "tar": "^7.5.1"
},
"engines": {
"node": ">= 10"
},
"optionalDependencies": {
- "@tailwindcss/oxide-android-arm64": "4.1.12",
- "@tailwindcss/oxide-darwin-arm64": "4.1.12",
- "@tailwindcss/oxide-darwin-x64": "4.1.12",
- "@tailwindcss/oxide-freebsd-x64": "4.1.12",
- "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.12",
- "@tailwindcss/oxide-linux-arm64-gnu": "4.1.12",
- "@tailwindcss/oxide-linux-arm64-musl": "4.1.12",
- "@tailwindcss/oxide-linux-x64-gnu": "4.1.12",
- "@tailwindcss/oxide-linux-x64-musl": "4.1.12",
- "@tailwindcss/oxide-wasm32-wasi": "4.1.12",
- "@tailwindcss/oxide-win32-arm64-msvc": "4.1.12",
- "@tailwindcss/oxide-win32-x64-msvc": "4.1.12"
+ "@tailwindcss/oxide-android-arm64": "4.1.14",
+ "@tailwindcss/oxide-darwin-arm64": "4.1.14",
+ "@tailwindcss/oxide-darwin-x64": "4.1.14",
+ "@tailwindcss/oxide-freebsd-x64": "4.1.14",
+ "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.14",
+ "@tailwindcss/oxide-linux-arm64-gnu": "4.1.14",
+ "@tailwindcss/oxide-linux-arm64-musl": "4.1.14",
+ "@tailwindcss/oxide-linux-x64-gnu": "4.1.14",
+ "@tailwindcss/oxide-linux-x64-musl": "4.1.14",
+ "@tailwindcss/oxide-wasm32-wasi": "4.1.14",
+ "@tailwindcss/oxide-win32-arm64-msvc": "4.1.14",
+ "@tailwindcss/oxide-win32-x64-msvc": "4.1.14"
}
},
"node_modules/@tailwindcss/oxide-android-arm64": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.12.tgz",
- "integrity": "sha512-oNY5pq+1gc4T6QVTsZKwZaGpBb2N1H1fsc1GD4o7yinFySqIuRZ2E4NvGasWc6PhYJwGK2+5YT1f9Tp80zUQZQ==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.14.tgz",
+ "integrity": "sha512-a94ifZrGwMvbdeAxWoSuGcIl6/DOP5cdxagid7xJv6bwFp3oebp7y2ImYsnZBMTwjn5Ev5xESvS3FFYUGgPODQ==",
"cpu": [
"arm64"
],
@@ -2663,9 +2656,9 @@
}
},
"node_modules/@tailwindcss/oxide-darwin-arm64": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.12.tgz",
- "integrity": "sha512-cq1qmq2HEtDV9HvZlTtrj671mCdGB93bVY6J29mwCyaMYCP/JaUBXxrQQQm7Qn33AXXASPUb2HFZlWiiHWFytw==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.14.tgz",
+ "integrity": "sha512-HkFP/CqfSh09xCnrPJA7jud7hij5ahKyWomrC3oiO2U9i0UjP17o9pJbxUN0IJ471GTQQmzwhp0DEcpbp4MZTA==",
"cpu": [
"arm64"
],
@@ -2680,9 +2673,9 @@
}
},
"node_modules/@tailwindcss/oxide-darwin-x64": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.12.tgz",
- "integrity": "sha512-6UCsIeFUcBfpangqlXay9Ffty9XhFH1QuUFn0WV83W8lGdX8cD5/+2ONLluALJD5+yJ7k8mVtwy3zMZmzEfbLg==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.14.tgz",
+ "integrity": "sha512-eVNaWmCgdLf5iv6Qd3s7JI5SEFBFRtfm6W0mphJYXgvnDEAZ5sZzqmI06bK6xo0IErDHdTA5/t7d4eTfWbWOFw==",
"cpu": [
"x64"
],
@@ -2697,9 +2690,9 @@
}
},
"node_modules/@tailwindcss/oxide-freebsd-x64": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.12.tgz",
- "integrity": "sha512-JOH/f7j6+nYXIrHobRYCtoArJdMJh5zy5lr0FV0Qu47MID/vqJAY3r/OElPzx1C/wdT1uS7cPq+xdYYelny1ww==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.14.tgz",
+ "integrity": "sha512-QWLoRXNikEuqtNb0dhQN6wsSVVjX6dmUFzuuiL09ZeXju25dsei2uIPl71y2Ic6QbNBsB4scwBoFnlBfabHkEw==",
"cpu": [
"x64"
],
@@ -2714,9 +2707,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.12.tgz",
- "integrity": "sha512-v4Ghvi9AU1SYgGr3/j38PD8PEe6bRfTnNSUE3YCMIRrrNigCFtHZ2TCm8142X8fcSqHBZBceDx+JlFJEfNg5zQ==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.14.tgz",
+ "integrity": "sha512-VB4gjQni9+F0VCASU+L8zSIyjrLLsy03sjcR3bM0V2g4SNamo0FakZFKyUQ96ZVwGK4CaJsc9zd/obQy74o0Fw==",
"cpu": [
"arm"
],
@@ -2731,9 +2724,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm64-gnu": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.12.tgz",
- "integrity": "sha512-YP5s1LmetL9UsvVAKusHSyPlzSRqYyRB0f+Kl/xcYQSPLEw/BvGfxzbH+ihUciePDjiXwHh+p+qbSP3SlJw+6g==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.14.tgz",
+ "integrity": "sha512-qaEy0dIZ6d9vyLnmeg24yzA8XuEAD9WjpM5nIM1sUgQ/Zv7cVkharPDQcmm/t/TvXoKo/0knI3me3AGfdx6w1w==",
"cpu": [
"arm64"
],
@@ -2748,9 +2741,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm64-musl": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.12.tgz",
- "integrity": "sha512-V8pAM3s8gsrXcCv6kCHSuwyb/gPsd863iT+v1PGXC4fSL/OJqsKhfK//v8P+w9ThKIoqNbEnsZqNy+WDnwQqCA==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.14.tgz",
+ "integrity": "sha512-ISZjT44s59O8xKsPEIesiIydMG/sCXoMBCqsphDm/WcbnuWLxxb+GcvSIIA5NjUw6F8Tex7s5/LM2yDy8RqYBQ==",
"cpu": [
"arm64"
],
@@ -2765,9 +2758,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-x64-gnu": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.12.tgz",
- "integrity": "sha512-xYfqYLjvm2UQ3TZggTGrwxjYaLB62b1Wiysw/YE3Yqbh86sOMoTn0feF98PonP7LtjsWOWcXEbGqDL7zv0uW8Q==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.14.tgz",
+ "integrity": "sha512-02c6JhLPJj10L2caH4U0zF8Hji4dOeahmuMl23stk0MU1wfd1OraE7rOloidSF8W5JTHkFdVo/O7uRUJJnUAJg==",
"cpu": [
"x64"
],
@@ -2782,9 +2775,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-x64-musl": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.12.tgz",
- "integrity": "sha512-ha0pHPamN+fWZY7GCzz5rKunlv9L5R8kdh+YNvP5awe3LtuXb5nRi/H27GeL2U+TdhDOptU7T6Is7mdwh5Ar3A==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.14.tgz",
+ "integrity": "sha512-TNGeLiN1XS66kQhxHG/7wMeQDOoL0S33x9BgmydbrWAb9Qw0KYdd8o1ifx4HOGDWhVmJ+Ul+JQ7lyknQFilO3Q==",
"cpu": [
"x64"
],
@@ -2799,9 +2792,9 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.12.tgz",
- "integrity": "sha512-4tSyu3dW+ktzdEpuk6g49KdEangu3eCYoqPhWNsZgUhyegEda3M9rG0/j1GV/JjVVsj+lG7jWAyrTlLzd/WEBg==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.14.tgz",
+ "integrity": "sha512-uZYAsaW/jS/IYkd6EWPJKW/NlPNSkWkBlaeVBi/WsFQNP05/bzkebUL8FH1pdsqx4f2fH/bWFcUABOM9nfiJkQ==",
"bundleDependencies": [
"@napi-rs/wasm-runtime",
"@emnapi/core",
@@ -2817,30 +2810,30 @@
"license": "MIT",
"optional": true,
"dependencies": {
- "@emnapi/core": "^1.4.5",
- "@emnapi/runtime": "^1.4.5",
- "@emnapi/wasi-threads": "^1.0.4",
- "@napi-rs/wasm-runtime": "^0.2.12",
- "@tybys/wasm-util": "^0.10.0",
- "tslib": "^2.8.0"
+ "@emnapi/core": "^1.5.0",
+ "@emnapi/runtime": "^1.5.0",
+ "@emnapi/wasi-threads": "^1.1.0",
+ "@napi-rs/wasm-runtime": "^1.0.5",
+ "@tybys/wasm-util": "^0.10.1",
+ "tslib": "^2.4.0"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
- "version": "1.4.5",
+ "version": "1.5.0",
"dev": true,
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
- "@emnapi/wasi-threads": "1.0.4",
+ "@emnapi/wasi-threads": "1.1.0",
"tslib": "^2.4.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
- "version": "1.4.5",
+ "version": "1.5.0",
"dev": true,
"inBundle": true,
"license": "MIT",
@@ -2850,7 +2843,7 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
- "version": "1.0.4",
+ "version": "1.1.0",
"dev": true,
"inBundle": true,
"license": "MIT",
@@ -2860,19 +2853,19 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
- "version": "0.2.12",
+ "version": "1.0.5",
"dev": true,
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
- "@emnapi/core": "^1.4.3",
- "@emnapi/runtime": "^1.4.3",
- "@tybys/wasm-util": "^0.10.0"
+ "@emnapi/core": "^1.5.0",
+ "@emnapi/runtime": "^1.5.0",
+ "@tybys/wasm-util": "^0.10.1"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
- "version": "0.10.0",
+ "version": "0.10.1",
"dev": true,
"inBundle": true,
"license": "MIT",
@@ -2882,16 +2875,16 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
- "version": "2.8.0",
+ "version": "2.8.1",
"dev": true,
"inBundle": true,
"license": "0BSD",
"optional": true
},
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.12.tgz",
- "integrity": "sha512-iGLyD/cVP724+FGtMWslhcFyg4xyYyM+5F4hGvKA7eifPkXHRAUDFaimu53fpNg9X8dfP75pXx/zFt/jlNF+lg==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.14.tgz",
+ "integrity": "sha512-Az0RnnkcvRqsuoLH2Z4n3JfAef0wElgzHD5Aky/e+0tBUxUhIeIqFBTMNQvmMRSP15fWwmvjBxZ3Q8RhsDnxAA==",
"cpu": [
"arm64"
],
@@ -2906,9 +2899,9 @@
}
},
"node_modules/@tailwindcss/oxide-win32-x64-msvc": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.12.tgz",
- "integrity": "sha512-NKIh5rzw6CpEodv/++r0hGLlfgT/gFN+5WNdZtvh6wpU2BpGNgdjvj6H2oFc8nCM839QM1YOhjpgbAONUb4IxA==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.14.tgz",
+ "integrity": "sha512-ttblVGHgf68kEE4om1n/n44I0yGPkCPbLsqzjvybhpwa6mKKtgFfAzy6btc3HRmuW7nHe0OOrSeNP9sQmmH9XA==",
"cpu": [
"x64"
],
@@ -2923,26 +2916,19 @@
}
},
"node_modules/@tailwindcss/postcss": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.12.tgz",
- "integrity": "sha512-5PpLYhCAwf9SJEeIsSmCDLgyVfdBhdBpzX1OJ87anT9IVR0Z9pjM0FNixCAUAHGnMBGB8K99SwAheXrT0Kh6QQ==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.14.tgz",
+ "integrity": "sha512-BdMjIxy7HUNThK87C7BC8I1rE8BVUsfNQSI5siQ4JK3iIa3w0XyVvVL9SXLWO//CtYTcp1v7zci0fYwJOjB+Zg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@alloc/quick-lru": "^5.2.0",
- "@tailwindcss/node": "4.1.12",
- "@tailwindcss/oxide": "4.1.12",
+ "@tailwindcss/node": "4.1.14",
+ "@tailwindcss/oxide": "4.1.14",
"postcss": "^8.4.41",
- "tailwindcss": "4.1.12"
+ "tailwindcss": "4.1.14"
}
},
- "node_modules/@tailwindcss/postcss/node_modules/tailwindcss": {
- "version": "4.1.12",
- "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.12.tgz",
- "integrity": "sha512-DzFtxOi+7NsFf7DBtI3BJsynR+0Yp6etH+nRPTbpWnS2pZBaSksv/JGctNwSWzbFjp0vxSqknaUylseZqMDGrA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/@types/debug": {
"version": "4.1.12",
"resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
@@ -3033,9 +3019,9 @@
}
},
"node_modules/@types/react-dom": {
- "version": "19.1.8",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.8.tgz",
- "integrity": "sha512-xG7xaBMJCpcK0RpN8jDbAACQo54ycO6h4dSSmgv8+fu6ZIAdANkx/WsawASUjVXYfy+J9AbUpRMNNEsXCDfDBQ==",
+ "version": "19.1.9",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.9.tgz",
+ "integrity": "sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==",
"devOptional": true,
"license": "MIT",
"peerDependencies": {
@@ -4998,15 +4984,15 @@
}
},
"node_modules/fumadocs-core": {
- "version": "15.7.13",
- "resolved": "https://registry.npmjs.org/fumadocs-core/-/fumadocs-core-15.7.13.tgz",
- "integrity": "sha512-pXSu5/7newNu1nxhz3tp5e0P8jS5oA4jpxWM9o/Rdt6mXjR0FymgHzFDesFVirpSCSjZDTa7RyWDRnyvEOYtvQ==",
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/fumadocs-core/-/fumadocs-core-15.8.1.tgz",
+ "integrity": "sha512-3NBM2U3QlnDr4AwfDCLFaNjRGOj52g3geHSnwC9hU2en34xROe7/I8FI1eLkX68ppGnhSQYm/rIuMAPzvepnsg==",
"license": "MIT",
"dependencies": {
"@formatjs/intl-localematcher": "^0.6.1",
- "@orama/orama": "^3.1.13",
- "@shikijs/rehype": "^3.12.2",
- "@shikijs/transformers": "^3.12.2",
+ "@orama/orama": "^3.1.14",
+ "@shikijs/rehype": "^3.13.0",
+ "@shikijs/transformers": "^3.13.0",
"github-slugger": "^2.0.0",
"hast-util-to-estree": "^3.1.3",
"hast-util-to-jsx-runtime": "^2.3.6",
@@ -5018,7 +5004,7 @@
"remark-gfm": "^4.0.1",
"remark-rehype": "^11.1.2",
"scroll-into-view-if-needed": "^3.1.0",
- "shiki": "^3.12.2",
+ "shiki": "^3.13.0",
"unist-util-visit": "^5.0.0"
},
"peerDependencies": {
@@ -6040,9 +6026,9 @@
}
},
"node_modules/jiti": {
- "version": "2.5.1",
- "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.5.1.tgz",
- "integrity": "sha512-twQoecYPiVA5K/h6SxtORw/Bs3ar+mLUtoPSc7iMXzQzK8d7eJ/R09wmTwAjiamETn1cXYPGfNnu7DMoHgu12w==",
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz",
+ "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==",
"dev": true,
"license": "MIT",
"bin": {
@@ -6542,9 +6528,9 @@
}
},
"node_modules/magic-string": {
- "version": "0.30.18",
- "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.18.tgz",
- "integrity": "sha512-yi8swmWbO17qHhwIBNeeZxTceJMeBvWJaId6dyvTSOwTipqeHhMhOrz6513r1sOKnpvQ7zkhlG8tPrpilwTxHQ==",
+ "version": "0.30.19",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz",
+ "integrity": "sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -7980,9 +7966,9 @@
}
},
"node_modules/minizlib": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz",
- "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==",
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.1.0.tgz",
+ "integrity": "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -7992,22 +7978,6 @@
"node": ">= 18"
}
},
- "node_modules/mkdirp": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz",
- "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==",
- "dev": true,
- "license": "MIT",
- "bin": {
- "mkdirp": "dist/cjs/src/bin.js"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
@@ -13100,16 +13070,16 @@
}
},
"node_modules/tailwindcss": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.13.tgz",
- "integrity": "sha512-i+zidfmTqtwquj4hMEwdjshYYgMbOrPzb9a0M3ZgNa0JMoZeFC6bxZvO8yr8ozS6ix2SDz0+mvryPeBs2TFE+w==",
+ "version": "4.1.14",
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.14.tgz",
+ "integrity": "sha512-b7pCxjGO98LnxVkKjaZSDeNuljC4ueKUddjENJOADtubtdo8llTaJy7HwBMeLNSSo2N5QIAgklslK1+Ir8r6CA==",
"devOptional": true,
"license": "MIT"
},
"node_modules/tapable": {
- "version": "2.2.3",
- "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.3.tgz",
- "integrity": "sha512-ZL6DDuAlRlLGghwcfmSn9sK3Hr6ArtyudlSAiCqQ6IfE+b+HHbydbYDIG15IfS5do+7XQQBdBiubF/cV2dnDzg==",
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz",
+ "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==",
"dev": true,
"license": "MIT",
"engines": {
@@ -13121,17 +13091,16 @@
}
},
"node_modules/tar": {
- "version": "7.4.3",
- "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz",
- "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==",
+ "version": "7.5.1",
+ "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.1.tgz",
+ "integrity": "sha512-nlGpxf+hv0v7GkWBK2V9spgactGOp0qvfWRxUMjqHyzrt3SgwE48DIv/FhqPHJYLHpgW1opq3nERbz5Anq7n1g==",
"dev": true,
"license": "ISC",
"dependencies": {
"@isaacs/fs-minipass": "^4.0.0",
"chownr": "^3.0.0",
"minipass": "^7.1.2",
- "minizlib": "^3.0.1",
- "mkdirp": "^3.0.1",
+ "minizlib": "^3.1.0",
"yallist": "^5.0.0"
},
"engines": {
diff --git a/package.json b/package.json
index c20a18a..e181442 100644
--- a/package.json
+++ b/package.json
@@ -15,7 +15,7 @@
},
"dependencies": {
"@types/mdx": "^2.0.13",
- "fumadocs-core": "^15.7.13",
+ "fumadocs-core": "^15.8.1",
"fumadocs-mdx": "^12.0.1",
"fumadocs-ui": "^15.7.7",
"lucide-react": "^0.544.0",
@@ -28,10 +28,10 @@
},
"devDependencies": {
"@semantic-release/github": "^11.0.5",
- "@tailwindcss/postcss": "^4.1.12",
+ "@tailwindcss/postcss": "^4.1.14",
"@types/node": "24.5.2",
"@types/react": "^19.1.12",
- "@types/react-dom": "^19.1.8",
+ "@types/react-dom": "^19.1.9",
"autoprefixer": "^10.4.21",
"markdownlint-cli": "^0.45.0",
"markdownlint-cli2": "^0.18.1",