From 5f72e96dd953e1ff9f2f6ebb219db4b620477fa6 Mon Sep 17 00:00:00 2001 From: Lasim Date: Tue, 25 Nov 2025 23:03:43 +0100 Subject: [PATCH 1/2] docs: Update database management and backend documentation; clarify OAuth server requirements and user credential management --- development/backend/database/index.mdx | 2 +- development/backend/index.mdx | 6 +- general/mcp-catalog.mdx | 1 + general/mcp-configuration.mdx | 39 ++-- general/mcp-oauth.mdx | 252 +++++++++++++++++++++++++ general/mcp-team-installation.mdx | 133 +++++++++---- general/mcp-user-configuration.mdx | 83 +++++--- 7 files changed, 438 insertions(+), 78 deletions(-) create mode 100644 general/mcp-oauth.mdx diff --git a/development/backend/database/index.mdx b/development/backend/database/index.mdx index eb6dce0..2dd7afa 100644 --- a/development/backend/database/index.mdx +++ b/development/backend/database/index.mdx @@ -498,7 +498,7 @@ turso db shell your-database The environment-based architecture makes it easy to add support for additional databases: -- **PostgreSQL**: Planned for future release +- **PostgreSQL**: Possible future addition - **MySQL**: Possible future addition - **Other SQLite-compatible databases**: Can be added with minimal changes diff --git a/development/backend/index.mdx b/development/backend/index.mdx index 0d1bd50..9a5e6e5 100644 --- a/development/backend/index.mdx +++ b/development/backend/index.mdx @@ -13,7 +13,7 @@ The DeployStack backend is a modern, high-performance Node.js application built - **Framework**: Fastify for high-performance HTTP server - **Language**: TypeScript for type safety -- **Database**: SQLite (default) or PostgreSQL with Drizzle ORM +- **Database**: SQLite with Drizzle ORM - **Validation**: JSON Schema for request/response validation and OpenAPI generation - **Plugin System**: Extensible architecture with security isolation - **Authentication**: Dual authentication system - cookie-based sessions for frontend and OAuth 2.1 for satellite access @@ -44,7 
+44,7 @@ The development server starts at `http://localhost:3000` with API documentation href="/development/backend/database/index" title="Database Management" > - SQLite and PostgreSQL setup, schema management, migrations, and Drizzle ORM best practices. + SQLite setup, schema management, migrations, and Drizzle ORM best practices. - - SQLite setup, schema management, migrations, and Drizzle ORM best practices. + PostgreSQL setup, schema management, migrations, and Drizzle ORM best practices. { const rawConn = server.rawDbConnection; if (rawConn) { const status = getDbStatus(); - if (status.dialect === 'sqlite' && 'close' in rawConn) { - (rawConn as SqliteDriver.Database).close(); - server.log.info('SQLite connection closed.'); + if (status.dialect === 'postgresql') { + await (rawConn as Pool).end(); + server.log.info('PostgreSQL connection pool closed.'); } } }); @@ -408,7 +408,7 @@ GROUP BY b.id; ## Database Schema -For the complete database schema, see [schema.sqlite.ts](https://github.com/deploystackio/deploystack/blob/main/services/backend/src/db/schema.sqlite.ts) in the backend directory. +For the complete database schema, see [schema.ts](https://github.com/deploystackio/deploystack/blob/main/services/backend/src/db/schema.ts) in the backend directory. ### Jobs Table @@ -472,7 +472,7 @@ CREATE TABLE queue_job_batches ( ### Why Database-Backed? -No additional infrastructure required (Redis, message queues). Uses existing SQLite/Turso database, and jobs persist across server restarts. +No additional infrastructure required (Redis, message queues). Uses existing PostgreSQL database, and jobs persist across server restarts. ### Why Sequential Processing? @@ -525,6 +525,6 @@ Generate complex reports from large datasets without blocking API requests. ## Summary -The background job queue system provides a simple, reliable way to process long-running tasks in DeployStack. 
Built on familiar SQLite/Turso infrastructure, it requires no additional services while providing persistence, retry logic, and rate limiting. Workers follow a straightforward pattern making them easy to implement and test. +The background job queue system provides a simple, reliable way to process long-running tasks in DeployStack. Built on PostgreSQL infrastructure, it requires no additional services while providing persistence, retry logic, and rate limiting. Workers follow a straightforward pattern making them easy to implement and test. For routine operations, the system handles thousands of jobs efficiently. For specialized needs requiring higher throughput or distributed processing, the architecture supports clear migration paths to more advanced solutions. diff --git a/development/backend/metrics.mdx b/development/backend/metrics.mdx index 8349a38..1b231ec 100644 --- a/development/backend/metrics.mdx +++ b/development/backend/metrics.mdx @@ -37,7 +37,7 @@ Cleanup Worker (cron job + background worker) **MCP Client Activity Metrics** serves as the complete reference implementation. All files are in place and can be used as templates for new metric types. **Key Files**: -- Database: `src/db/schema.sqlite.ts` (table: `mcpClientActivityMetrics`) +- Database: `src/db/schema.ts` (table: `mcpClientActivityMetrics`) - Base Service: `src/services/metrics/TimeSeriesMetricsService.ts` - Metric Service: `src/services/metrics/McpClientActivityMetricsService.ts` - Event Handler: `src/events/satellite/mcp-client-activity.ts` @@ -83,7 +83,7 @@ Permissions: Users view their own, admins view all ### Step 2: Create Database Table -Create your metrics table in `src/db/schema.sqlite.ts` following the `mcpClientActivityMetrics` table pattern. +Create your metrics table in `src/db/schema.ts` following the `mcpClientActivityMetrics` table pattern. 
**Critical Requirements**: - Use `bucket_timestamp` (integer, Unix seconds) @@ -115,9 +115,9 @@ Create a service that extends the base `TimeSeriesMetricsService`: ```typescript import { eq, gte, lte, and, sql } from 'drizzle-orm'; -import type { LibSQLDatabase } from 'drizzle-orm/libsql'; +import type { PostgresJsDatabase } from 'drizzle-orm/postgres-js'; import { TimeSeriesMetricsService } from './TimeSeriesMetricsService'; -import { serverInstallMetrics } from '../../db/schema.sqlite'; +import { serverInstallMetrics } from '../../db/schema.ts'; import type { QueryParams, BucketData, @@ -131,9 +131,9 @@ interface ServerInstallBucket extends BucketData { } export class ServerInstallMetricsService extends TimeSeriesMetricsService { - private db: LibSQLDatabase; + private db: PostgresJsDatabase; - constructor(db: LibSQLDatabase) { + constructor(db: PostgresJsDatabase) { super(); this.db = db; } @@ -407,16 +407,6 @@ const results = await this.db .orderBy(metrics.bucket_timestamp); ``` -### Database Driver Compatibility - -Handle both SQLite (`changes`) and Turso (`rowsAffected`): - -```typescript -const deletedCount = (result.changes || result.rowsAffected || 0); -``` - -For more details, see [Database Driver Compatibility](/development/backend/database/#database-driver-compatibility). 
- ## Common Pitfalls @@ -447,15 +437,6 @@ const results = await db.select({ .groupBy(metrics.bucket_timestamp); ``` -### ❌ Not handling both database drivers -```typescript -// WRONG - Only works with SQLite -const deletedCount = result.changes; - -// CORRECT - Works with both SQLite and Turso -const deletedCount = (result.changes || result.rowsAffected || 0); -``` - ## Related Documentation - [Database Management](/development/backend/database/) - Schema design, migrations, Drizzle ORM diff --git a/development/backend/oauth-providers.mdx b/development/backend/oauth-providers.mdx index 1cf7ac3..d536eb9 100644 --- a/development/backend/oauth-providers.mdx +++ b/development/backend/oauth-providers.mdx @@ -38,7 +38,7 @@ services/backend/src/ │ ├── github-oauth.ts # GitHub settings │ └── [provider]-oauth.ts # New provider settings ├── db/ -│ └── schema.sqlite.ts # User table with provider IDs +│ └── schema.ts # User table with provider IDs └── lib/ └── lucia.ts # Session management ``` @@ -135,7 +135,7 @@ Key considerations: Add provider ID field to `authUser` table: ```typescript -// In src/db/schema.sqlite.ts +// In src/db/schema.ts // Add field like: // google_id: text('google_id').unique() // microsoft_id: text('microsoft_id').unique() diff --git a/development/backend/oauth2-server.mdx b/development/backend/oauth2-server.mdx index 14b177a..742317b 100644 --- a/development/backend/oauth2-server.mdx +++ b/development/backend/oauth2-server.mdx @@ -133,7 +133,7 @@ Implements RFC 7591 Dynamic Client Registration: #### Database Storage - **Table**: `dynamic_oauth_clients` -- **Schema**: See `services/backend/src/db/schema.sqlite.ts` +- **Schema**: See `services/backend/src/db/schema.ts` - **Fields**: client_id, client_name, redirect_uris, grant_types, response_types, scope, token_endpoint_auth_method, client_id_issued_at, expires_at - **Persistence**: Survives server restarts and supports multiple instances @@ -178,7 +178,7 @@ Handles token lifecycle: ### Database Schema 
#### Dynamic OAuth Clients Table -- **File**: `services/backend/src/db/schema.sqlite.ts` +- **File**: `services/backend/src/db/schema.ts` - **Table**: `dynamic_oauth_clients` - **Migration**: `0006_keen_firestar.sql` - **Purpose**: Persistent storage for dynamically registered MCP clients diff --git a/development/backend/plugins.mdx b/development/backend/plugins.mdx index 9ca5db5..e1dbdf4 100644 --- a/development/backend/plugins.mdx +++ b/development/backend/plugins.mdx @@ -103,24 +103,24 @@ Add basic plugin information: ### 3. Define Database Schema (Optional) -If your plugin requires database tables, create a `schema.ts` file: +If your plugin requires database tables, create a `schema.ts` file using PostgreSQL table definitions: ```typescript -import { sqliteTable, text, integer, sql } from 'drizzle-orm/sqlite-core'; +import { pgTable, text, timestamp } from 'drizzle-orm/pg-core'; // Define your plugin's tables -export const myCustomEntities = sqliteTable('my_custom_entities', { +export const myCustomEntities = pgTable('my_custom_entities', { id: text('id').primaryKey(), name: text('name').notNull(), data: text('data'), - createdAt: integer('created_at', { mode: 'timestamp' }).notNull().default(sql`(strftime('%s', 'now'))`), + created_at: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(), }); // You can define multiple tables if needed -export const myCustomRelations = sqliteTable('my_custom_relations', { +export const myCustomRelations = pgTable('my_custom_relations', { id: text('id').primaryKey(), - entityId: text('entity_id').notNull().references(() => myCustomEntities.id), - relationType: text('relation_type').notNull(), + entity_id: text('entity_id').notNull().references(() => myCustomEntities.id), + relation_type: text('relation_type').notNull(), }); ``` @@ -131,30 +131,17 @@ Create a `routes.ts` file for your API routes: ```typescript import { type PluginRouteManager } from '../../plugin-system/route-manager'; import { type 
AnyDatabase, getSchema } from '../../db'; -import { type BetterSQLite3Database } from 'drizzle-orm/better-sqlite3'; -import { type NodePgDatabase } from 'drizzle-orm/node-postgres'; -import { type SQLiteTable } from 'drizzle-orm/sqlite-core'; -import { type PgTable } from 'drizzle-orm/pg-core'; import { eq } from 'drizzle-orm'; -// Helper type guard for database type checking -function isSQLiteDB(db: AnyDatabase): db is BetterSQLite3Database { - return typeof (db as BetterSQLite3Database).get === 'function' && - typeof (db as BetterSQLite3Database).all === 'function' && - typeof (db as BetterSQLite3Database).run === 'function'; -} - /** * Register all routes for your custom plugin - * + * * All routes registered here will be automatically namespaced under: * /api/plugin/my-custom-plugin/ */ export async function registerRoutes(routeManager: PluginRouteManager, db: AnyDatabase | null): Promise { - // Note: In actual plugin development, you should receive a logger instance - // For this example, we'll show the pattern you should follow - const logger = routeManager.getLogger(); // Assuming this method exists - + const logger = routeManager.getLogger(); + if (!db) { logger?.warn(`Database not available, skipping routes.`); return; @@ -172,36 +159,21 @@ export async function registerRoutes(routeManager: PluginRouteManager, db: AnyDa // Register GET /entities route // This becomes: GET /api/plugin/my-custom-plugin/entities routeManager.get('/entities', async () => { - if (isSQLiteDB(db)) { - const entities = await db.select().from(table as SQLiteTable).all(); - return { entities }; - } else { - const entities = await (db as NodePgDatabase).select().from(table as PgTable); - return { entities }; - } + const entities = await db.select().from(table); + return { entities }; }); // Register GET /entities/:id route // This becomes: GET /api/plugin/my-custom-plugin/entities/:id routeManager.get('/entities/:id', async (request, reply) => { const { id } = request.params as { id: 
string }; - let entity; - - if (isSQLiteDB(db)) { - const typedTable = table as SQLiteTable & { id: any }; - entity = await db - .select() - .from(typedTable) - .where(eq(typedTable.id, id)) - .get(); - } else { - const typedTable = table as PgTable & { id: any }; - const rows = await (db as NodePgDatabase) - .select() - .from(typedTable) - .where(eq(typedTable.id, id)); - entity = rows[0] ?? null; - } + + const rows = await db + .select() + .from(table) + .where(eq(table.id, id)); + + const entity = rows[0] ?? null; if (!entity) { return reply.status(404).send({ error: 'Entity not found' }); @@ -225,11 +197,7 @@ export async function registerRoutes(routeManager: PluginRouteManager, db: AnyDa data: body.data || null, }; - if (isSQLiteDB(db)) { - await db.insert(table as SQLiteTable).values(entityData).run(); - } else { - await (db as NodePgDatabase).insert(table as PgTable).values(entityData); - } + await db.insert(table).values(entityData); return { id, ...body }; }); @@ -243,32 +211,21 @@ export async function registerRoutes(routeManager: PluginRouteManager, db: AnyDa Create an `index.ts` file that implements the Plugin interface: ```typescript -import { - type Plugin, +import { + type Plugin, type DatabaseExtension, type PluginRouteManager } from '../../plugin-system/types'; import { type AnyDatabase, getSchema } from '../../db'; -import { type BetterSQLite3Database } from 'drizzle-orm/better-sqlite3'; -import { type NodePgDatabase } from 'drizzle-orm/node-postgres'; -import { type SQLiteTable } from 'drizzle-orm/sqlite-core'; -import { type PgTable } from 'drizzle-orm/pg-core'; import { sql } from 'drizzle-orm'; -// Helper type guard for database type checking -function isSQLiteDB(db: AnyDatabase): db is BetterSQLite3Database { - return typeof (db as BetterSQLite3Database).get === 'function' && - typeof (db as BetterSQLite3Database).all === 'function' && - typeof (db as BetterSQLite3Database).run === 'function'; -} - // Table definitions for this plugin const 
myCustomPluginTableDefinitions = { 'my_custom_entities': { id: (b: any) => b('id').primaryKey(), name: (b: any) => b('name').notNull(), data: (b: any) => b('data'), - createdAt: (b: any) => b('created_at', { mode: 'timestamp' }).notNull().defaultNow(), + created_at: (b: any) => b('created_at', { mode: 'timestamp' }).notNull().defaultNow(), } }; @@ -281,14 +238,13 @@ class MyCustomPlugin implements Plugin { description: 'Adds custom functionality to DeployStack', author: 'Your Name', }; - + // Database extension (optional - remove if not needed) databaseExtension: DatabaseExtension = { tableDefinitions: myCustomPluginTableDefinitions, - + // Optional initialization function for seeding data onDatabaseInit: async (db: AnyDatabase, logger?: FastifyBaseLogger) => { - // Note: In actual implementation, logger should be passed from PluginManager logger?.info(`Initializing database...`); const currentSchema = getSchema(); @@ -300,19 +256,11 @@ class MyCustomPlugin implements Plugin { return; } - let currentCount = 0; - if (isSQLiteDB(db)) { - const result = await db - .select({ count: sql`count(*)` }) - .from(table as SQLiteTable) - .get(); - currentCount = result?.count ?? 0; - } else { - const rows = await (db as NodePgDatabase) - .select({ count: sql`count(*)` }) - .from(table as PgTable); - currentCount = rows[0]?.count ?? 0; - } + // Check if we need to seed initial data + const rows = await db + .select({ count: sql`count(*)` }) + .from(table); + const currentCount = rows[0]?.count ?? 
0; if (currentCount === 0) { logger?.info(`Seeding initial data...`); @@ -322,19 +270,14 @@ class MyCustomPlugin implements Plugin { data: JSON.stringify({ initialized: true }), }; - if (isSQLiteDB(db)) { - await db.insert(table as SQLiteTable).values(dataToSeed).run(); - } else { - await (db as NodePgDatabase).insert(table as PgTable).values(dataToSeed); - } + await db.insert(table).values(dataToSeed); logger?.info(`Seeded initial data`); } }, }; - + // Plugin initialization (non-route initialization only) async initialize(db: AnyDatabase | null, logger?: FastifyBaseLogger) { - // Note: In actual implementation, logger should be passed from PluginManager logger?.info(`Initializing...`); // Non-route initialization only - routes are registered via registerRoutes method logger?.info(`Initialized successfully`); @@ -345,10 +288,9 @@ class MyCustomPlugin implements Plugin { const { registerRoutes } = await import('./routes'); await registerRoutes(routeManager, db); } - + // Optional shutdown method for cleanup async shutdown(logger?: FastifyBaseLogger) { - // Note: In actual implementation, logger should be passed from PluginManager logger?.info(`Shutting down...`); // Perform any cleanup needed } @@ -395,8 +337,11 @@ const myPluginTableDefinitions = { **Important Notes:** - Use `created_at` (snake_case) for database column names, not `createdAt` (camelCase) -- Timestamp columns with `{ mode: 'timestamp' }` automatically get `DEFAULT (strftime('%s', 'now'))` -- Column types are auto-detected: `id`/`count` → INTEGER, `*_at`/`*date` → INTEGER (timestamp), others → TEXT +- Timestamp columns with `{ mode: 'timestamp' }` automatically get `TIMESTAMP WITH TIME ZONE DEFAULT NOW()` +- Column types are auto-detected and converted for PostgreSQL: + - `id`/`count` → INTEGER + - `*_at`/`*date` → TIMESTAMP WITH TIME ZONE + - Others → TEXT - Tables are prefixed with your plugin ID: `my-plugin_my_entities` ### API Routes @@ -513,13 +458,13 @@ To test your plugin: Your plugin can 
access configuration provided by the plugin manager: ```typescript -async initialize(app: FastifyInstance, db: BetterSQLite3Database) { +async initialize(app: FastifyInstance, db: AnyDatabase) { // Access plugin-specific configuration const config = app.pluginManager.getPluginConfig(this.meta.id); - + // Use configuration values const apiKey = config?.apiKey as string; - + // Initialize with configuration } ``` @@ -613,8 +558,8 @@ Plugins can contribute their own global settings to the DeployStack system. Thes ```typescript // In your plugin's index.ts -import { - type Plugin, +import { + type Plugin, type GlobalSettingsExtension, // ... other imports } from '../../plugin-system/types'; @@ -682,9 +627,8 @@ class MyAwesomePlugin implements Plugin { // ... rest of your plugin implementation (databaseExtension, initialize, etc.) async initialize(app: FastifyInstance, db: AnyDatabase | null, logger?: FastifyBaseLogger) { - // Note: In actual implementation, logger should be passed from PluginManager logger?.info(`Initializing...`); - + // You can try to access your plugin's settings here if needed during init, // using GlobalSettingsService.get('myAwesomePlugin.features.enableSuperFeature') // Note: Ensure GlobalSettingsService is available or handle potential errors. diff --git a/development/backend/satellite/communication.mdx b/development/backend/satellite/communication.mdx index ad28cb6..909a34c 100644 --- a/development/backend/satellite/communication.mdx +++ b/development/backend/satellite/communication.mdx @@ -252,7 +252,7 @@ Configuration respects team boundaries and isolation: ### Core Table Structure -The satellite system integrates with existing DeployStack schema through 5 specialized tables. For detailed schema definitions, see [`services/backend/src/db/schema.sqlite.ts`](https://github.com/deploystackio/deploystack/blob/main/services/backend/src/db/schema.sqlite.ts). 
+The satellite system integrates with existing DeployStack schema through 5 specialized tables. For detailed schema definitions, see [`services/backend/src/db/schema.ts`](https://github.com/deploystackio/deploystack/blob/main/services/backend/src/db/schema.ts). **Satellite Registry** (`satellites`): - Central registration of all satellites @@ -412,7 +412,7 @@ server.get('/satellites/:satelliteId/commands', { The satellite system extends the existing database schema with 5 specialized tables: -**Schema Location**: `services/backend/src/db/schema.sqlite.ts` +**Schema Location**: `services/backend/src/db/schema.ts` **Table Relationships**: - `satellites` table links to existing `teams` and `authUser` tables @@ -479,11 +479,11 @@ npm run dev # Starts on http://localhost:3001 **Database Inspection**: ```bash # View registered satellites -sqlite3 services/backend/persistent_data/database/deploystack.db +psql deploystack > SELECT id, name, satellite_type, status FROM satellites; # View MCP server installations -> SELECT installation_name, team_id FROM mcpServerInstallations; +> SELECT installation_name, team_id FROM "mcpServerInstallations"; ``` ## API Documentation diff --git a/development/backend/satellite/events.mdx b/development/backend/satellite/events.mdx index ec35223..bed0292 100644 --- a/development/backend/satellite/events.mdx +++ b/development/backend/satellite/events.mdx @@ -79,7 +79,7 @@ export interface EventHandler { handle: ( satelliteId: string, eventData: Record, - db: LibSQLDatabase, + db: PostgresJsDatabase, eventTimestamp: Date ) => Promise; } @@ -208,8 +208,8 @@ Inserts record into `satelliteUsageLogs` for analytics and audit trails. 
Create a new file in `services/backend/src/events/satellite/`: ```typescript -import type { LibSQLDatabase } from 'drizzle-orm/libsql'; -import { yourTable } from '../../db/schema.sqlite'; +import type { PostgresJsDatabase } from 'drizzle-orm/postgres-js'; +import { yourTable } from '../../db/schema.ts'; import { eq } from 'drizzle-orm'; export const EVENT_TYPE = 'your.event.type'; @@ -240,7 +240,7 @@ interface YourEventData { export async function handle( satelliteId: string, eventData: Record, - db: LibSQLDatabase, + db: PostgresJsDatabase, eventTimestamp: Date ): Promise { const data = eventData as unknown as YourEventData; @@ -320,17 +320,6 @@ Each event is processed in a separate database transaction: - Maintains data consistency per event - Isolated error handling prevents cascade failures -### Database Driver Compatibility - -When updating records, use the driver-compatible pattern: - -```typescript -const result = await db.update(table).set(data).where(condition); - -// Handle both SQLite (changes) and Turso (rowsAffected) -const updated = (result.changes || result.rowsAffected || 0) > 0; -``` - ## Performance Considerations ### Batch Processing Efficiency @@ -524,7 +513,7 @@ LIMIT 10; **DO**: - Use parameterized queries via Drizzle ORM -- Handle both SQLite and Turso driver differences +- Use PostgreSQL-specific features when needed - Include timestamps for all state changes - Use transactions for multi-step operations - Index frequently queried fields diff --git a/development/backend/test.mdx b/development/backend/test.mdx index 53f9ad9..f40cdb0 100644 --- a/development/backend/test.mdx +++ b/development/backend/test.mdx @@ -85,8 +85,8 @@ The test suite uses a sophisticated database isolation strategy to ensure comple ### Timestamp-Based Isolation -Each test run creates a unique SQLite database file with a millisecond timestamp: -- Example: `deploystack-1704369600000.db` +Each test run creates a unique PostgreSQL database with a millisecond timestamp: +- 
Example: `deploystack-1704369600000` - This ensures complete isolation between parallel test runs - No conflicts when multiple developers run tests simultaneously - Automatic cleanup through directory removal @@ -173,10 +173,10 @@ When adding new E2E tests: - **Purpose**: Verifies the initial database setup functionality. - **Key Checks**: - Ensures the test database directory does not exist before setup. - - Calls `POST /api/db/setup` with `{"type": "sqlite"}`. - - Verifies the API response indicates successful setup initiation and includes `database_type: "sqlite"`. - - Checks that the SQLite database file is created in the test database directory (`persistent_data/database-test/deploystack-{timestamp}.db`). - - Calls `GET /api/db/status` and verifies the response shows `configured: true`, `initialized: true`, and `dialect: "sqlite"`. + - Calls `POST /api/db/setup` with `{"type": "postgresql"}`. + - Verifies the API response indicates successful setup initiation and includes `database_type: "postgresql"`. + - Checks that the PostgreSQL test database is created. + - Calls `GET /api/db/status` and verifies the response shows `configured: true`, `initialized: true`, and `dialect: "postgresql"`. - Validates global settings initialization without errors. - Confirms all migrations are applied successfully. - Tests proper error handling for duplicate setup attempts. 
diff --git a/development/backend/user-preferences-system.mdx b/development/backend/user-preferences-system.mdx index 7e23158..cc1a172 100644 --- a/development/backend/user-preferences-system.mdx +++ b/development/backend/user-preferences-system.mdx @@ -268,7 +268,7 @@ If you need to rename or remove preferences: - [API Security](/development/backend/api/security) - Security patterns and authorization - [Role Management](/development/backend/roles) - Permission system details -- [Database Schema](https://github.com/deploystackio/deploystack/blob/main/services/backend/src/db/schema.sqlite.ts) - Complete database schema reference +- [Database Schema](https://github.com/deploystackio/deploystack/blob/main/services/backend/src/db/schema.ts) - Complete database schema reference ## Key Benefits diff --git a/development/index.mdx b/development/index.mdx index 386303a..a67af86 100644 --- a/development/index.mdx +++ b/development/index.mdx @@ -123,7 +123,7 @@ deploystack/ ### Backend Stack - **Fastify** for high-performance cloud control plane - **TypeScript** with full type safety -- **Drizzle ORM** supporting SQLite and PostgreSQL +- **Drizzle ORM** with PostgreSQL - **Plugin System** with isolated routes (`/api/plugin//`) - **Role-Based Access Control** with session management diff --git a/development/satellite/backend-communication.mdx b/development/satellite/backend-communication.mdx index 2735103..90577ed 100644 --- a/development/satellite/backend-communication.mdx +++ b/development/satellite/backend-communication.mdx @@ -278,7 +278,7 @@ The Backend maintains satellite state in five tables: - `satelliteUsageLogs` - Usage analytics and audit - `satelliteHeartbeats` - Health monitoring data -See `services/backend/src/db/schema.sqlite.ts` for complete schema definitions. +See `services/backend/src/db/schema.ts` for complete schema definitions. 
## Security Implementation diff --git a/development/satellite/registration.mdx b/development/satellite/registration.mdx index 7331a11..ea48907 100644 --- a/development/satellite/registration.mdx +++ b/development/satellite/registration.mdx @@ -253,7 +253,7 @@ The Backend maintains satellite state across five database tables: - **satelliteUsageLogs**: Usage analytics and audit trails - **satelliteHeartbeats**: Health monitoring and status updates -See `services/backend/src/db/schema.sqlite.ts` for complete schema definitions. +See `services/backend/src/db/schema.ts` for complete schema definitions. ### Registration Database Operations diff --git a/docs.json b/docs.json index 0114c5f..af0ad0b 100644 --- a/docs.json +++ b/docs.json @@ -32,7 +32,8 @@ "/general/mcp-catalog", "/general/mcp-installation", "/general/mcp-categories", - "/general/mcp-admin-schema-workflow" + "/general/mcp-admin-schema-workflow", + "/general/mcp-oauth" ] }, { @@ -142,8 +143,7 @@ "group": "Database", "pages": [ "/development/backend/database/index", - "/development/backend/database/sqlite", - "/development/backend/database/turso" + "/development/backend/database/postgresql" ] }, { diff --git a/general/local-setup.mdx b/general/local-setup.mdx index 7c53130..5906e32 100644 --- a/general/local-setup.mdx +++ b/general/local-setup.mdx @@ -18,7 +18,7 @@ This guide is for contributors and developers who want to run DeployStack locall # - Git: Version control system # - Node.js v18+: JavaScript runtime (v18 or higher required) # - npm v8+: Package manager (comes with Node.js) -# - Docker: For running databases (optional but recommended) +# - Docker: For running PostgreSQL database # Verify Installation git --version @@ -110,6 +110,14 @@ DEPLOYSTACK_ENCRYPTION_SECRET=your-32-character-secret-here # Frontend URL (for CORS and redirects) DEPLOYSTACK_FRONTEND_URL=http://localhost:5173 +# PostgreSQL Configuration (matches postgres:local defaults) +POSTGRES_HOST=localhost +POSTGRES_PORT=5432 
+POSTGRES_DATABASE=deploystack +POSTGRES_USER=deploystack +POSTGRES_PASSWORD=deploystack +POSTGRES_SSL=false + # Development settings NODE_ENV=development PORT=3000 @@ -155,34 +163,38 @@ node -e "console.log(require('crypto').randomBytes(16).toString('hex'))" ``` -## Step 4: Set Up Database (Optional) - -DeployStack uses SQLite by default for development, but you can optionally set up PostgreSQL: +## Step 4: Start PostgreSQL Database - -```text SQLite (Default) -No additional setup required. DeployStack will create a SQLite database automatically in services/backend/persistent_data/. +DeployStack uses PostgreSQL as its database backend. For local development, we provide a convenient script to start PostgreSQL in Docker: -The database file will be created on first run: -services/backend/persistent_data/database/deploystack.db +```bash +# Start PostgreSQL 18 in Docker +npm run postgres:local ``` -```bash PostgreSQL (Optional) -# If you prefer PostgreSQL for development: +This command will: +- Pull PostgreSQL 18 Docker image +- Create a local PostgreSQL container with default credentials: + - **Host**: localhost + - **Port**: 5432 + - **Database**: deploystack + - **User**: deploystack + - **Password**: deploystack +- Create a persistent volume for database data + + + The PostgreSQL container will persist data between restarts. 
To reset the database, remove the `postgres_data` volume: `docker volume rm postgres_data` + + +### Verify PostgreSQL is Running -# Start PostgreSQL with Docker -docker run -d \ - --name deploystack-postgres \ - -e POSTGRES_DB=deploystack \ - -e POSTGRES_USER=deploystack \ - -e POSTGRES_PASSWORD=deploystack \ - -p 5432:5432 \ - postgres:16 +```bash +# Check if PostgreSQL container is running +docker ps | grep postgres-local -# Update your services/backend/.env: -DATABASE_URL=postgresql://deploystack:deploystack@localhost:5432/deploystack +# Test connection +psql -h localhost -U deploystack -d deploystack -c "SELECT version();" ``` - ## Step 5: Running the Development Servers @@ -244,13 +256,16 @@ Once both services are running: curl http://localhost:5173 # Frontend dev server ``` - + Open [http://localhost:5173](http://localhost:5173) in your browser. You should see the DeployStack interface. - - - Follow the on-screen setup wizard to create your first admin user and configure basic settings. 
+ + + Follow the on-screen setup wizard to: + - Configure PostgreSQL database connection + - Create your first admin user + - Set up basic platform settings @@ -268,6 +283,9 @@ Both services support hot reloading: From the project root: ```bash +# Database +npm run postgres:local # Start PostgreSQL in Docker + # Development npm run dev:frontend # Start frontend dev server npm run dev:backend # Start backend dev server @@ -281,6 +299,9 @@ npm run lint:frontend # Lint frontend code npm run lint:backend # Lint backend code npm run lint:md # Lint markdown files +# Database Migrations +npm run db:generate # Generate new migrations + # Testing npm run test:backend:unit # Run backend unit tests npm run test:backend:e2e # Run backend e2e tests @@ -304,7 +325,7 @@ deploystack/ │ ├── backend/ # Fastify backend API │ │ ├── src/ # Source code │ │ ├── tests/ # Test files -│ │ ├── persistent_data/ # SQLite database and uploads +│ │ ├── persistent_data/ # Database and application data │ │ ├── package.json # Backend dependencies │ │ └── tsconfig.json # TypeScript configuration │ └── shared/ # Shared utilities and types @@ -323,6 +344,7 @@ deploystack/ # Check what's using the port lsof -i :3000 # Backend port lsof -i :5173 # Frontend port +lsof -i :5432 # PostgreSQL port # Kill process using the port kill -9 @@ -355,17 +377,24 @@ Run your terminal as Administrator or ensure you have write permissions to the p ``` -#### Database Connection Issues +#### PostgreSQL Connection Issues ```bash -# Check if database directory exists -ls -la services/backend/persistent_data/ +# Check if PostgreSQL container is running +docker ps | grep postgres-local -# Create directory if missing -mkdir -p services/backend/persistent_data/database +# View PostgreSQL logs +docker logs postgres-local -# Check database file permissions -ls -la services/backend/persistent_data/database/ +# Restart PostgreSQL +docker stop postgres-local +npm run postgres:local + +# Reset PostgreSQL (removes all data) +docker 
stop postgres-local +docker rm postgres-local +docker volume rm postgres_data +npm run postgres:local ``` #### Environment Variable Issues @@ -377,6 +406,9 @@ ls -la services/frontend/.env # Check if encryption secret is set grep DEPLOYSTACK_ENCRYPTION_SECRET services/backend/.env + +# Check PostgreSQL configuration +grep POSTGRES services/backend/.env ``` ### Getting Help diff --git a/self-hosted/database-setup.mdx b/self-hosted/database-setup.mdx index 1bd74b4..5198c32 100644 --- a/self-hosted/database-setup.mdx +++ b/self-hosted/database-setup.mdx @@ -1,6 +1,6 @@ --- title: Database Setup for Self-Hosting -description: Step-by-step guide to configure your database when self-hosting DeployStack - designed for non-technical users. +description: Step-by-step guide to configure PostgreSQL for your self-hosted DeployStack instance. Sidebar: Database Setup Icon: Database --- @@ -8,161 +8,361 @@ Icon: Database ## Overview -When you first start your self-hosted DeployStack instance, you'll need to choose and configure a database. This guide will walk you through the process step-by-step. +DeployStack uses PostgreSQL as its database backend, providing enterprise-grade reliability with ACID compliance, connection pooling, and advanced features for production deployments. -**Important**: This setup only needs to be done once when you first install DeployStack. +**Important**: PostgreSQL must be running and accessible before starting your DeployStack instance. ## What You'll Need -- Your DeployStack instance running (backend and frontend) -- Access to your server's environment variables (if choosing cloud databases) -- About 5-10 minutes to complete the setup +- PostgreSQL 13+ installed and running (or included in Docker Compose) +- Database connection details (host, port, username, password) +- About 5-10 minutes to complete the configuration -## Step 1: Access the Setup Page +## Deployment Options -1. 
**Start your DeployStack instance** following your installation guide -2. **Open your web browser** and navigate to your DeployStack URL -3. **You'll be automatically redirected** to the setup page at `/setup` +### Option 1: Docker Compose (Recommended) -If you see a message like "Database setup required" or are redirected to a setup page, you're in the right place! +If you're using our Docker Compose setup, PostgreSQL is included and automatically configured. No manual database setup required! -## Step 2: Choose Your Database +```bash +# PostgreSQL is automatically included +docker-compose up -d +``` -You'll see two database options. Here's what each one means: +The Docker Compose setup includes: +- PostgreSQL 18 Alpine +- Automatic health checks +- Persistent data volume +- Pre-configured connection details -### Option 1: SQLite (Recommended for Most Users) -- **Best for**: Small to medium teams, development, testing -- **Pros**: - - No additional setup required - - Works immediately - - No external dependencies - - Perfect for getting started -- **Cons**: - - Single server only (no clustering) - - Limited to one database file +### Option 2: External PostgreSQL Server -**Choose this if**: You're just getting started, have a small team, or want the simplest setup. +For production deployments with existing PostgreSQL infrastructure: -### Option 2: Turso (For Advanced Users) -- **Best for**: Advanced users needing distributed databases -- **Pros**: - - Multi-region replication - - Advanced SQLite features - - Good performance -- **Cons**: - - Requires Turso account - - More complex setup +## Step 1: Prepare PostgreSQL Database -**Choose this if**: You need advanced database features or multi-region deployment. +### Create Database and User -## Step 3: Configure Your Chosen Database +Connect to your PostgreSQL server and create a dedicated database and user: -### If You Chose SQLite (Easiest) +```sql +-- Connect to PostgreSQL as admin +psql -U postgres -1. 
**Select "SQLite"** from the options -2. **Click "Setup Database"** -3. **Wait for confirmation** (usually takes 10-30 seconds) -4. **Done!** You'll be redirected to the main application +-- Create database +CREATE DATABASE deploystack; -No additional configuration needed - SQLite works out of the box! +-- Create user with password +CREATE USER deploystack_user WITH ENCRYPTED PASSWORD 'your_secure_password_here'; -### If You Chose Turso +-- Grant privileges +GRANT ALL PRIVILEGES ON DATABASE deploystack TO deploystack_user; -Before you can use Turso, you need to set up environment variables: +-- Grant schema privileges (PostgreSQL 15+) +\c deploystack +GRANT ALL ON SCHEMA public TO deploystack_user; +GRANT ALL ON ALL TABLES IN SCHEMA public TO deploystack_user; +GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO deploystack_user; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO deploystack_user; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO deploystack_user; -#### Prerequisites -1. **Create a Turso account** at [turso.tech](https://turso.tech) -2. 
**Install Turso CLI** and create a database: - ```bash - turso db create deploystack-db +-- Exit +\q +``` + +### Verify Connection + +Test the database connection: + +```bash +# Test connection +psql -h localhost -U deploystack_user -d deploystack -c "SELECT version();" + +# You should see PostgreSQL version information +``` + +## Step 2: Configure Environment Variables + +Set PostgreSQL connection details in your environment: + +### For Docker Deployments + +Add to your `.env` file: + +```bash +# PostgreSQL Configuration +POSTGRES_HOST=your-postgres-host # e.g., localhost or postgres.example.com +POSTGRES_PORT=5432 # Default PostgreSQL port +POSTGRES_DATABASE=deploystack # Database name +POSTGRES_USER=deploystack_user # Database user +POSTGRES_PASSWORD=your_secure_password_here +POSTGRES_SSL=false # Set to 'true' for SSL connections +``` + +### For Local Development + +Edit `services/backend/.env`: + +```bash +# PostgreSQL Configuration +POSTGRES_HOST=localhost +POSTGRES_PORT=5432 +POSTGRES_DATABASE=deploystack +POSTGRES_USER=deploystack +POSTGRES_PASSWORD=deploystack +POSTGRES_SSL=false +``` + +## Step 3: Start DeployStack + +Once PostgreSQL is configured, start your DeployStack instance: + +### Docker Compose + +```bash +docker-compose up -d +``` + +### Individual Containers + +```bash +# Start backend with PostgreSQL configuration +docker run -d \ + --name deploystack-backend \ + -p 3000:3000 \ + -e POSTGRES_HOST=your-postgres-host \ + -e POSTGRES_PORT=5432 \ + -e POSTGRES_DATABASE=deploystack \ + -e POSTGRES_USER=deploystack_user \ + -e POSTGRES_PASSWORD=your_secure_password_here \ + -e POSTGRES_SSL=false \ + -e DEPLOYSTACK_ENCRYPTION_SECRET=your-secret-here \ + -v deploystack_backend_persistent:/app/persistent_data \ + deploystack/backend:latest +``` + +## Step 4: Complete Setup Wizard + +1. **Access DeployStack**: Navigate to your frontend URL (e.g., `http://localhost:8080`) +2. **Automatic Redirect**: You'll be redirected to `/setup` +3. 
**Database Initialization**: The wizard will: + - Test PostgreSQL connection + - Apply database migrations + - Create necessary tables + - Initialize system data +4. **Create Admin Account**: Set up your administrator account +5. **Configuration**: Complete basic platform settings + +## SSL/TLS Connection + +For secure connections to PostgreSQL: + +### Enable SSL in PostgreSQL + +1. **Configure PostgreSQL** (`postgresql.conf`): + ```conf + ssl = on + ssl_cert_file = '/path/to/server.crt' + ssl_key_file = '/path/to/server.key' + ssl_ca_file = '/path/to/root.crt' ``` -3. **Get your database URL and auth token**: + +2. **Set Environment Variable**: ```bash - turso db show deploystack-db - turso db tokens create deploystack-db + POSTGRES_SSL=true ``` -#### Server Configuration -Add these environment variables to your server: +3. **Restart PostgreSQL** and DeployStack backend + +## Production Considerations + +### Connection Pooling + +DeployStack uses `node-postgres` with connection pooling: + +- Default max connections: 20 +- Idle timeout: 30 seconds +- Connection timeout: 2 seconds + +### Database Maintenance + +```bash +# Vacuum database (reclaim storage) +psql -U deploystack_user -d deploystack -c "VACUUM ANALYZE;" + +# Check database size +psql -U deploystack_user -d deploystack -c "SELECT pg_size_pretty(pg_database_size('deploystack'));" + +# View active connections +psql -U deploystack_user -d deploystack -c "SELECT count(*) FROM pg_stat_activity WHERE datname = 'deploystack';" +``` + +### Backup Strategy ```bash -TURSO_DATABASE_URL=libsql://your-database-url -TURSO_AUTH_TOKEN=your_auth_token_here +# Create backup +pg_dump -h localhost -U deploystack_user deploystack > backup.sql + +# Compressed backup +pg_dump -h localhost -U deploystack_user deploystack | gzip > backup.sql.gz + +# Custom format (supports parallel restore) +pg_dump -h localhost -U deploystack_user -Fc deploystack > backup.dump + +# Restore from backup +psql -h localhost -U deploystack_user 
deploystack < backup.sql + +# Restore from custom format +pg_restore -h localhost -U deploystack_user -d deploystack backup.dump ``` -#### Complete Setup -1. **Restart your DeployStack instance** after setting the environment variables -2. **Go back to the setup page** (`/setup`) -3. **Select "Turso"** -4. **Click "Setup Database"** -5. **Wait for confirmation** +### Performance Tuning -## Step 4: Verify Setup +Edit PostgreSQL configuration (`postgresql.conf`): -After successful setup, you should: +```conf +# Memory settings +shared_buffers = 256MB # 25% of RAM +effective_cache_size = 1GB # 50-75% of RAM +maintenance_work_mem = 64MB +work_mem = 16MB -1. **See a success message** confirming database initialization -2. **Be redirected to the main application** -3. **Be able to create your first user account** +# Connections +max_connections = 100 -If you see any errors, check the troubleshooting section below. +# Write-ahead log +wal_buffers = 16MB +checkpoint_completion_target = 0.9 + +# Query planner +random_page_cost = 1.1 # For SSD storage +effective_io_concurrency = 200 # For SSD storage +``` ## Troubleshooting -### "Database setup has already been performed" -- This means your database is already configured -- You can proceed to use the application normally -- If you need to change databases, contact your system administrator +### "Connection refused" or "Cannot connect" + +**Solutions**: +1. **Check PostgreSQL is running**: + ```bash + # For system service + sudo systemctl status postgresql + + # For Docker + docker ps | grep postgres + ``` + +2. **Check PostgreSQL is listening**: + ```bash + netstat -an | grep 5432 + ``` + +3. **Check PostgreSQL configuration** (`postgresql.conf`): + ```conf + listen_addresses = '*' # Or specific IP + ``` + +4. **Check firewall rules**: + ```bash + # Allow PostgreSQL port + sudo ufw allow 5432 + ``` + +### "Authentication failed" + +**Solutions**: +1. **Verify credentials**: Double-check username and password +2. 
**Check pg_hba.conf**: + ```conf + # Allow password authentication + host all all 0.0.0.0/0 md5 + ``` +3. **Reload PostgreSQL** after config changes: + ```bash + sudo systemctl reload postgresql + ``` + +### "Database does not exist" + +**Solutions**: +1. **Create database** as shown in Step 1 +2. **Check database name** matches environment variable +3. **Verify user has access**: + ```sql + \l -- List all databases + ``` -### "Configuration incomplete" or "Missing environment variables" -- **For Turso**: Check that both Turso environment variables are set correctly -- **Restart your server** after setting environment variables +### "Permission denied" -### "Failed to connect" or "Network error" -- **Check your internet connection** -- **For Turso**: Verify your database URL and auth token are correct -- **Check server logs** for more detailed error messages +**Solutions**: +1. **Grant proper privileges** as shown in Step 1 +2. **Check user permissions**: + ```sql + \du -- List user permissions + ``` + +### Migration Errors -### Setup page keeps loading -- **Check that your backend server is running** -- **Verify the backend is accessible** from your browser -- **Check browser console** for any JavaScript errors +**Solutions**: +1. **Check PostgreSQL version**: DeployStack requires PostgreSQL 13+ +2. **Verify user privileges**: User needs CREATE, ALTER, DROP permissions +3. **Check logs**: Review backend logs for detailed error messages +4. **Manual migration reset** (development only): + ```sql + -- Connect to database + psql -U deploystack_user -d deploystack -## Changing Databases Later + -- Drop all tables + DROP SCHEMA public CASCADE; + CREATE SCHEMA public; + GRANT ALL ON SCHEMA public TO deploystack_user; -**Important**: Once you've set up a database, changing to a different type requires: + -- Restart backend to re-apply migrations + ``` -1. **Backing up your data** (if you have important information) -2. **Stopping your DeployStack instance** -3. 
**Removing the database selection file** (`persistent_data/db.selection.json`) -4. **Updating environment variables** for the new database type -5. **Restarting and going through setup again** +## Monitoring -**Note**: This will reset your application data, so make sure to backup anything important first. +### Check Database Health -## Getting Help +```sql +-- Check active connections +SELECT count(*) FROM pg_stat_activity WHERE datname = 'deploystack'; -If you're having trouble with database setup: +-- Check table sizes +SELECT schemaname, tablename, pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) +FROM pg_tables +WHERE schemaname = 'public' +ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC; -1. **Check the server logs** for detailed error messages -2. **Verify environment variables** are set correctly -3. **Ensure your server has internet access** (for cloud databases) -4. **Contact support** with your error messages and setup details +-- Check index usage +SELECT schemaname, tablename, indexname, idx_scan as scans +FROM pg_stat_user_indexes +ORDER BY idx_scan DESC; + +-- Check slow queries +SELECT pid, now() - query_start as duration, query +FROM pg_stat_activity +WHERE state = 'active' AND now() - query_start > interval '1 second' +ORDER BY duration DESC; +``` ## Security Notes -- **Keep your API tokens secure** - never share them publicly -- **Use environment variables** - don't put credentials directly in code -- **Regularly rotate API tokens** for cloud databases -- **Backup your SQLite database file** if using SQLite +- **Use strong passwords** for database users +- **Enable SSL/TLS** for production deployments +- **Restrict network access** using pg_hba.conf +- **Regular backups** are essential for data protection +- **Rotate passwords** periodically +- **Monitor access logs** for suspicious activity ## Next Steps After successful database setup: -1. **Create your administrator account** -2. 
**Configure your application settings** -3. **Set up user authentication** (email, GitHub, etc.) -4. **Invite your team members** +1. **Complete Setup Wizard** - Create your admin account +2. **Configure Global Settings** - Set up email, authentication, etc. +3. **Deploy Satellites** - Set up MCP server management infrastructure +4. **Create Teams** - Invite team members and set up workspaces Your DeployStack instance is now ready to use! diff --git a/self-hosted/setup.mdx b/self-hosted/setup.mdx index 5a75cf6..2bfc1f3 100644 --- a/self-hosted/setup.mdx +++ b/self-hosted/setup.mdx @@ -25,12 +25,13 @@ Configure your self-hosted DeployStack instance with essential settings to custo If this is a fresh installation, first visit `https://<your-domain>/setup` to complete the database initialization wizard. This creates: **For Docker deployments:** - - Database configuration stored in the Docker volume `deploystack_backend_persistent` + - PostgreSQL database configuration stored in the Docker volume `deploystack_backend_persistent` + - PostgreSQL data stored in `deploystack_postgres_data` volume - Access the setup wizard at `http://localhost:8080/setup` (or your configured frontend URL) - + **For local development:** - - `services/backend/persistent_data/db.selection.json` (database type configuration) - - `services/backend/persistent_data/database/deploystack.db` (if using SQLite) + - PostgreSQL connection configured via environment variables in `services/backend/.env` + - `services/backend/persistent_data/db.selection.json` (database initialization status) @@ -185,8 +186,8 @@ Follow this recommended setup workflow for new DeployStack instances: - Navigate to `https://<your-domain>/setup` (Docker: `http://localhost:8080/setup` by default) - - Complete the database setup wizard (SQLite or Turso) - - This initializes the database and saves configuration + - Complete the database setup wizard (PostgreSQL) + - This initializes the PostgreSQL database and applies migrations - Create your admin account 
- Log in to the platform @@ -371,9 +372,9 @@ docker run --rm -v deploystack_backend_persistent:/data \ tar xzf /backup/deploystack-backup-20250108.tar.gz -C / # The volume contains: -# - database/deploystack.db - SQLite database (if using SQLite) -# - db.selection.json - Database type configuration +# - db.selection.json - Database initialization status # - Any other persistent application data +# Note: PostgreSQL data is stored separately in deploystack_postgres_data volume ``` ```bash Local Development @@ -389,9 +390,9 @@ tar czf deploystack-backup-$(date +%Y%m%d).tar.gz \ tar xzf deploystack-backup-20250108.tar.gz # The directory contains: -# - database/deploystack.db - SQLite database (if using SQLite) -# - db.selection.json - Database type configuration +# - db.selection.json - Database initialization status # - Any other persistent application data +# Note: PostgreSQL runs separately via Docker (npm run postgres:local) ```