diff --git a/.github/workflows/branch-preview.yaml b/.github/workflows/branch-preview.yaml new file mode 100644 index 00000000..30fc5da7 --- /dev/null +++ b/.github/workflows/branch-preview.yaml @@ -0,0 +1,430 @@ +name: Build and publish preview version +on: + workflow_dispatch: + +# Cancel previous runs for the same branch when a new one is triggered +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: write + +jobs: + prepare: + name: Prepare preview build + runs-on: ubuntu-latest + outputs: + version: ${{ steps.version.outputs.version }} + steps: + - uses: actions/checkout@v4 + - name: Get next version + uses: reecetech/version-increment@2023.10.2 + id: base-version + with: + scheme: "calver" + increment: "patch" + use_api: "false" + - name: Build preview version string + id: version + run: | + SHORT_SHA=$(git rev-parse --short=7 HEAD) + VERSION="${{ steps.base-version.outputs.version }}-preview.${SHORT_SHA}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + + build-setup: + name: Build Setup (clean, tools, schemas, lint, fmt) + runs-on: blacksmith-8vcpu-ubuntu-2204 + needs: prepare + outputs: + cicd-bot-telegram-token: ${{ steps.telegram-secrets.outputs.cicd-bot-telegram-token }} + cicd-bot-telegram-chat-id: ${{ steps.telegram-secrets.outputs.cicd-bot-telegram-chat-id }} + steps: + - uses: actions/checkout@v4 + - uses: fregante/setup-git-user@v2 + - name: Set up Go with Blacksmith caching + uses: useblacksmith/setup-go@v6 + with: + go-version: '1.25' + - name: install sc tool (latest release) + shell: bash + run: |- + curl -s "https://dist.simple-container.com/sc.sh" | bash + - name: prepare secrets for build + run: | + cat << EOF > ./.sc/cfg.default.yaml + ${{ secrets.SC_CONFIG }} + EOF + cat << EOF > ./.sc/cfg.test.yaml + ${{ secrets.SC_CONFIG }} + EOF + sc secrets reveal + - name: get openai key + id: get-openai-key + run: | + echo "openai-key=$(sc stack secret-get -s dist openai-api-key 
2>/dev/null || echo '')" >> $GITHUB_OUTPUT + - name: prepare sc tool (rebuild) + shell: bash + env: + OPENAI_API_KEY: ${{ steps.get-openai-key.outputs.openai-key }} + SKIP_EMBEDDINGS: "true" + run: |- + git remote set-url origin https://${{ secrets.GITHUB_TOKEN }}@github.com/simple-container-com/api.git + bash <(curl -Ls "https://welder.simple-container.com/welder.sh") run rebuild + - name: clean + run: | + mkdir -p dist + rm -fR dist/* + mkdir -p .sc/stacks/dist/bundle + rm -fR .sc/stacks/dist/bundle/* + mkdir -p docs/site + rm -fR docs/site/* + mkdir -p docs/schemas + rm -fR docs/schemas/* + - name: tools + run: | + cat tools.go | grep _ | awk -F'"' '{print $2}' | xargs -tI % go get % + go mod download + go generate -tags tools + go mod tidy + - name: generate-schemas + run: | + echo "Generating JSON Schema files for Simple Container resources..." + go build -o bin/schema-gen ./cmd/schema-gen + bin/schema-gen docs/schemas + echo "Successfully generated JSON Schema files in docs/schemas/" + - name: fmt + run: | + go mod tidy + bin/gofumpt -l -w ./ + bin/golangci-lint run --fix --timeout 3m -v + - name: get telegram secrets + id: telegram-secrets + run: | + echo "cicd-bot-telegram-token=$(./bin/sc stack secret-get -s dist cicd-bot-telegram-token)" >> $GITHUB_OUTPUT + echo "cicd-bot-telegram-chat-id=$(./bin/sc stack secret-get -s dist cicd-bot-telegram-chat-id)" >> $GITHUB_OUTPUT + - name: upload bin directory artifacts + uses: actions/upload-artifact@v4 + with: + name: bin-tools + path: bin + retention-days: 1 + + build-platforms: + name: Build sc for ${{ matrix.os }}/${{ matrix.arch }} + runs-on: blacksmith-8vcpu-ubuntu-2204 + needs: [prepare, build-setup] + strategy: + matrix: + include: + - os: linux + arch: amd64 + - os: darwin + arch: arm64 + - os: darwin + arch: amd64 + steps: + - uses: actions/checkout@v4 + - name: Set up Go with Blacksmith caching + uses: useblacksmith/setup-go@v6 + with: + go-version: '1.25' + - name: create build directories + run: | + 
mkdir -p dist + mkdir -p .sc/stacks/dist/bundle + - name: build sc for ${{ matrix.os }}/${{ matrix.arch }} + env: + GOOS: ${{ matrix.os }} + GOARCH: ${{ matrix.arch }} + CGO_ENABLED: "0" + VERSION: ${{ needs.prepare.outputs.version }} + run: | + echo "Building for ${GOOS}/${GOARCH}..." + if [ "${GOOS}" = "windows" ]; then export EXT=".exe"; else export EXT=""; fi + go build -ldflags "-s -w -X=github.com/simple-container-com/api/internal/build.Version=${VERSION}" -o dist/${GOOS}-${GOARCH}/sc${EXT} ./cmd/sc + tar -czf .sc/stacks/dist/bundle/sc-${GOOS}-${GOARCH}.tar.gz -C dist/${GOOS}-${GOARCH} sc${EXT} + cp .sc/stacks/dist/bundle/sc-${GOOS}-${GOARCH}.tar.gz .sc/stacks/dist/bundle/sc-${GOOS}-${GOARCH}-v${VERSION}.tar.gz + - name: upload build artifacts + uses: actions/upload-artifact@v4 + with: + name: sc-${{ matrix.os }}-${{ matrix.arch }} + path: .sc/stacks/dist/bundle/sc-${{ matrix.os }}-${{ matrix.arch }}-v*.tar.gz + retention-days: 1 + + build-binaries: + name: Build github-actions binary + runs-on: blacksmith-8vcpu-ubuntu-2204 + needs: [prepare, build-setup] + steps: + - uses: actions/checkout@v4 + - name: Set up Go with Blacksmith caching + uses: useblacksmith/setup-go@v6 + with: + go-version: '1.25' + - name: create build directories + run: | + mkdir -p dist + - name: build github-actions + env: + VERSION: ${{ needs.prepare.outputs.version }} + CGO_ENABLED: "0" + run: | + go build -a -installsuffix cgo -ldflags "-s -w -X=github.com/simple-container-com/api/internal/build.Version=${VERSION}" -o dist/github-actions ./cmd/github-actions + - name: upload github-actions binary + uses: actions/upload-artifact@v4 + with: + name: github-actions-binary + path: dist/github-actions + retention-days: 1 + + test: + name: Run tests + runs-on: blacksmith-8vcpu-ubuntu-2204 + needs: [prepare, build-setup] + steps: + - uses: actions/checkout@v4 + - name: Set up Go with Blacksmith caching + uses: useblacksmith/setup-go@v6 + with: + go-version: '1.25' + - name: test + run: | + go 
test ./... + + docker-build: + name: Docker build and push preview github-actions image + runs-on: blacksmith-8vcpu-ubuntu-2204 + # Does not need build-platforms — SC binaries are not used in the github-actions Docker image. + # Starts as soon as build-binaries + test finish, in parallel with build-platforms. + needs: [prepare, build-setup, build-binaries, test] + steps: + - uses: actions/checkout@v4 + - name: download github-actions binary + uses: actions/download-artifact@v4 + with: + name: github-actions-binary + path: dist + - name: fix binary permissions + run: chmod +x dist/github-actions + - name: install sc tool (latest release) + shell: bash + run: |- + curl -s "https://dist.simple-container.com/sc.sh" | bash + - name: prepare secrets for build + run: | + cat << EOF > ./.sc/cfg.default.yaml + ${{ secrets.SC_CONFIG }} + EOF + sc secrets reveal + - name: Setup Docker Buildx with advanced caching + uses: docker/setup-buildx-action@v3 + with: + driver-opts: | + image=moby/buildkit:buildx-stable-1 + buildkitd-flags: --allow-insecure-entitlement security.insecure + - name: Docker login using SC secrets + run: | + sc stack secret-get -s dist dockerhub-cicd-token | docker login --username simplecontainer --password-stdin + - name: Build and push preview github-actions image + env: + DOCKER_BUILDKIT: 1 + VERSION: ${{ needs.prepare.outputs.version }} + run: | + docker buildx build \ + --platform linux/amd64 \ + --cache-from type=gha \ + --cache-to type=gha,mode=max \ + --file github-actions.Dockerfile \ + --tag "simplecontainer/github-actions:${VERSION}" \ + --push \ + . + + publish-sc-preview: + name: Publish preview SC binaries to dist + runs-on: blacksmith-8vcpu-ubuntu-2204 + # Does not need docker-build — SC binary publishing is independent of the Docker image. + # Runs in parallel with publish-git-tag. 
+ needs: [prepare, build-setup, build-platforms, test] + steps: + - uses: actions/checkout@v4 + - name: download all sc platform artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + pattern: sc-* + - name: download bin tools artifact + uses: actions/download-artifact@v4 + with: + name: bin-tools + path: bin + - name: fix bin tools permissions + run: chmod +x bin/* + - name: install sc tool (latest release) + shell: bash + run: |- + curl -s "https://dist.simple-container.com/sc.sh" | bash + - name: prepare secrets for build + run: | + cat << EOF > ./.sc/cfg.default.yaml + ${{ secrets.SC_CONFIG }} + EOF + cat << EOF > ./.sc/cfg.test.yaml + ${{ secrets.SC_CONFIG }} + EOF + sc secrets reveal + - name: assemble preview dist bundle (versioned tarballs only) + env: + VERSION: ${{ needs.prepare.outputs.version }} + run: | + mkdir -p .sc/stacks/dist/bundle + rm -fR .sc/stacks/dist/bundle/* + # Copy only versioned tarballs — do NOT add sc.sh or the version file. + # This prevents overwriting the latest pointer for users running sc.sh without a version pin. + cp artifacts/sc-*/*-v${VERSION}.tar.gz .sc/stacks/dist/bundle/ + echo "Bundle contents:" + ls -la .sc/stacks/dist/bundle/ + - name: publish preview sc binaries + shell: bash + env: + VERSION: ${{ needs.prepare.outputs.version }} + run: |- + bash <(curl -Ls "https://welder.simple-container.com/welder.sh") deploy -e prod --timestamps + + publish-git-tag: + name: Create release commit and push git tag + runs-on: blacksmith-8vcpu-ubuntu-2204 + # Only needs docker-build — the tag must point to a commit referencing a published Docker image. + # Does not need build-platforms or publish-sc-preview. Runs in parallel with publish-sc-preview. 
+ needs: [prepare, docker-build] + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: fregante/setup-git-user@v2 + - name: configure git remote with token + run: | + git remote set-url origin https://${{ secrets.GITHUB_TOKEN }}@github.com/simple-container-com/api.git + - name: create release branch and update action.yml image tags + env: + VERSION: ${{ needs.prepare.outputs.version }} + run: | + RELEASE_BRANCH="release/${VERSION}" + git checkout -b "${RELEASE_BRANCH}" + + # Replace :staging image tag with the preview version tag in all action.yml files + find .github/actions -name "action.yml" | while read f; do + sed -i "s|docker://simplecontainer/github-actions:staging|docker://simplecontainer/github-actions:${VERSION}|g" "$f" + done + + echo "Updated action.yml files:" + grep -r "docker://simplecontainer/github-actions:" .github/actions/ + - name: commit, tag and push + env: + VERSION: ${{ needs.prepare.outputs.version }} + run: | + RELEASE_BRANCH="release/${VERSION}" + git add .github/actions/*/action.yml + git commit -m "chore: release preview v${VERSION} - update github-actions image tag" + git tag "v${VERSION}" + git push origin "${RELEASE_BRANCH}" + git push origin "v${VERSION}" + + finalize: + name: Finalize preview build + runs-on: ubuntu-latest + if: ${{ always() }} + permissions: + contents: write + needs: + - prepare + - build-setup + - publish-sc-preview + - publish-git-tag + steps: + - uses: actions/checkout@v4 + if: ${{ always() }} + + - name: Extract git reference + id: extract_git_ref + if: ${{ always() }} + shell: bash + run: |- + cat <<'EOF' > /tmp/commit_message.txt + ${{ github.event.head_commit.message || github.event.workflow_run.head_commit.message }} + EOF + message="$(cat /tmp/commit_message.txt | tr -d '\n')" + if [ ${#message} -gt 200 ]; then + truncated_message="${message:0:80}...${message: -80}" + message="$truncated_message" + fi + echo "branch=$GITHUB_REF_NAME" >> $GITHUB_OUTPUT + echo "message=$message" >> 
$GITHUB_OUTPUT + echo "author=$GITHUB_ACTOR" >> $GITHUB_OUTPUT + echo "url=$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" >> $GITHUB_OUTPUT + + - name: Write build summary + if: ${{ !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') }} + env: + VERSION: ${{ needs.prepare.outputs.version }} + BRANCH: ${{ github.ref_name }} + # GHA evaluates ${{ '${{' }} to the literal string ${{ so SC_CONFIG_EXPR becomes + # the printable text "${{ secrets.SC_CONFIG }}" for use in markdown examples. + SC_CONFIG_EXPR: "${{ '${{' }} secrets.SC_CONFIG }}" + run: | + cat >> "$GITHUB_STEP_SUMMARY" << ENDSUMMARY + ## Preview build \`v${VERSION}\` + + ### Use SC GitHub Actions at this version + + Reference any SC action with the \`@v${VERSION}\` tag: + + \`\`\`yaml + - uses: simple-container-com/api/.github/actions/deploy-client-stack@v${VERSION} + with: + stack-name: my-stack + sc-config: ${SC_CONFIG_EXPR} + + - uses: simple-container-com/api/.github/actions/provision-parent-stack@v${VERSION} + with: + stack-name: my-stack + sc-config: ${SC_CONFIG_EXPR} + + - uses: simple-container-com/api/.github/actions/destroy@v${VERSION} + with: + stack-name: my-stack + sc-config: ${SC_CONFIG_EXPR} + \`\`\` + + Docker image: \`simplecontainer/github-actions:${VERSION}\` + + ### Install this SC version via CLI + + \`\`\`bash + SIMPLE_CONTAINER_VERSION=${VERSION} curl -s "https://dist.simple-container.com/sc.sh" | bash + \`\`\` + + > Preview build from branch \`${BRANCH}\`. Running \`sc.sh\` without \`SIMPLE_CONTAINER_VERSION\` will **not** pick up this version. 
+ ENDSUMMARY + + - uses: yanzay/notify-telegram@v0.1.0 + if: ${{ success() && !contains(needs.*.result, 'failure') }} + continue-on-error: true + with: + chat: ${{ needs.build-setup.outputs.cicd-bot-telegram-chat-id }} + token: ${{ needs.build-setup.outputs.cicd-bot-telegram-token }} + status: ✅ preview published (${{ steps.extract_git_ref.outputs.branch }}) (v${{ needs.prepare.outputs.version }}) - ${{ steps.extract_git_ref.outputs.message }} by ${{ steps.extract_git_ref.outputs.author }} + - uses: yanzay/notify-telegram@v0.1.0 + if: ${{ failure() || contains(needs.*.result, 'failure') }} + continue-on-error: true + with: + chat: ${{ needs.build-setup.outputs.cicd-bot-telegram-chat-id }} + token: ${{ needs.build-setup.outputs.cicd-bot-telegram-token }} + status: ❗ preview failed (${{ steps.extract_git_ref.outputs.branch }}) - ${{ steps.extract_git_ref.outputs.message }} by ${{ steps.extract_git_ref.outputs.author }} + + - name: Build failed due to previously failed steps + id: fail_if_needed + if: ${{ failure() || contains(needs.*.result, 'failure') }} + shell: bash + run: |- + exit 1 diff --git a/docs/design/2026-04-07/branch-preview-workflow/architecture.md b/docs/design/2026-04-07/branch-preview-workflow/architecture.md new file mode 100644 index 00000000..df7d19f3 --- /dev/null +++ b/docs/design/2026-04-07/branch-preview-workflow/architecture.md @@ -0,0 +1,501 @@ +# Branch Preview Workflow — Architecture Design + +**Date**: 2026-04-07 +**Branch**: `feature/branch-builds-for-preview-versions` +**Status**: Draft + +--- + +## 1. Overview + +A new GitHub Actions workflow (`branch-preview.yaml`) that builds and publishes a fully testable preview release of Simple Container from any feature branch. 
Unlike the main `push.yaml` release, a preview build: + +- Does **not** overwrite the `sc.sh` installer or the global `version` file at `dist.simple-container.com` +- Publishes versioned SC binaries downloadable by pinning `SIMPLE_CONTAINER_VERSION` +- Publishes a branch-specific `simplecontainer/github-actions:{version}` Docker image +- Creates a dedicated release commit (on a separate `release/{version}` branch) where all `action.yml` files reference the preview Docker image tag +- Pushes a git tag `v{version}` pointing to that release commit, making the SC GitHub Actions usable at that exact version via `@v{version}` + +--- + +## 2. Version Format + +Preview versions follow the CalVer pattern already used by `push.yaml`, extended with a commit suffix: + +``` +{YYYY}.{MM}.{DD}.{patch}-preview.{short_sha} +``` + +**Example**: `2026.04.07.3-preview.abc1234` + +### Computation logic + +The `prepare` job uses the same `reecetech/version-increment@2023.10.2` action as `push.yaml` to compute the next CalVer version, but with `use_api: "false"` so it only calculates the version without creating a real git tag (the actual tag is created later by `publish-git-tag` with the preview suffix): + +```yaml +- name: Get next version + uses: reecetech/version-increment@2023.10.2 + id: base-version + with: + scheme: "calver" + increment: "patch" + use_api: "false" # compute only — no git tag created here + +- name: Build preview version string + id: version + run: | + SHORT_SHA=$(git rev-parse --short=7 HEAD) + VERSION="${{ steps.base-version.outputs.version }}-preview.${SHORT_SHA}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "short-sha=${SHORT_SHA}" >> $GITHUB_OUTPUT +``` + +This ensures: +- Version increment logic is identical to production (no custom counting scripts) +- Preview versions sort below the corresponding release version (`2026.04.07.3-preview.abc1234` < `2026.04.07.3` in pre-release ordering) +- The version is globally unique due to the `short_sha` suffix + 
+--- + +## 3. Workflow File + +**Location**: `.github/workflows/branch-preview.yaml` + +### 3.1 Triggers + +```yaml +on: + workflow_dispatch: # manual trigger from any branch +``` + +`workflow_dispatch` only — no automatic push triggers. Preview builds are opt-in to avoid noise on every branch push. + +### 3.2 Concurrency + +```yaml +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true # cancel old runs for same branch +``` + +### 3.3 Permissions + +```yaml +permissions: + contents: write # needed to create and push git tags and release branch +``` + +--- + +## 4. Job Graph + +``` + prepare + │ + ▼ + build-setup + ┌───────────┼───────────┐ + ▼ ▼ ▼ + build-platforms build-binaries test + │ │ │ + │ └───┬────┘ + │ ▼ + │ docker-build + │ │ + └──────┬────────────┘ + │ + ┌────────────┴────────────┐ + ▼ ▼ +publish-sc-preview publish-git-tag + └────────────┬────────────┘ + │ + finalize +``` + +### Parallelism rationale + +| Job | Waits for | Reason | +|---|---|---| +| `build-platforms` | `build-setup` | Go tools + schemas needed | +| `build-binaries` | `build-setup` | Go tools needed | +| `test` | `build-setup` | Go tools needed | +| `docker-build` | `build-binaries`, `test` | Needs the binary artifact; tests must pass. Does **not** need `build-platforms` (SC binaries not used in Docker image) | +| `publish-sc-preview` | `build-platforms`, `test` | Needs versioned SC tarballs; tests must pass. Does **not** need `docker-build` | +| `publish-git-tag` | `docker-build` | Docker image must be published before the git tag references it. Does **not** need `build-platforms` or `publish-sc-preview` | +| `finalize` | `publish-sc-preview`, `publish-git-tag` | Gates notifications on both publish jobs completing | + +`publish-sc-preview` and `publish-git-tag` run **in parallel** — they have no dependency on each other. 
+ +### Critical path + +``` +prepare → build-setup → build-binaries ─┐ + ├─ docker-build → publish-git-tag → finalize + test ──────┘ +``` + +`build-platforms → publish-sc-preview` runs alongside this path and feeds into `finalize`. + +--- + +## 5. Job Specifications + +### 5.1 `prepare` + +**Runner**: `ubuntu-latest` +**Outputs**: `version`, `short-sha` + +Steps: +1. `actions/checkout@v4` (default depth is sufficient; `reecetech` uses the GitHub API via `use_api: false` to inspect tags) +2. `reecetech/version-increment@2023.10.2` with `use_api: "false"` — computes next CalVer patch without creating a tag +3. Append commit suffix to form the full preview version + +```yaml +- uses: actions/checkout@v4 +- name: Get next version + uses: reecetech/version-increment@2023.10.2 + id: base-version + with: + scheme: "calver" + increment: "patch" + use_api: "false" +- name: Build preview version string + id: version + run: | + SHORT_SHA=$(git rev-parse --short=7 HEAD) + VERSION="${{ steps.base-version.outputs.version }}-preview.${SHORT_SHA}" + echo "version=${VERSION}" >> $GITHUB_OUTPUT + echo "short-sha=${SHORT_SHA}" >> $GITHUB_OUTPUT +``` + +--- + +### 5.2 `build-setup` + +**Runner**: `blacksmith-8vcpu-ubuntu-2204` +**Needs**: `prepare` +**Outputs**: `cicd-bot-telegram-token`, `cicd-bot-telegram-chat-id` + +Identical to `branch.yaml`'s `build-setup` job: +- Install SC (latest release for secrets access) +- Reveal secrets +- `welder run rebuild` (rebuilds SC binary from branch source, `SKIP_EMBEDDINGS=true`) +- Run `clean`, `tools`, `generate-schemas`, `fmt` +- Extract Telegram secrets +- Upload `bin-tools` artifact + +> **Why `welder run rebuild` uses `latest` SC**: The rebuild step installs a fresh SC binary from the branch source and injects it into `./bin/sc`. From that point forward, the branch's own SC binary is used. 
+ +--- + +### 5.3 `build-platforms` + +**Runner**: `blacksmith-8vcpu-ubuntu-2204` +**Needs**: `[prepare, build-setup]` +**Matrix**: `linux/amd64`, `darwin/arm64`, `darwin/amd64` + +Same as `push.yaml` — builds versioned SC binaries with `VERSION` injected via ldflags: + +```bash +go build \ + -ldflags "-s -w -X=github.com/simple-container-com/api/internal/build.Version=${VERSION}" \ + -o dist/${GOOS}-${GOARCH}/sc ./cmd/sc + +tar -czf .sc/stacks/dist/bundle/sc-${GOOS}-${GOARCH}.tar.gz -C dist/${GOOS}-${GOARCH} sc +cp .sc/stacks/dist/bundle/sc-${GOOS}-${GOARCH}.tar.gz \ + .sc/stacks/dist/bundle/sc-${GOOS}-${GOARCH}-v${VERSION}.tar.gz +``` + +Uploads artifact: `sc-{os}-{arch}` containing only the versioned tarball (`sc-{os}-{arch}-v{version}.tar.gz`). + +--- + +### 5.4 `build-binaries` + +**Runner**: `blacksmith-8vcpu-ubuntu-2204` +**Needs**: `[prepare, build-setup]` — parallel with `build-platforms` and `test` +**Matrix**: `github-actions` target only (cloud-helpers is not needed for preview) + +```bash +go build \ + -a -installsuffix cgo \ + -ldflags "-s -w -X=github.com/simple-container-com/api/internal/build.Version=${VERSION}" \ + -o dist/github-actions ./cmd/github-actions +``` + +Uploads artifact: `github-actions-binary`. + +--- + +### 5.5 `test` + +**Runner**: `blacksmith-8vcpu-ubuntu-2204` +**Needs**: `[prepare, build-setup]` — parallel with `build-platforms` and `build-binaries` + +```bash +go test ./... +``` + +--- + +### 5.6 `docker-build` + +**Runner**: `blacksmith-8vcpu-ubuntu-2204` +**Needs**: `[prepare, build-setup, build-binaries, test]` + +Does **not** need `build-platforms` — SC binaries are not used in the Docker image. Starts as soon as `build-binaries` and `test` complete, in parallel with `build-platforms` tail work. + +Builds and pushes **only** the preview-versioned `github-actions` image. No `latest`, `staging`, or other tags are written. + +```yaml +tags: simplecontainer/github-actions:${{ needs.prepare.outputs.version }} +``` + +Steps: +1. 
Download `github-actions-binary` artifact → `dist/` +2. Install SC (latest), reveal secrets +3. Docker Buildx setup +4. Docker Hub login via `sc stack secret-get -s dist dockerhub-cicd-token` +5. Build and push `github-actions.Dockerfile` with single preview tag + +--- + +### 5.7 `publish-sc-preview` + +**Runner**: `blacksmith-8vcpu-ubuntu-2204` +**Needs**: `[prepare, build-setup, build-platforms, test]` + +Does **not** need `docker-build` — SC binary publishing is independent of the Docker image build. Runs in parallel with `publish-git-tag`. + +Uploads versioned SC tarballs to `dist.simple-container.com` **without** modifying `sc.sh` or the `version` file. + +Steps: +1. Download all `sc-*` platform artifacts → `artifacts/` +2. Download `bin-tools` artifact, fix permissions +3. Install SC (latest), reveal secrets +4. Assemble the dist bundle — **versioned tarballs only**: + +```bash +mkdir -p .sc/stacks/dist/bundle +cp artifacts/sc-*/*-v${VERSION}.tar.gz .sc/stacks/dist/bundle/ + +# IMPORTANT: do NOT copy sc.sh or write a version file +# This prevents overwriting the latest pointer for anyone running sc.sh without pinning a version +``` + +5. Deploy via SC: + +```bash +bash <(curl -Ls "https://welder.simple-container.com/welder.sh") deploy -e prod --timestamps +``` + +> **Contract**: The `dist` stack deploy must be idempotent and additive — uploading files to the CDN bucket without deleting files that are not in the bundle. If the current stack implementation does a full sync (potentially deleting `sc.sh`), the deploy step must be adjusted to use a targeted upload or the stack must support a `--no-delete` mode. This is a prerequisite risk to validate during implementation. 
+ +After this step, users can install a preview version with: + +```bash +SIMPLE_CONTAINER_VERSION=2026.04.07.3-preview.abc1234 \ + curl -s "https://dist.simple-container.com/sc.sh" | bash +``` + +--- + +### 5.8 `publish-git-tag` + +**Runner**: `blacksmith-8vcpu-ubuntu-2204` +**Needs**: `[prepare, docker-build]` + +Does **not** need `build-platforms` or `publish-sc-preview` — the git tag only references the Docker image, which must already exist. Runs in parallel with `publish-sc-preview`. + +This job creates a dedicated release commit with updated `action.yml` files and pushes a git tag. It does **not** modify the working branch. + +Steps: + +#### Step 1 — Checkout and configure git + +```bash +git remote set-url origin https://${{ secrets.GITHUB_TOKEN }}@github.com/simple-container-com/api.git +git fetch --tags +``` + +Uses `fregante/setup-git-user@v2` for bot identity. + +#### Step 2 — Create release branch from current HEAD + +```bash +RELEASE_BRANCH="release/${VERSION}" +git checkout -b "${RELEASE_BRANCH}" +``` + +#### Step 3 — Update all action.yml docker image references + +Replace `docker://simplecontainer/github-actions:staging` → `docker://simplecontainer/github-actions:{version}` in all four action files: + +```bash +find .github/actions -name "action.yml" | while read f; do + sed -i "s|docker://simplecontainer/github-actions:staging|docker://simplecontainer/github-actions:${VERSION}|g" "$f" +done +``` + +**Files affected**: +- `.github/actions/cancel-stack/action.yml` +- `.github/actions/deploy-client-stack/action.yml` +- `.github/actions/destroy/action.yml` +- `.github/actions/provision-parent-stack/action.yml` + +#### Step 4 — Commit and tag + +```bash +git add .github/actions/*/action.yml +git commit -m "chore: release preview v${VERSION} - update github-actions image tag" +git tag "v${VERSION}" +git push origin "v${VERSION}" +# Optionally push the release branch for traceability: +git push origin "${RELEASE_BRANCH}" +``` + +> The tag `v{version}` 
now points to a commit where all SC GitHub Actions reference the exact preview Docker image. Users of this repo can reference actions at that tag: +> +> ```yaml +> uses: simple-container-com/api/.github/actions/deploy-client-stack@v2026.04.07.3-preview.abc1234 +> ``` +> +> This will pull the Docker image `simplecontainer/github-actions:2026.04.07.3-preview.abc1234`. + +--- + +### 5.9 `finalize` + +**Runner**: `ubuntu-latest` +**Needs**: `[prepare, build-setup, publish-sc-preview, publish-git-tag]` +**Condition**: `always()` — runs regardless of upstream success/failure to send Telegram notifications + +#### Build summary + +On success, writes a GitHub Actions job summary (`$GITHUB_STEP_SUMMARY`) with copy-paste instructions: + +```yaml +- name: Write build summary + if: ${{ !contains(needs.*.result, 'failure') }} + env: + VERSION: ${{ needs.prepare.outputs.version }} + run: | + cat >> $GITHUB_STEP_SUMMARY << EOF + ## Preview build v${VERSION} + + ### Use SC GitHub Actions at this version + + Reference any SC action with the \`@v${VERSION}\` tag: + + \`\`\`yaml + - uses: simple-container-com/api/.github/actions/deploy-client-stack@v${VERSION} + with: + stack-name: my-stack + sc-config: \${{ secrets.SC_CONFIG }} + + - uses: simple-container-com/api/.github/actions/provision-parent-stack@v${VERSION} + with: + stack-name: my-stack + sc-config: \${{ secrets.SC_CONFIG }} + + - uses: simple-container-com/api/.github/actions/destroy@v${VERSION} + with: + stack-name: my-stack + sc-config: \${{ secrets.SC_CONFIG }} + \`\`\` + + This uses Docker image: \`simplecontainer/github-actions:${VERSION}\` + + ### Install this SC version + + \`\`\`bash + SIMPLE_CONTAINER_VERSION=${VERSION} curl -s "https://dist.simple-container.com/sc.sh" | bash + \`\`\` + + > **Note**: this is a preview build from branch \`$GITHUB_REF_NAME\`. It will not be picked up by anyone running \`sc.sh\` without the version pin. 
+ EOF +``` + +Telegram notification pattern is identical to `branch.yaml`, with the preview version included in the status message. + +--- + +## 6. Data Flow Diagram + +``` +Branch HEAD + │ + ├─── build SC binaries ──────────────────────► dist.simple-container.com + │ (VERSION injected) sc-linux-amd64-v{version}.tar.gz + │ sc-darwin-arm64-v{version}.tar.gz + │ sc-darwin-amd64-v{version}.tar.gz + │ (sc.sh NOT updated) + │ + ├─── build github-actions binary + │ └─── docker build ──────────────────► Docker Hub + │ (github-actions.Dockerfile) simplecontainer/github-actions:{version} + │ (latest/staging NOT updated) + │ + └─── release branch ─────────────────────────► GitHub Repo + action.yml files updated tag: v{version} + to use :{version} docker tag branch: release/{version} +``` + +--- + +## 7. What Is NOT Done (Preserving `latest`) + +| Artifact | Preview behavior | Production behavior | +|---|---|---| +| `sc.sh` | **NOT updated** — stays at last released version | Updated with new version number | +| `dist.simple-container.com/version` | **NOT updated** | Updated to new version | +| `simplecontainer/github-actions:latest` | **NOT pushed** | Pushed | +| `simplecontainer/github-actions:staging` | **NOT pushed** | N/A | +| `action.yml` on working branch | **NOT modified** — changes go to `release/{version}` | N/A | + +--- + +## 8. Security Considerations + +- `GITHUB_TOKEN` with `contents: write` is used for git push. The `release/{version}` branch and tag push are the only write operations. +- Docker Hub credentials come from SC secrets (same pattern as production builds). +- The preview tag uses a content-addressed suffix (commit SHA) preventing tag collisions or silent overwrites. +- Preview Docker images do not receive the `latest` tag, so they are not accidentally pulled by users without explicit pinning. + +--- + +## 9. 
Prerequisites and Risks
+
+| Item | Risk | Mitigation |
+|---|---|---|
+| `welder deploy` full-sync behavior | Could delete `sc.sh` if bundle doesn't include it | Audit `dist` stack Pulumi code; use `--no-delete` flag or targeted file upload if needed |
+| `reecetech/version-increment` tag creation | Would create a real CalVer tag for a preview build | Used with `use_api: "false"` so it only computes the version; the real `v{version}` tag is created later by `publish-git-tag` with the preview suffix |
+| `GITHUB_TOKEN` push permissions | Branch protection on `main` may block push to `release/*` | Release branch is a new branch, not protected; git tag push should be allowed |
+| Parallel preview runs same day | Two concurrent runs could compute the same `patch` number | Acceptable — `short_sha` still makes version unique; worst case is a tag conflict that surfaces immediately |
+
+---
+
+## 10. Example End-to-End Usage
+
+After a preview build of `feature/my-feature` completes:
+
+**Install preview SC CLI**:
+```bash
+SIMPLE_CONTAINER_VERSION=2026.04.07.3-preview.abc1234 \
+  curl -s "https://dist.simple-container.com/sc.sh" | bash
+```
+
+**Use preview SC GitHub Actions**:
+```yaml
+steps:
+  - uses: simple-container-com/api/.github/actions/deploy-client-stack@v2026.04.07.3-preview.abc1234
+    with:
+      stack-name: my-stack
+      sc-config: ${{ secrets.SC_CONFIG }}
+```
+This references Docker image `simplecontainer/github-actions:2026.04.07.3-preview.abc1234` directly.
+ +**Verify published Docker image**: +```bash +docker pull simplecontainer/github-actions:2026.04.07.3-preview.abc1234 +``` diff --git a/docs/design/cloud-api/01-system-architecture.md b/docs/design/cloud-api/01-system-architecture.md new file mode 100644 index 00000000..1adc2094 --- /dev/null +++ b/docs/design/cloud-api/01-system-architecture.md @@ -0,0 +1,499 @@ +# Simple Container Cloud API - System Architecture + +## Overview + +The Simple Container Cloud API is a RESTful web service that provides multi-tenant, web-based management for Simple Container infrastructure and application deployments. It transforms the CLI-based workflow into a scalable, organization-friendly service with proper authentication, authorization, and cloud provider integrations. + +## High-Level Architecture + +```mermaid +graph TB + subgraph "Frontend Clients" + WEB[Web Dashboard] + CLI[SC CLI Integration] + API_CLIENTS[Third-party Clients] + end + + subgraph "API Gateway Layer" + LB[Load Balancer] + GATEWAY[API Gateway] + AUTH[Auth Middleware] + RBAC[RBAC Middleware] + end + + subgraph "Cloud API Service" + subgraph "API Layer" + AUTH_SVC[Authentication Service] + STACK_SVC[Stack Management Service] + RESOURCE_SVC[Resource Discovery Service] + CLOUD_SVC[Cloud Integration Service] + GITHUB_SVC[GitHub Integration Service] + end + + subgraph "Orchestration Layer" + WORKFLOW_MGR[Workflow Manager] + TOKEN_MGR[Token Manager] + CONFIG_MGR[Configuration Manager] + SECRET_MGR[Secrets Manager] + end + end + + subgraph "CI/CD Execution Platform" + GITHUB[GitHub Actions] + INFRA_REPOS[Infrastructure Repositories] + APP_REPOS[Application Repositories] + end + + subgraph "Data Layer" + MONGODB[(MongoDB)] + REDIS[(Redis Cache)] + end + + subgraph "External Services" + GOOGLE[Google OAuth] + AWS[AWS APIs] + GCP[GCP APIs] + end + + subgraph "SC Engine (in CI/CD)" + SC_CLI[SC CLI with Cloud API Source] + SC_ENGINE[SC Provisioning Engine] + end + + WEB --> LB + CLI --> LB + API_CLIENTS --> LB + LB --> 
GATEWAY + GATEWAY --> AUTH + AUTH --> RBAC + RBAC --> AUTH_SVC + RBAC --> STACK_SVC + RBAC --> RESOURCE_SVC + RBAC --> CLOUD_SVC + RBAC --> GITHUB_SVC + + AUTH_SVC --> GOOGLE + STACK_SVC --> WORKFLOW_MGR + RESOURCE_SVC --> AWS + RESOURCE_SVC --> GCP + CLOUD_SVC --> AWS + CLOUD_SVC --> GCP + GITHUB_SVC --> GITHUB + WORKFLOW_MGR --> TOKEN_MGR + WORKFLOW_MGR --> GITHUB + GITHUB --> INFRA_REPOS + GITHUB --> APP_REPOS + INFRA_REPOS --> SC_CLI + APP_REPOS --> SC_CLI + SC_CLI --> CONFIG_MGR + SC_CLI --> SECRET_MGR + SC_CLI --> SC_ENGINE + SC_ENGINE --> AWS + SC_ENGINE --> GCP + + AUTH_SVC --> MONGODB + STACK_SVC --> MONGODB + RESOURCE_SVC --> MONGODB + CLOUD_SVC --> MONGODB + GITHUB_SVC --> MONGODB + AUTH_SVC --> REDIS + STACK_SVC --> REDIS +``` + +## Technology Stack + +### Backend Service +- **Language**: Go (1.21+) +- **Framework**: Gin HTTP framework +- **Architecture**: Microservices-ready monolith with clear service boundaries +- **Configuration**: Viper for configuration management +- **Logging**: Structured logging with logrus/zap + +### Database & Storage +- **Primary Database**: MongoDB 7.0+ + - Document-based storage for flexible configuration schemas + - Transaction support for multi-document operations + - Built-in sharding for horizontal scaling +- **Caching Layer**: Redis 7.0+ + - Session storage + - API rate limiting + - Configuration caching +- **File Storage**: Local filesystem with cloud backup integration + +### Authentication & Security +- **OAuth Provider**: Google OAuth 2.0 +- **Token Management**: JWT with refresh token rotation +- **Session Storage**: Redis-based sessions +- **RBAC**: Custom role-based access control system +- **API Security**: Rate limiting, input validation, CORS + +### Cloud Integrations +- **AWS SDK**: v2 for service account management and resource discovery +- **Google Cloud SDK**: Latest for GCP service account automation +- **Simple Container Engine**: Direct integration with existing provisioning engine + +### Deployment & 
Operations +- **Containerization**: Docker with multi-stage builds +- **Orchestration**: Kubernetes-ready with health checks +- **Monitoring**: Prometheus metrics, health endpoints +- **Documentation**: OpenAPI 3.0 specification + +## Core Components + +### 1. Authentication Service + +**Responsibilities:** +- User registration and authentication via Google OAuth +- JWT token management and validation +- Session lifecycle management +- Multi-factor authentication support (future) + +**Key Features:** +- Google OAuth 2.0 integration +- Automatic GCP service account creation upon first login +- JWT token issuance with role-based claims +- Session management with Redis backing + +### 2. Stack Management Service + +**Responsibilities:** +- Parent stack lifecycle management (CRUD operations) +- Client stack lifecycle management (CRUD operations) +- Stack relationship management (parent-client linking) +- Configuration validation and schema enforcement +- GitHub workflow orchestration for deployments + +**Key Features:** +- Configuration management with SC API package validation +- Support for all existing SC configuration patterns +- Real-time validation using SC's configuration schemas +- GitHub Actions workflow generation and triggering +- Short-lived token generation for CI/CD access + +### 3. Resource Discovery Service + +**Responsibilities:** +- Cloud resource discovery and cataloging +- Resource adoption workflow management +- Resource relationship mapping +- Cost and usage tracking integration + +**Key Features:** +- Multi-cloud resource discovery (AWS, GCP) +- Existing infrastructure adoption workflows +- Resource tagging and organization +- Integration with SC's compute processors + +### 4. 
Cloud Integration Service + +**Responsibilities:** +- Automated cloud service account provisioning +- IAM role and policy management +- Cloud provider API integrations +- Resource discovery and cataloging + +**Key Features:** +- Automated GCP service account creation with proper IAM roles +- AWS IAM user/role provisioning +- Service account key management and rotation +- Cloud resource discovery for adoption workflows + +### 5. GitHub Integration Service + +**Responsibilities:** +- GitHub repository authorization and management +- Infrastructure repository creation and maintenance +- GitHub Actions workflow generation +- Workflow orchestration and status tracking + +**Key Features:** +- GitHub App integration for secure repository access +- Automated infrastructure repository setup +- Dynamic workflow generation based on stack configurations +- Repository scanning and deployment configuration assistance + +## Data Architecture + +### Document Storage Strategy + +```yaml +# MongoDB Collections Structure +organizations/ # Customer/company entities +users/ # Individual user accounts +projects/ # Logical groupings of stacks +parent_stacks/ # Infrastructure definitions (server.yaml) +client_stacks/ # Application configurations (client.yaml) +stack_secrets/ # Encrypted secrets (secrets.yaml) +cloud_accounts/ # Cloud provider service accounts +resources/ # Discovered and managed resources +audit_logs/ # Activity and change tracking +sessions/ # User session data (also cached in Redis) +``` + +### Configuration Storage + +Simple Container configurations are stored as flexible documents that maintain full compatibility with existing SC schemas: + +```go +type StoredParentStack struct { + ID primitive.ObjectID `bson:"_id,omitempty"` + OrganizationID primitive.ObjectID `bson:"organization_id"` + ProjectID primitive.ObjectID `bson:"project_id"` + Name string `bson:"name"` + Description string `bson:"description"` + + // Direct SC server.yaml storage + ServerConfig 
api.ServerDescriptor `bson:"server_config"`
+
+    // Metadata
+    CreatedBy      primitive.ObjectID    `bson:"created_by"`
+    CreatedAt      time.Time             `bson:"created_at"`
+    UpdatedAt      time.Time             `bson:"updated_at"`
+    Version        int32                 `bson:"version"`
+
+    // Access control
+    Owners         []primitive.ObjectID  `bson:"owners"`
+    Permissions    map[string][]string   `bson:"permissions"`
+}
+```
+
+## Integration with Simple Container Core
+
+### GitHub Actions Orchestration
+
+The Cloud API orchestrates provisioning through GitHub Actions rather than direct execution:
+
+```go
+// GitHub Actions orchestration with SC engine
+import (
+    "github.com/simple-container-com/api/pkg/api"
+    "github.com/google/go-github/v57/github"
+)
+
+type StackService struct {
+    github    *GitHubIntegrationService
+    tokenMgr  *TokenService
+    configMgr *api.ConfigManager
+}
+
+func (s *StackService) ProvisionStack(ctx context.Context, stackID string, environment string) error {
+    // 1. Generate short-lived workflow token
+    token, err := s.tokenMgr.GenerateWorkflowToken(ctx, &WorkflowTokenRequest{
+        Purpose:     "infrastructure",
+        StackID:     stackID,
+        Environment: environment,
+        Permissions: []string{"parent_stacks.read", "stack_secrets.read"},
+    })
+    if err != nil {
+        return err
+    }
+
+    // 2. Trigger GitHub Actions workflow
+    return s.github.DispatchWorkflow(ctx, &WorkflowDispatchRequest{
+        StackID:     stackID,
+        Environment: environment,
+        EventType:   "provision-infrastructure",
+        Token:       token.Token,
+    })
+}
+```
+
+### Configuration Management
+
+Manages configurations centrally while enabling GitHub Actions access:
+
+```go
+// Configuration management with CI/CD integration
+func (s *StackService) ValidateStackConfig(config *api.ServerDescriptor) error {
+    // SC provides built-in validation
+    return api.ValidateServerDescriptor(config)
+}
+
+// Configuration delivery to CI/CD workflows
+func (s *ConfigService) GetStackConfigForWorkflow(ctx context.Context, token string, stackID string) (*api.ServerDescriptor, error) {
+    // 1. 
Validate workflow token and permissions + claims, err := s.validateWorkflowToken(ctx, token) + if err != nil { + return nil, err + } + + // 2. Check token scope allows access to this stack + if claims.StackID != stackID { + return nil, ErrInsufficientPermissions + } + + // 3. Return configuration for CI/CD consumption + return s.loadStackConfig(ctx, stackID, claims.Environment) +} +``` + +## Security Architecture + +### Authentication Flow + +```mermaid +sequenceDiagram + participant User + participant API + participant Google + participant MongoDB + participant GCP + + User->>API: Login Request + API->>Google: OAuth 2.0 Authorization + Google->>User: Authorization Code + User->>API: Authorization Code + API->>Google: Exchange Code for Tokens + Google->>API: Access Token + ID Token + API->>MongoDB: Create/Update User + API->>GCP: Create Service Account (First Login) + GCP->>API: Service Account Details + API->>MongoDB: Store Service Account Info + API->>User: JWT Token + Refresh Token +``` + +### Authorization Model + +```yaml +# RBAC Permission Structure +Permissions: + # Infrastructure Management (Parent Stacks) + parent_stacks: + - create + - read + - update + - delete + - provision + + # Application Management (Client Stacks) + client_stacks: + - create + - read + - update + - delete + - deploy + + # Resource Management + resources: + - discover + - adopt + - manage + - delete + + # Organization Management + organization: + - manage_users + - manage_billing + - manage_settings + +# Built-in Roles +Roles: + infrastructure_manager: + permissions: [parent_stacks.*, resources.*, organization.manage_settings] + + developer: + permissions: [client_stacks.*, resources.read, resources.discover] + + admin: + permissions: ["*"] +``` + +## API Design Principles + +### RESTful Resource Design +- Resource-based URLs (e.g., `/api/v1/organizations/{org_id}/parent-stacks`) +- HTTP methods for operations (GET, POST, PUT, DELETE) +- Consistent response formats with proper HTTP 
status codes +- Pagination for list operations + +### Error Handling +```go +type APIError struct { + Code string `json:"code"` + Message string `json:"message"` + Details any `json:"details,omitempty"` + TraceID string `json:"trace_id"` +} +``` + +### Versioning Strategy +- URL path versioning (`/api/v1/`) +- Backward compatibility guarantee within major versions +- Deprecation notices with migration paths + +## Scalability Considerations + +### Horizontal Scaling +- Stateless service design with external session storage +- Database connection pooling and read replicas +- Redis clustering for cache layer +- Load balancer with health checks + +### Performance Optimizations +- Configuration caching in Redis +- Lazy loading of stack relationships +- Async operations for long-running provisioning tasks +- Database indexing strategy for common queries + +### Monitoring & Observability +- Structured logging with correlation IDs +- Prometheus metrics for service health +- Health check endpoints for load balancers +- Audit trail for all configuration changes + +## Development & Deployment + +### Project Structure +``` +cmd/ + cloud-api/ # Main service binary +internal/ + auth/ # Authentication service + stacks/ # Stack management service + resources/ # Resource discovery service + cloud/ # Cloud integration service + models/ # Database models + middleware/ # HTTP middleware + config/ # Configuration management +pkg/ + api/ # Public API types + client/ # API client library +docs/ + api/ # OpenAPI specifications + deployment/ # Deployment guides +``` + +### Configuration Management +```yaml +# config.yaml +server: + port: 8080 + read_timeout: 30s + write_timeout: 30s + +database: + mongodb: + url: "mongodb://localhost:27017" + database: "simple_container_cloud" + redis: + url: "redis://localhost:6379" + +auth: + google: + client_id: "${GOOGLE_CLIENT_ID}" + client_secret: "${GOOGLE_CLIENT_SECRET}" + jwt: + secret: "${JWT_SECRET}" + expiry: 24h + +cloud: + gcp: + project_id: 
"${GCP_PROJECT_ID}" + credentials_path: "${GCP_CREDENTIALS_PATH}" + aws: + region: "${AWS_REGION}" + access_key_id: "${AWS_ACCESS_KEY_ID}" + secret_access_key: "${AWS_SECRET_ACCESS_KEY}" +``` + +This architecture provides a solid foundation for building a scalable, secure, and maintainable Simple Container Cloud API that leverages existing SC components while adding essential multi-tenant and web-based capabilities. diff --git a/docs/design/cloud-api/02-database-design.md b/docs/design/cloud-api/02-database-design.md new file mode 100644 index 00000000..3f24e4f4 --- /dev/null +++ b/docs/design/cloud-api/02-database-design.md @@ -0,0 +1,774 @@ +# Simple Container Cloud API - Database Design + +## Overview + +The Simple Container Cloud API uses MongoDB as its primary database to store multi-tenant configuration data, user management, and Simple Container stack definitions. The design leverages MongoDB's flexible document model to store complex configurations while maintaining strong consistency through transactions. + +## Database Schema Design + +### Core Collections + +#### 1. Organizations Collection + +Represents companies/customers using the Simple Container Cloud API. 
+ +```javascript +// organizations +{ + _id: ObjectId, + name: String, // Organization name + slug: String, // URL-friendly identifier (unique) + description: String, // Organization description + + // Subscription & Billing + subscription: { + plan: String, // "free", "pro", "enterprise" + status: String, // "active", "suspended", "cancelled" + limits: { + max_users: Number, + max_projects: Number, + max_parent_stacks: Number, + max_client_stacks: Number + } + }, + + // Settings + settings: { + default_cloud_providers: [String], // ["aws", "gcp"] + require_mfa: Boolean, + audit_retention_days: Number + }, + + // Metadata + created_at: Date, + updated_at: Date, + created_by: ObjectId, // Reference to users collection + + // Indexes for performance + // Index: { slug: 1 } (unique) + // Index: { created_at: 1 } +} +``` + +#### 2. Users Collection + +Individual user accounts with authentication and role information. + +```javascript +// users +{ + _id: ObjectId, + + // Identity + email: String, // Primary identifier (unique) + name: String, // Full name + avatar_url: String, // Profile picture URL + + // Authentication + google_id: String, // Google OAuth ID + auth_providers: [{ + provider: String, // "google", "github", etc. 
+ provider_id: String, // External ID + connected_at: Date + }], + + // Organization Membership + organizations: [{ + organization_id: ObjectId, + role: String, // "admin", "infrastructure_manager", "developer" + permissions: [String], // Custom permissions array + joined_at: Date, + invited_by: ObjectId + }], + + // Cloud Provider Service Accounts + cloud_accounts: [{ + provider: String, // "aws", "gcp" + account_id: String, // Cloud provider account/project ID + service_account_email: String, // For GCP + service_account_key_id: String, + iam_role_arn: String, // For AWS + created_at: Date, + last_validated: Date, + status: String // "active", "invalid", "expired" + }], + + // Preferences + preferences: { + default_organization: ObjectId, + theme: String, // "light", "dark" + timezone: String, + notifications: { + email_deployments: Boolean, + email_failures: Boolean, + browser_notifications: Boolean + } + }, + + // Security + last_login: Date, + mfa_enabled: Boolean, + mfa_secret: String, // Encrypted TOTP secret + + // Metadata + created_at: Date, + updated_at: Date, + status: String, // "active", "suspended", "deactivated" + + // Indexes for performance + // Index: { email: 1 } (unique) + // Index: { google_id: 1 } (unique, sparse) + // Index: { "organizations.organization_id": 1 } +} +``` + +#### 3. Projects Collection + +Logical groupings of parent and client stacks within organizations. 
+ +```javascript +// projects +{ + _id: ObjectId, + organization_id: ObjectId, + + // Project Identity + name: String, // Project name + slug: String, // URL-friendly identifier (unique within org) + description: String, // Project description + + // Git Integration + git_repository: { + url: String, // Git repository URL + branch: String, // Default branch + path: String, // Path within repository + credentials_id: ObjectId, // Reference to stored git credentials + auto_sync: Boolean, // Automatic sync enabled + last_sync: Date + }, + + // Access Control + owners: [ObjectId], // User IDs with owner access + collaborators: [{ + user_id: ObjectId, + role: String, // "read", "write", "admin" + permissions: [String], // Custom permissions + added_at: Date, + added_by: ObjectId + }], + + // Project Settings + settings: { + environments: [String], // ["development", "staging", "production"] + default_cloud_provider: String, + auto_deploy: Boolean, + require_approval: Boolean + }, + + // Metadata + created_at: Date, + updated_at: Date, + created_by: ObjectId, + status: String, // "active", "archived", "deleted" + + // Indexes for performance + // Index: { organization_id: 1, slug: 1 } (unique) + // Index: { organization_id: 1, created_at: -1 } + // Index: { owners: 1 } +} +``` + +#### 4. Parent Stacks Collection + +Infrastructure definitions (server.yaml) managed by DevOps/Infrastructure teams. 
+ +```javascript +// parent_stacks +{ + _id: ObjectId, + organization_id: ObjectId, + project_id: ObjectId, + + // Stack Identity + name: String, // Stack name (unique within project) + display_name: String, // Human-readable name + description: String, // Stack description + + // Simple Container Configuration + // This stores the complete server.yaml content + server_config: { + schemaVersion: String, // "1.0" + provisioner: { + type: String, // "pulumi" + config: Object // Provisioner configuration + }, + secrets: Object, // Secrets configuration + cicd: Object, // CI/CD configuration + templates: Object, // Deployment templates + resources: Object, // Resource definitions + variables: Object // Variables configuration + }, + + // Deployment Information + environments: [{ + name: String, // "production", "staging", etc. + status: String, // "deployed", "deploying", "failed", "not_deployed" + last_deployed: Date, + deployed_by: ObjectId, + deployment_id: String, // Reference to deployment logs + resource_count: Number, + estimated_cost: Number, + + // Cloud Provider Information + cloud_resources: [{ + resource_id: String, // Cloud provider resource ID + resource_type: String, // "aws-s3-bucket", "gcp-gke-autopilot-cluster" + resource_name: String, + status: String, // "healthy", "unhealthy", "unknown" + created_at: Date, + last_checked: Date, + metadata: Object // Provider-specific metadata + }] + }], + + // Access Control (Infrastructure Manager Role Required) + owners: [ObjectId], // Users with full access + editors: [ObjectId], // Users who can modify + viewers: [ObjectId], // Users who can view + + // Version Control + version: Number, // Incremental version number + git_commit: String, // Last git commit hash + change_history: [{ + version: Number, + changed_by: ObjectId, + changed_at: Date, + change_summary: String, + git_commit: String + }], + + // Metadata + created_at: Date, + updated_at: Date, + created_by: ObjectId, + last_modified_by: ObjectId, + + // 
Indexes for performance + // Index: { organization_id: 1, project_id: 1, name: 1 } (unique) + // Index: { organization_id: 1, created_at: -1 } + // Index: { owners: 1 } + // Index: { "environments.status": 1 } +} +``` + +#### 5. Client Stacks Collection + +Application configurations (client.yaml) managed by developers. + +```javascript +// client_stacks +{ + _id: ObjectId, + organization_id: ObjectId, + project_id: ObjectId, + + // Stack Identity + name: String, // Stack name (unique within project) + display_name: String, // Human-readable name + description: String, // Stack description + + // Parent Stack Relationship + parent_stack_id: ObjectId, // Reference to parent_stacks + parent_environment: String, // Which parent environment to use + + // Simple Container Configuration + // This stores the complete client.yaml content + client_config: { + schemaVersion: String, // "1.0" + defaults: Object, // Default values and YAML anchors + stacks: Object // Stack configurations by environment + }, + + // Docker Compose Configuration + docker_compose: Object, // docker-compose.yaml content + dockerfile_content: String, // Dockerfile content + + // Deployment Information + environments: [{ + name: String, // "production", "staging", etc. 
+ status: String, // "deployed", "deploying", "failed", "not_deployed" + last_deployed: Date, + deployed_by: ObjectId, + deployment_id: String, + + // Application-specific metrics + replicas: { + desired: Number, + running: Number, + ready: Number + }, + + // Service endpoints + endpoints: [{ + name: String, // Service name + url: String, // Public URL + internal_url: String, // Internal URL + health_status: String // "healthy", "unhealthy", "unknown" + }], + + // Resource consumption + resources_used: [{ + resource_name: String, // Parent resource name + resource_type: String, + connection_status: String, // "connected", "error" + last_used: Date + }] + }], + + // Git Integration + git_repository: { + url: String, // Application git repository + branch: String, // Deployment branch + commit: String, // Last deployed commit + dockerfile_path: String, // Path to Dockerfile + compose_path: String // Path to docker-compose.yaml + }, + + // Access Control (Developer Role Sufficient) + owners: [ObjectId], // Users with full access + collaborators: [{ + user_id: ObjectId, + role: String, // "read", "write" + added_at: Date + }], + + // Version Control + version: Number, + change_history: [{ + version: Number, + changed_by: ObjectId, + changed_at: Date, + change_summary: String, + config_diff: String // JSON diff of configuration changes + }], + + // Metadata + created_at: Date, + updated_at: Date, + created_by: ObjectId, + last_modified_by: ObjectId, + + // Indexes for performance + // Index: { organization_id: 1, project_id: 1, name: 1 } (unique) + // Index: { parent_stack_id: 1 } + // Index: { organization_id: 1, created_at: -1 } + // Index: { owners: 1 } +} +``` + +#### 6. Stack Secrets Collection + +Encrypted secrets (secrets.yaml) with proper access control. 
+ +```javascript +// stack_secrets +{ + _id: ObjectId, + organization_id: ObjectId, + + // Associated Stack + stack_id: ObjectId, // References parent_stacks or client_stacks + stack_type: String, // "parent" or "client" + environment: String, // Environment these secrets apply to + + // Encrypted Secret Data + // This stores the complete secrets.yaml content, encrypted + encrypted_secrets: { + schemaVersion: String, // "1.0" + auth: Object, // Authentication configurations (encrypted) + values: Object // Secret values (encrypted) + }, + + // Encryption Information + encryption: { + algorithm: String, // "AES-256-GCM" + key_version: Number, // For key rotation + encrypted_at: Date, + encrypted_by: ObjectId + }, + + // Access Control (Strict - Only Infrastructure Managers + Stack Owners) + accessible_by: [ObjectId], // User IDs who can decrypt these secrets + access_history: [{ + user_id: ObjectId, + accessed_at: Date, + action: String, // "read", "write", "rotate" + ip_address: String + }], + + // Metadata + created_at: Date, + updated_at: Date, + created_by: ObjectId, + + // Indexes for performance + // Index: { stack_id: 1, environment: 1 } (unique) + // Index: { accessible_by: 1 } + // Index: { organization_id: 1 } +} +``` + +#### 7. Cloud Accounts Collection + +Cloud provider service accounts and their management. 
+ +```javascript +// cloud_accounts +{ + _id: ObjectId, + organization_id: ObjectId, + user_id: ObjectId, // User who owns this account + + // Cloud Provider Information + provider: String, // "aws", "gcp" + account_id: String, // Cloud account/project ID + region: String, // Default region + + // Service Account Details + service_account: { + // For GCP + email: String, // Service account email + project_id: String, // GCP project ID + key_id: String, // Service account key ID + + // For AWS + user_name: String, // IAM user name + access_key_id: String, // AWS access key ID + role_arn: String, // Assumed role ARN + + // Common + created_at: Date, + last_rotated: Date, + expires_at: Date + }, + + // Permissions & Roles + permissions: { + iam_roles: [String], // Assigned IAM roles/policies + custom_permissions: [String], + permission_boundary: String, + last_validated: Date, + validation_status: String // "valid", "invalid", "pending" + }, + + // Usage Tracking + usage: { + last_used: Date, + operations_count: Number, + estimated_cost: Number, + resource_count: Number + }, + + // Status & Health + status: String, // "active", "suspended", "deleted", "error" + health_check: { + last_checked: Date, + status: String, // "healthy", "unhealthy", "unknown" + error_message: String + }, + + // Metadata + created_at: Date, + updated_at: Date, + + // Indexes for performance + // Index: { organization_id: 1, user_id: 1, provider: 1 } + // Index: { user_id: 1 } + // Index: { status: 1 } +} +``` + +#### 8. Resources Collection + +Discovered and managed cloud resources. 
+ +```javascript +// resources +{ + _id: ObjectId, + organization_id: ObjectId, + + // Resource Identity + cloud_provider: String, // "aws", "gcp" + cloud_account_id: String, // Cloud account/project ID + resource_id: String, // Cloud provider resource ID (unique per account) + resource_type: String, // "aws-s3-bucket", "gcp-gke-autopilot-cluster" + resource_name: String, // Human-readable name + region: String, // Cloud region + + // Simple Container Integration + managed_by_sc: Boolean, // Whether SC manages this resource + parent_stack_id: ObjectId, // If managed by SC, which parent stack + stack_environment: String, // Which environment + + // Resource Details + configuration: Object, // Cloud-specific configuration + tags: Object, // Resource tags/labels + metadata: Object, // Additional metadata + + // Status & Monitoring + status: String, // "healthy", "unhealthy", "unknown", "deleted" + health_check: { + last_checked: Date, + status_details: String, + metrics: Object + }, + + // Cost Information + cost: { + estimated_monthly: Number, + last_calculated: Date, + currency: String + }, + + // Discovery Information + discovered_at: Date, + discovered_by: String, // "sc_discovery", "manual_import", "provisioning" + last_synced: Date, + + // Relationships + dependencies: [ObjectId], // Other resources this depends on + dependents: [ObjectId], // Other resources that depend on this + + // Metadata + created_at: Date, + updated_at: Date, + + // Indexes for performance + // Index: { organization_id: 1, cloud_provider: 1, resource_id: 1 } (unique) + // Index: { parent_stack_id: 1 } + // Index: { managed_by_sc: 1 } + // Index: { status: 1 } +} +``` + +#### 9. Audit Logs Collection + +Comprehensive activity and change tracking. 
+ +```javascript +// audit_logs +{ + _id: ObjectId, + organization_id: ObjectId, + + // Event Information + event_type: String, // "stack_created", "resource_provisioned", "user_login" + event_category: String, // "authentication", "stack_management", "resource_management" + + // Actor Information + actor: { + user_id: ObjectId, + user_email: String, + user_name: String, + ip_address: String, + user_agent: String + }, + + // Target Information + target: { + resource_type: String, // "parent_stack", "client_stack", "user", "resource" + resource_id: ObjectId, + resource_name: String + }, + + // Change Details + changes: { + action: String, // "create", "update", "delete", "provision", "deploy" + before: Object, // Previous state (for updates) + after: Object, // New state (for creates/updates) + diff: String // Human-readable change summary + }, + + // Request Context + request: { + method: String, // HTTP method + endpoint: String, // API endpoint + request_id: String, // Correlation ID + duration_ms: Number + }, + + // Result Information + result: String, // "success", "failure", "partial" + error_message: String, // If result is failure + + // Metadata + timestamp: Date, // Event timestamp + severity: String, // "info", "warning", "error" + + // Indexes for performance + // Index: { organization_id: 1, timestamp: -1 } + // Index: { event_type: 1, timestamp: -1 } + // Index: { "actor.user_id": 1, timestamp: -1 } + // Index: { "target.resource_id": 1, timestamp: -1 } +} +``` + +#### 10. Sessions Collection + +User session management (also cached in Redis). 
+ +```javascript +// sessions +{ + _id: ObjectId, + + // Session Identity + session_id: String, // Random session identifier (unique) + user_id: ObjectId, + organization_id: ObjectId, + + // Authentication Tokens + access_token: String, // JWT access token (encrypted) + refresh_token: String, // JWT refresh token (encrypted) + + // Session Details + created_at: Date, + expires_at: Date, + last_accessed: Date, + + // Client Information + ip_address: String, + user_agent: String, + device_fingerprint: String, + + // Status + status: String, // "active", "expired", "revoked" + revoked_at: Date, + revoked_by: ObjectId, + revoked_reason: String, + + // Indexes for performance + // Index: { session_id: 1 } (unique) + // Index: { user_id: 1, status: 1 } + // Index: { expires_at: 1 } (TTL index for automatic cleanup) +} +``` + +## Indexing Strategy + +### Primary Indexes + +```javascript +// Performance-critical indexes +db.organizations.createIndex({ "slug": 1 }, { unique: true }) +db.users.createIndex({ "email": 1 }, { unique: true }) +db.users.createIndex({ "organizations.organization_id": 1 }) + +db.projects.createIndex({ "organization_id": 1, "slug": 1 }, { unique: true }) + +db.parent_stacks.createIndex({ + "organization_id": 1, + "project_id": 1, + "name": 1 +}, { unique: true }) + +db.client_stacks.createIndex({ + "organization_id": 1, + "project_id": 1, + "name": 1 +}, { unique: true }) +db.client_stacks.createIndex({ "parent_stack_id": 1 }) + +db.stack_secrets.createIndex({ "stack_id": 1, "environment": 1 }, { unique: true }) + +db.resources.createIndex({ + "organization_id": 1, + "cloud_provider": 1, + "resource_id": 1 +}, { unique: true }) + +db.audit_logs.createIndex({ "organization_id": 1, "timestamp": -1 }) +db.sessions.createIndex({ "session_id": 1 }, { unique: true }) +db.sessions.createIndex({ "expires_at": 1 }, { expireAfterSeconds: 0 }) +``` + +## Data Relationships + +### Organization Hierarchy +``` +Organization +├── Users (many-to-many via 
organizations array in users) +├── Projects (one-to-many) +│ ├── Parent Stacks (one-to-many) +│ │ ├── Stack Secrets (one-to-many) +│ │ └── Resources (one-to-many) +│ └── Client Stacks (one-to-many) +│ └── Stack Secrets (one-to-many) +├── Cloud Accounts (one-to-many) +└── Audit Logs (one-to-many) +``` + +### Stack Relationships +``` +Parent Stack +├── Client Stacks (one-to-many via parent_stack_id) +├── Resources (one-to-many via parent_stack_id) +└── Stack Secrets (one-to-many via stack_id) + +Client Stack +├── Parent Stack (many-to-one via parent_stack_id) +└── Stack Secrets (one-to-many via stack_id) +``` + +## Transaction Patterns + +### Multi-Document Operations + +MongoDB transactions ensure data consistency for operations that span multiple collections: + +```javascript +// Example: Creating a new parent stack with initial secrets +session.withTransaction(async () => { + // 1. Create parent stack + const stack = await db.parent_stacks.insertOne({...}, { session }) + + // 2. Create associated secrets + await db.stack_secrets.insertOne({ + stack_id: stack.insertedId, + stack_type: "parent", + ... + }, { session }) + + // 3. Log the action + await db.audit_logs.insertOne({ + event_type: "parent_stack_created", + target: { resource_id: stack.insertedId }, + ... 
+ }, { session }) +}) +``` + +## Security Considerations + +### Data Encryption + +- **Secrets Encryption**: All secret data encrypted using AES-256-GCM +- **PII Encryption**: Sensitive user data encrypted at rest +- **Database Encryption**: MongoDB encryption at rest enabled +- **Connection Encryption**: TLS 1.3 for all database connections + +### Access Control + +- **Database Authentication**: Strong authentication with role-based access +- **Connection Limits**: Connection pooling with limits +- **Query Monitoring**: Slow query logging and monitoring +- **Backup Encryption**: All backups encrypted with separate keys + +### Compliance + +- **GDPR Compliance**: User data deletion and export capabilities +- **SOC 2 Type II**: Audit trail and access controls +- **Data Residency**: Configurable data location based on organization requirements + +This database design provides a robust foundation for the Simple Container Cloud API, maintaining full compatibility with existing Simple Container configurations while adding the necessary multi-tenancy and access control features. diff --git a/docs/design/cloud-api/03-authentication-rbac.md b/docs/design/cloud-api/03-authentication-rbac.md new file mode 100644 index 00000000..a559e828 --- /dev/null +++ b/docs/design/cloud-api/03-authentication-rbac.md @@ -0,0 +1,660 @@ +# Simple Container Cloud API - Authentication & RBAC + +## Overview + +The Simple Container Cloud API implements a comprehensive authentication and authorization system designed to support multi-tenant organizations while maintaining the critical distinction between infrastructure management and application development roles. The system ensures that DevOps teams maintain control over shared infrastructure while enabling developers to independently manage their application deployments. 
+ +## Authentication Architecture + +### OAuth 2.0 Integration + +The API uses Google OAuth 2.0 as the primary authentication provider with automatic cloud service account provisioning. + +```mermaid +sequenceDiagram + participant User + participant Frontend + participant API + participant Google + participant GCP + participant MongoDB + + User->>Frontend: Click "Login with Google" + Frontend->>Google: Redirect to OAuth consent + Google->>User: Show consent screen + User->>Google: Grant permissions + Google->>Frontend: Authorization code + Frontend->>API: POST /auth/google/callback + code + API->>Google: Exchange code for tokens + Google->>API: Access token + ID token + User info + + alt First time user + API->>MongoDB: Create user record + API->>GCP: Create service account + GCP->>API: Service account details + API->>MongoDB: Store service account info + end + + API->>API: Generate JWT tokens + API->>MongoDB: Store session + API->>Frontend: JWT access + refresh tokens + Frontend->>User: Authenticated session +``` + +### JWT Token Management + +```go +type TokenClaims struct { + jwt.RegisteredClaims + + // User Information + UserID string `json:"user_id"` + Email string `json:"email"` + Name string `json:"name"` + + // Organization Context + OrganizationID string `json:"org_id"` + Role string `json:"role"` + Permissions []string `json:"permissions"` + + // Session Information + SessionID string `json:"session_id"` + TokenType string `json:"token_type"` // "access" or "refresh" + + // Security + IPAddress string `json:"ip_address"` + UserAgent string `json:"user_agent"` +} + +// Token Configuration +type TokenConfig struct { + AccessTokenTTL time.Duration // 1 hour + RefreshTokenTTL time.Duration // 30 days + SigningKey []byte + Algorithm string // HS256 or RS256 +} +``` + +### Automatic GCP Service Account Provisioning + +When a user authenticates for the first time, the system automatically provisions a GCP service account: + +```go +func (s *AuthService) 
provisionGCPServiceAccount(ctx context.Context, user *User) error { + // Generate service account name based on user email + saName := generateServiceAccountName(user.Email, user.OrganizationID) + + // Create service account in GCP + sa, err := s.gcpClient.CreateServiceAccount(ctx, &gcpiam.CreateServiceAccountRequest{ + Name: fmt.Sprintf("projects/%s", s.gcpProjectID), + ServiceAccount: &gcpiam.ServiceAccount{ + Name: saName, + DisplayName: fmt.Sprintf("Simple Container - %s", user.Name), + Description: "Automatically provisioned service account for Simple Container Cloud API", + }, + }) + if err != nil { + return fmt.Errorf("failed to create service account: %w", err) + } + + // Apply default IAM roles for Simple Container operations + roles := []string{ + "roles/storage.admin", // GCS bucket management + "roles/container.admin", // GKE cluster management + "roles/compute.admin", // Compute resource management + "roles/cloudsql.admin", // Cloud SQL management + "roles/redis.admin", // Redis management + } + + for _, role := range roles { + if err := s.assignRole(ctx, sa.Email, role); err != nil { + return fmt.Errorf("failed to assign role %s: %w", role, err) + } + } + + // Generate service account key + key, err := s.gcpClient.CreateServiceAccountKey(ctx, &gcpiam.CreateServiceAccountKeyRequest{ + Name: sa.Name, + ServiceAccountKey: &gcpiam.ServiceAccountKey{ + KeyAlgorithm: gcpiam.ServiceAccountKeyAlgorithm_KEY_ALG_RSA_2048, + }, + }) + if err != nil { + return fmt.Errorf("failed to create service account key: %w", err) + } + + // Store encrypted service account details + cloudAccount := &CloudAccount{ + OrganizationID: user.OrganizationID, + UserID: user.ID, + Provider: "gcp", + ServiceAccount: ServiceAccountDetails{ + Email: sa.Email, + ProjectID: s.gcpProjectID, + KeyID: extractKeyID(key.Name), + KeyData: s.encryptServiceAccountKey(key.PrivateKeyData), + }, + CreatedAt: time.Now(), + } + + return s.db.CloudAccounts().InsertOne(ctx, cloudAccount) +} +``` + +## 
Role-Based Access Control (RBAC) + +### Core Principles + +1. **Infrastructure vs Application Separation**: Clear distinction between infrastructure management (parent stacks) and application deployment (client stacks) +2. **Organization Isolation**: Complete data isolation between organizations +3. **Principle of Least Privilege**: Users receive minimum permissions necessary for their role +4. **Hierarchical Permissions**: Permissions can be inherited and extended at project level + +### Built-in Roles + +#### Infrastructure Manager +Full control over infrastructure and the ability to manage parent stacks that define shared resources. + +```yaml +infrastructure_manager: + description: "DevOps engineers responsible for infrastructure management" + permissions: + # Parent Stack Management (Full Access) + - parent_stacks.create + - parent_stacks.read + - parent_stacks.update + - parent_stacks.delete + - parent_stacks.provision + - parent_stacks.destroy + + # Resource Management (Full Access) + - resources.discover + - resources.adopt + - resources.manage + - resources.delete + - resources.provision + + # Client Stack Management (Read + Limited Write) + - client_stacks.read + - client_stacks.create # Can help developers set up stacks + - client_stacks.update # Can modify client configurations + + # Secrets Management (Full Access to Infrastructure Secrets) + - secrets.read_parent + - secrets.write_parent + - secrets.rotate_parent + + # Organization Management + - organization.manage_settings + - organization.view_usage + - organization.manage_billing + + # Advanced Operations + - deployments.approve # Can approve production deployments + - audit_logs.read + - cloud_accounts.manage +``` + +#### Developer +Application-focused role with ability to manage client stacks and consume infrastructure. 
+ +```yaml +developer: + description: "Software developers deploying applications" + permissions: + # Client Stack Management (Full Access) + - client_stacks.create + - client_stacks.read + - client_stacks.update + - client_stacks.delete + - client_stacks.deploy + - client_stacks.rollback + + # Parent Stack Management (Read Only) + - parent_stacks.read # Can see available infrastructure + + # Resource Management (Read + Limited Access) + - resources.read # Can see available resources + - resources.discover # Can discover resources for integration + + # Secrets Management (Limited to Client Secrets) + - secrets.read_client + - secrets.write_client + + # Deployment Management + - deployments.create + - deployments.monitor + + # Basic Organization Access + - organization.view_basic +``` + +#### Project Admin +Enhanced permissions within a specific project context. + +```yaml +project_admin: + description: "Project leaders with enhanced permissions within their project" + inherits: [developer] + additional_permissions: + # User Management within Project + - project.manage_users + - project.manage_permissions + + # Advanced Client Stack Operations + - client_stacks.manage_all # Can manage all client stacks in project + + # Project Configuration + - project.manage_settings + - project.manage_integrations + + # Approval Workflows + - deployments.approve_staging +``` + +#### Organization Admin +Top-level administrative access across the organization. 
+ +```yaml +organization_admin: + description: "Organization administrators with full access" + permissions: + - "*" # Full access to all resources + restrictions: + # Even admins cannot access certain sensitive operations without MFA + - requires_mfa: [secrets.write_parent, cloud_accounts.delete, organization.delete] +``` + +### Permission System Architecture + +```go +type Permission struct { + Resource string `json:"resource"` // "parent_stacks", "client_stacks", "resources" + Action string `json:"action"` // "create", "read", "update", "delete", "provision" + Context string `json:"context"` // "organization", "project", "own" +} + +type Role struct { + ID string `bson:"_id"` + Name string `bson:"name"` + Description string `bson:"description"` + Permissions []Permission `bson:"permissions"` + Inherits []string `bson:"inherits"` // Role inheritance + IsSystemRole bool `bson:"is_system_role"` // Built-in vs custom + CreatedAt time.Time `bson:"created_at"` +} + +type UserPermission struct { + UserID primitive.ObjectID `bson:"user_id"` + OrganizationID primitive.ObjectID `bson:"organization_id"` + ProjectID *primitive.ObjectID `bson:"project_id,omitempty"` // Project-specific permissions + Role string `bson:"role"` + CustomPerms []Permission `bson:"custom_permissions"` // Additional permissions + GrantedBy primitive.ObjectID `bson:"granted_by"` + GrantedAt time.Time `bson:"granted_at"` + ExpiresAt *time.Time `bson:"expires_at,omitempty"` +} +``` + +### Permission Evaluation Engine + +```go +type PermissionEvaluator struct { + db *mongo.Database + cache *redis.Client +} + +func (pe *PermissionEvaluator) HasPermission(ctx context.Context, userID, orgID string, resource, action string, targetID *string) (bool, error) { + // 1. Load user permissions from cache or database + userPerms, err := pe.getUserPermissions(ctx, userID, orgID) + if err != nil { + return false, err + } + + // 2. 
Evaluate organization-level permissions + if pe.evaluatePermission(userPerms.OrganizationPermissions, resource, action) { + return true, nil + } + + // 3. If target is project-specific, check project permissions + if targetID != nil { + projectPerms := pe.getProjectPermissions(userPerms, *targetID) + if pe.evaluatePermission(projectPerms, resource, action) { + return true, nil + } + } + + // 4. Check ownership permissions + if pe.isOwner(ctx, userID, targetID) { + ownerPerms := pe.getOwnerPermissions(resource) + return pe.evaluatePermission(ownerPerms, resource, action), nil + } + + return false, nil +} + +func (pe *PermissionEvaluator) evaluatePermission(permissions []Permission, resource, action string) bool { + for _, perm := range permissions { + // Wildcard matching + if perm.Resource == "*" || perm.Resource == resource { + if perm.Action == "*" || perm.Action == action { + return true + } + } + } + return false +} +``` + +### Permission Middleware + +```go +func RequirePermission(resource, action string) gin.HandlerFunc { + return func(c *gin.Context) { + // Extract user context from JWT + userCtx, exists := c.Get("user") + if !exists { + c.JSON(http.StatusUnauthorized, gin.H{"error": "Authentication required"}) + c.Abort() + return + } + + user := userCtx.(*UserContext) + + // Extract target resource ID from URL parameters + var targetID *string + if id := c.Param("id"); id != "" { + targetID = &id + } + + // Check permission + hasPermission, err := permissionEvaluator.HasPermission( + c.Request.Context(), + user.UserID, + user.OrganizationID, + resource, + action, + targetID, + ) + + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Permission check failed"}) + c.Abort() + return + } + + if !hasPermission { + c.JSON(http.StatusForbidden, gin.H{ + "error": "Insufficient permissions", + "required": fmt.Sprintf("%s.%s", resource, action), + }) + c.Abort() + return + } + + c.Next() + } +} + +// Usage in routes +r.POST("/parent-stacks", + 
RequireAuth(), + RequirePermission("parent_stacks", "create"), + createParentStackHandler) + +r.DELETE("/parent-stacks/:id", + RequireAuth(), + RequirePermission("parent_stacks", "delete"), + deleteParentStackHandler) +``` + +## Security Features + +### Multi-Factor Authentication (MFA) + +```go +type MFAConfig struct { + Enabled bool `bson:"enabled"` + Secret string `bson:"secret"` // Encrypted TOTP secret + BackupCodes []string `bson:"backup_codes"` // Encrypted backup codes + LastUsed time.Time `bson:"last_used"` +} + +func (s *AuthService) EnableMFA(ctx context.Context, userID string) (*MFASetupResponse, error) { + // Generate TOTP secret + secret := make([]byte, 32) + rand.Read(secret) + + // Generate backup codes + backupCodes := s.generateBackupCodes(10) + + // Encrypt sensitive data + encryptedSecret := s.encrypt(secret) + encryptedBackupCodes := s.encryptBackupCodes(backupCodes) + + // Store MFA configuration + mfaConfig := &MFAConfig{ + Enabled: true, + Secret: encryptedSecret, + BackupCodes: encryptedBackupCodes, + } + + // Return setup information to user + return &MFASetupResponse{ + Secret: base32.StdEncoding.EncodeToString(secret), + QRCode: s.generateQRCode(userID, secret), + BackupCodes: backupCodes, // Show once, then encrypt + }, nil +} +``` + +### Session Management + +```go +type SessionManager struct { + redis *redis.Client + db *mongo.Database + config *SessionConfig +} + +type SessionConfig struct { + MaxActiveSessions int // 5 concurrent sessions per user + IdleTimeout time.Duration // 30 minutes + AbsoluteTimeout time.Duration // 8 hours + RequireReauth []string // Operations requiring re-authentication +} + +func (sm *SessionManager) CreateSession(ctx context.Context, user *User, clientInfo *ClientInfo) (*Session, error) { + // Check concurrent session limit + activeSessions := sm.getActiveSessions(ctx, user.ID) + if len(activeSessions) >= sm.config.MaxActiveSessions { + // Invalidate oldest session + sm.invalidateOldestSession(ctx, 
user.ID) + } + + // Create new session + session := &Session{ + SessionID: generateSecureID(), + UserID: user.ID, + IPAddress: clientInfo.IPAddress, + UserAgent: clientInfo.UserAgent, + CreatedAt: time.Now(), + ExpiresAt: time.Now().Add(sm.config.AbsoluteTimeout), + LastAccessed: time.Now(), + Status: "active", + } + + // Store in both Redis (for fast access) and MongoDB (for persistence) + sm.redis.Set(ctx, "session:"+session.SessionID, session, sm.config.AbsoluteTimeout) + sm.db.Collection("sessions").InsertOne(ctx, session) + + return session, nil +} +``` + +### API Rate Limiting + +```go +type RateLimiter struct { + redis *redis.Client + limits map[string]RateLimit +} + +type RateLimit struct { + Requests int // Number of requests + Window time.Duration // Time window + BurstLimit int // Burst allowance +} + +// Rate limits by user role and operation type +var DefaultRateLimits = map[string]RateLimit{ + "auth": {Requests: 5, Window: time.Minute, BurstLimit: 10}, + "read_operations": {Requests: 1000, Window: time.Minute, BurstLimit: 200}, + "write_operations": {Requests: 100, Window: time.Minute, BurstLimit: 50}, + "provision_operations": {Requests: 10, Window: time.Minute, BurstLimit: 5}, +} + +func RateLimitMiddleware() gin.HandlerFunc { + return gin.HandlerFunc(func(c *gin.Context) { + user := getUserFromContext(c) + operation := getOperationType(c) + + key := fmt.Sprintf("rate_limit:%s:%s", user.ID, operation) + + allowed, err := rateLimiter.Allow(c.Request.Context(), key, operation) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Rate limit check failed"}) + c.Abort() + return + } + + if !allowed { + c.JSON(http.StatusTooManyRequests, gin.H{ + "error": "Rate limit exceeded", + "retry_after": rateLimiter.GetRetryAfter(key), + }) + c.Abort() + return + } + + c.Next() + }) +} +``` + +## Audit and Compliance + +### Comprehensive Audit Logging + +Every operation is logged with complete context: + +```go +type AuditLogger struct { + db 
*mongo.Database +} + +func (al *AuditLogger) LogEvent(ctx context.Context, event *AuditEvent) error { + // Enrich event with request context + if reqCtx := getRequestContext(ctx); reqCtx != nil { + event.RequestID = reqCtx.RequestID + event.IPAddress = reqCtx.IPAddress + event.UserAgent = reqCtx.UserAgent + event.Endpoint = reqCtx.Endpoint + } + + // Add timestamp and correlation ID + event.Timestamp = time.Now() + event.CorrelationID = getCorrelationID(ctx) + + // Store audit event + _, err := al.db.Collection("audit_logs").InsertOne(ctx, event) + return err +} + +// Audit middleware for automatic logging +func AuditMiddleware() gin.HandlerFunc { + return gin.HandlerFunc(func(c *gin.Context) { + start := time.Now() + + // Process request + c.Next() + + // Log the operation + user := getUserFromContext(c) + auditEvent := &AuditEvent{ + EventType: getEventType(c), + EventCategory: getEventCategory(c), + Actor: user.ToAuditActor(), + Target: extractTarget(c), + Result: getResult(c), + Duration: time.Since(start), + OrganizationID: user.OrganizationID, + } + + auditLogger.LogEvent(c.Request.Context(), auditEvent) + }) +} +``` + +### Privacy and Data Protection + +```go +type DataProtectionService struct { + encryptor *crypto.AESEncryptor + db *mongo.Database +} + +// GDPR-compliant data export +func (dps *DataProtectionService) ExportUserData(ctx context.Context, userID string) (*UserDataExport, error) { + // Collect all user data across collections + userData := &UserDataExport{ + UserID: userID, + ExportedAt: time.Now(), + } + + // User profile data + user, _ := dps.getUser(ctx, userID) + userData.Profile = user.ToExportFormat() + + // Stack configurations (sanitized) + stacks, _ := dps.getUserStacks(ctx, userID) + userData.Stacks = sanitizeStacksForExport(stacks) + + // Audit logs + auditLogs, _ := dps.getUserAuditLogs(ctx, userID) + userData.AuditLogs = auditLogs + + return userData, nil +} + +// GDPR-compliant data deletion +func (dps *DataProtectionService) 
DeleteUserData(ctx context.Context, userID string) error { + // This requires careful orchestration to maintain referential integrity + session, err := dps.db.Client().StartSession() + if err != nil { + return err + } + defer session.EndSession(ctx) + + return mongo.WithSession(ctx, session, func(sc mongo.SessionContext) error { + // 1. Remove user from organizations + dps.removeFromOrganizations(sc, userID) + + // 2. Transfer ownership of stacks to organization admins + dps.transferStackOwnership(sc, userID) + + // 3. Anonymize audit logs (keep for compliance) + dps.anonymizeAuditLogs(sc, userID) + + // 4. Delete user record + dps.deleteUser(sc, userID) + + // 5. Delete associated cloud accounts + dps.deleteCloudAccounts(sc, userID) + + return nil + }) +} +``` + +This authentication and RBAC system provides enterprise-grade security while maintaining the flexibility needed for Simple Container's infrastructure-application separation model. The automatic cloud service account provisioning ensures users can immediately begin managing cloud resources upon authentication, while the comprehensive audit system ensures full compliance with security and regulatory requirements. diff --git a/docs/design/cloud-api/04-rest-api-specification.md b/docs/design/cloud-api/04-rest-api-specification.md new file mode 100644 index 00000000..3492c346 --- /dev/null +++ b/docs/design/cloud-api/04-rest-api-specification.md @@ -0,0 +1,977 @@ +# Simple Container Cloud API - REST API Specification + +## Overview + +The Simple Container Cloud API provides a comprehensive RESTful interface for managing multi-tenant Simple Container deployments. The API follows OpenAPI 3.0 specifications and implements standard HTTP methods with JSON request/response bodies. 
+ +## API Design Principles + +### Base URL Structure +``` +https://api.simple-container.com/api/v1 +``` + +### Versioning Strategy +- **URL Path Versioning**: `/api/v1/`, `/api/v2/` +- **Backward Compatibility**: Maintained within major versions +- **Deprecation Policy**: 6-month notice with migration guides + +### Resource Naming Conventions +- **Plural Nouns**: `/organizations`, `/parent-stacks`, `/client-stacks` +- **Hierarchical Structure**: `/organizations/{org_id}/projects/{project_id}/parent-stacks` +- **Kebab Case**: Multi-word resources use kebab-case (`parent-stacks`, not `parentStacks`) + +### HTTP Status Codes +- **200 OK**: Successful GET, PUT, PATCH operations +- **201 Created**: Successful POST operations +- **204 No Content**: Successful DELETE operations +- **400 Bad Request**: Invalid request format or parameters +- **401 Unauthorized**: Authentication required or invalid +- **403 Forbidden**: Authenticated but insufficient permissions +- **404 Not Found**: Resource not found +- **409 Conflict**: Resource conflict (e.g., duplicate name) +- **422 Unprocessable Entity**: Valid format but business logic error +- **429 Too Many Requests**: Rate limit exceeded +- **500 Internal Server Error**: Server-side error + +## Authentication + +### Bearer Token Authentication +All API endpoints require authentication via JWT Bearer tokens in the Authorization header. 
+
+```http
+Authorization: Bearer <access_token>
+```
+
+### Token Structure
+```json
+{
+  "user_id": "user_123",
+  "email": "user@example.com",
+  "org_id": "org_456",
+  "role": "infrastructure_manager",
+  "permissions": ["parent_stacks.*", "resources.*"],
+  "exp": 1640995200
+}
+```
+
+## Global Response Format
+
+### Success Response
+```json
+{
+  "success": true,
+  "data": {
+    // Response payload
+  },
+  "meta": {
+    "timestamp": "2024-01-15T10:30:00Z",
+    "request_id": "req_789abc123",
+    "api_version": "v1"
+  }
+}
+```
+
+### Error Response
+```json
+{
+  "success": false,
+  "error": {
+    "code": "VALIDATION_ERROR",
+    "message": "Invalid stack configuration",
+    "details": {
+      "field": "server_config.resources",
+      "reason": "Missing required resource definition"
+    }
+  },
+  "meta": {
+    "timestamp": "2024-01-15T10:30:00Z",
+    "request_id": "req_789abc123",
+    "api_version": "v1"
+  }
+}
+```
+
+### Pagination Response
+```json
+{
+  "success": true,
+  "data": [
+    // Array of resources
+  ],
+  "pagination": {
+    "page": 1,
+    "per_page": 20,
+    "total": 156,
+    "total_pages": 8,
+    "has_next": true,
+    "has_prev": false
+  },
+  "meta": {
+    "timestamp": "2024-01-15T10:30:00Z",
+    "request_id": "req_789abc123"
+  }
+}
+```
+
+## Core API Endpoints
+
+### 1. 
Authentication & User Management
+
+#### Login with Google OAuth
+```http
+POST /api/v1/auth/google
+Content-Type: application/json
+
+{
+  "authorization_code": "4/0AX4XfWh...",
+  "redirect_uri": "https://app.simple-container.com/auth/callback"
+}
+```
+
+**Response:**
+```json
+{
+  "success": true,
+  "data": {
+    "access_token": "eyJhbGciOiJIUzI1NiIs...",
+    "refresh_token": "eyJhbGciOiJIUzI1NiIs...",
+    "expires_in": 3600,
+    "token_type": "Bearer",
+    "user": {
+      "id": "user_123",
+      "email": "john@example.com",
+      "name": "John Doe",
+      "avatar_url": "https://...",
+      "organizations": [
+        {
+          "id": "org_456",
+          "name": "Acme Corp",
+          "role": "infrastructure_manager",
+          "permissions": ["parent_stacks.*", "resources.*"]
+        }
+      ]
+    }
+  }
+}
+```
+
+#### Refresh Token
+```http
+POST /api/v1/auth/refresh
+Content-Type: application/json
+
+{
+  "refresh_token": "eyJhbGciOiJIUzI1NiIs..."
+}
+```
+
+#### Get Current User
+```http
+GET /api/v1/user
+Authorization: Bearer <access_token>
+```
+
+#### Update User Profile
+```http
+PATCH /api/v1/user
+Authorization: Bearer <access_token>
+Content-Type: application/json
+
+{
+  "name": "John Smith",
+  "preferences": {
+    "theme": "dark",
+    "timezone": "America/New_York",
+    "notifications": {
+      "email_deployments": true,
+      "email_failures": true
+    }
+  }
+}
+```
+
+### 2. 
Organization Management + +#### List Organizations +```http +GET /api/v1/organizations +Authorization: Bearer +``` + +#### Get Organization Details +```http +GET /api/v1/organizations/{org_id} +Authorization: Bearer +``` + +#### Update Organization +```http +PUT /api/v1/organizations/{org_id} +Authorization: Bearer +Content-Type: application/json + +{ + "name": "Acme Corporation", + "description": "Software development company", + "settings": { + "default_cloud_providers": ["aws", "gcp"], + "require_mfa": true, + "audit_retention_days": 365 + } +} +``` + +#### List Organization Members +```http +GET /api/v1/organizations/{org_id}/members +Authorization: Bearer +``` + +#### Invite User to Organization +```http +POST /api/v1/organizations/{org_id}/members +Authorization: Bearer +Content-Type: application/json + +{ + "email": "newuser@example.com", + "role": "developer", + "permissions": ["client_stacks.*"] +} +``` + +### 3. Project Management + +#### List Projects +```http +GET /api/v1/organizations/{org_id}/projects +Authorization: Bearer +Query Parameters: + - page (optional): Page number (default: 1) + - per_page (optional): Items per page (default: 20, max: 100) + - status (optional): Filter by status ("active", "archived") +``` + +#### Create Project +```http +POST /api/v1/organizations/{org_id}/projects +Authorization: Bearer +Content-Type: application/json + +{ + "name": "E-commerce Platform", + "slug": "ecommerce-platform", + "description": "Main e-commerce application and infrastructure", + "settings": { + "environments": ["development", "staging", "production"], + "default_cloud_provider": "gcp", + "auto_deploy": false, + "require_approval": true + }, + "git_repository": { + "url": "https://github.com/company/ecommerce-platform", + "branch": "main", + "path": ".sc", + "auto_sync": true + } +} +``` + +#### Get Project Details +```http +GET /api/v1/organizations/{org_id}/projects/{project_id} +Authorization: Bearer +``` + +#### Update Project +```http +PUT 
/api/v1/organizations/{org_id}/projects/{project_id} +Authorization: Bearer +Content-Type: application/json + +{ + "name": "E-commerce Platform Updated", + "description": "Updated description", + "settings": { + "environments": ["development", "staging", "production", "preview"], + "require_approval": false + } +} +``` + +#### Delete Project +```http +DELETE /api/v1/organizations/{org_id}/projects/{project_id} +Authorization: Bearer +``` + +### 4. Parent Stack Management + +#### List Parent Stacks +```http +GET /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks +Authorization: Bearer +Query Parameters: + - page (optional): Page number + - per_page (optional): Items per page + - environment (optional): Filter by deployment environment + - status (optional): Filter by deployment status +``` + +#### Create Parent Stack +```http +POST /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks +Authorization: Bearer +Content-Type: application/json + +{ + "name": "infrastructure", + "display_name": "Main Infrastructure Stack", + "description": "Shared infrastructure for all environments", + "server_config": { + "schemaVersion": "1.0", + "provisioner": { + "type": "pulumi", + "config": { + "state-storage": { + "type": "gcp-bucket", + "config": { + "credentials": "${auth:gcloud}", + "projectId": "${auth:gcloud.projectId}", + "name": "infrastructure-state", + "location": "us-central1" + } + } + } + }, + "templates": { + "gke-stack": { + "type": "gcp-gke-autopilot", + "config": { + "projectId": "${auth:gcloud.projectId}", + "credentials": "${auth:gcloud}", + "gkeClusterResource": "main-cluster", + "artifactRegistryResource": "main-registry" + } + } + }, + "resources": { + "resources": { + "production": { + "template": "gke-stack", + "resources": { + "main-cluster": { + "type": "gcp-gke-autopilot-cluster", + "config": { + "projectId": "${auth:gcloud.projectId}", + "credentials": "${auth:gcloud}", + "location": "us-central1", + "gkeMinVersion": 
"1.33.4-gke.1245000" + } + }, + "main-registry": { + "type": "gcp-artifact-registry", + "config": { + "projectId": "${auth:gcloud.projectId}", + "credentials": "${auth:gcloud}", + "location": "us-central1" + } + } + } + } + } + } + } +} +``` + +#### Get Parent Stack +```http +GET /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/{stack_id} +Authorization: Bearer +``` + +#### Update Parent Stack Configuration +```http +PUT /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/{stack_id} +Authorization: Bearer +Content-Type: application/json + +{ + "display_name": "Updated Infrastructure Stack", + "description": "Updated description", + "server_config": { + // Complete updated server.yaml configuration + } +} +``` + +#### Delete Parent Stack +```http +DELETE /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/{stack_id} +Authorization: Bearer +Query Parameters: + - force (optional): Force deletion even if client stacks depend on it +``` + +### 5. 
Client Stack Management + +#### List Client Stacks +```http +GET /api/v1/organizations/{org_id}/projects/{project_id}/client-stacks +Authorization: Bearer +Query Parameters: + - parent_stack_id (optional): Filter by parent stack + - environment (optional): Filter by deployment environment + - status (optional): Filter by deployment status +``` + +#### Create Client Stack +```http +POST /api/v1/organizations/{org_id}/projects/{project_id}/client-stacks +Authorization: Bearer +Content-Type: application/json + +{ + "name": "web-app", + "display_name": "E-commerce Web Application", + "description": "Frontend web application for e-commerce platform", + "parent_stack_id": "parent_stack_456", + "parent_environment": "production", + "client_config": { + "schemaVersion": "1.0", + "stacks": { + "production": { + "type": "cloud-compose", + "parent": "infrastructure", + "config": { + "uses": ["main-cluster", "main-registry"], + "runs": ["web-app"], + "domain": "app.example.com", + "env": { + "NODE_ENV": "production", + "API_URL": "https://api.example.com" + } + } + } + } + }, + "docker_compose": { + "version": "3.8", + "services": { + "web-app": { + "build": ".", + "ports": ["8080:8080"], + "environment": { + "NODE_ENV": "${NODE_ENV}" + }, + "labels": { + "simple-container.com/ingress": "true", + "simple-container.com/ingress/port": "8080" + } + } + } + }, + "dockerfile_content": "FROM node:18-alpine\nWORKDIR /app\nCOPY package*.json ./\nRUN npm install\nCOPY . 
.\nEXPOSE 8080\nCMD [\"npm\", \"start\"]", + "git_repository": { + "url": "https://github.com/company/web-app", + "branch": "main", + "dockerfile_path": "Dockerfile", + "compose_path": "docker-compose.yaml" + } +} +``` + +#### Get Client Stack +```http +GET /api/v1/organizations/{org_id}/projects/{project_id}/client-stacks/{stack_id} +Authorization: Bearer +``` + +#### Update Client Stack +```http +PUT /api/v1/organizations/{org_id}/projects/{project_id}/client-stacks/{stack_id} +Authorization: Bearer +Content-Type: application/json + +{ + "display_name": "Updated Web Application", + "client_config": { + // Updated client.yaml configuration + }, + "docker_compose": { + // Updated docker-compose.yaml + } +} +``` + +### 6. Stack Operations + +#### Provision Parent Stack +```http +POST /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/{stack_id}/provision +Authorization: Bearer +Content-Type: application/json + +{ + "environment": "production", + "skip_preview": false, + "timeout_minutes": 30 +} +``` + +**Response:** +```json +{ + "success": true, + "data": { + "operation_id": "op_789def456", + "status": "pending", + "started_at": "2024-01-15T10:30:00Z", + "estimated_duration": "15 minutes", + "github_repository": "organization/infrastructure-stack", + "workflow_dispatch": { + "dispatched_at": "2024-01-15T10:30:05Z", + "event_type": "provision-infrastructure", + "workflow_run_url": "https://github.com/organization/infrastructure-stack/actions/runs/123456" + }, + "progress": { + "phase": "workflow_dispatched", + "step": "waiting_for_github_workflow", + "completion_percent": 0, + "message": "GitHub Actions workflow dispatched, waiting for execution to start..." 
+ } + } +} +``` + +#### Deploy Client Stack +```http +POST /api/v1/organizations/{org_id}/projects/{project_id}/client-stacks/{stack_id}/deploy +Authorization: Bearer +Content-Type: application/json + +{ + "environment": "production", + "git_commit": "a1b2c3d4e5f6", + "rollback_on_failure": true, + "timeout_minutes": 15 +} +``` + +**Response:** +```json +{ + "success": true, + "data": { + "operation_id": "op_456def789", + "status": "pending", + "started_at": "2024-01-15T10:30:00Z", + "github_repository": "developer/web-application", + "workflow_dispatch": { + "dispatched_at": "2024-01-15T10:30:05Z", + "event_type": "deploy-service", + "workflow_run_url": "https://github.com/developer/web-application/actions/runs/789012" + } + } +} +``` + +#### Get Operation Status +```http +GET /api/v1/operations/{operation_id} +Authorization: Bearer +``` + +**Response:** +```json +{ + "success": true, + "data": { + "operation_id": "op_789def456", + "type": "provision_parent", + "status": "completed", + "started_at": "2024-01-15T10:30:00Z", + "completed_at": "2024-01-15T10:45:00Z", + "duration": "15m23s", + "result": "success", + "github_workflow": { + "repository": "organization/infrastructure-stack", + "workflow_run_id": "123456", + "workflow_run_url": "https://github.com/organization/infrastructure-stack/actions/runs/123456", + "status": "completed", + "conclusion": "success" + }, + "progress": { + "phase": "completed", + "completion_percent": 100, + "message": "Infrastructure provisioned successfully via GitHub Actions" + }, + "logs": [ + { + "timestamp": "2024-01-15T10:30:05Z", + "level": "info", + "source": "github_workflow", + "message": "GitHub Actions workflow started" + }, + { + "timestamp": "2024-01-15T10:35:12Z", + "level": "info", + "source": "sc_engine", + "message": "GKE cluster provisioned successfully" + } + ], + "resources_created": [ + { + "resource_id": "gke_cluster_abc123", + "resource_type": "gcp-gke-autopilot-cluster", + "resource_name": "main-cluster", + 
"status": "healthy" + } + ] + } +} +``` + +#### Cancel Operation +```http +POST /api/v1/operations/{operation_id}/cancel +Authorization: Bearer +``` + +### 7. Resource Discovery & Management + +#### Discover Cloud Resources +```http +POST /api/v1/organizations/{org_id}/resources/discover +Authorization: Bearer +Content-Type: application/json + +{ + "cloud_provider": "gcp", + "regions": ["us-central1", "us-east1"], + "resource_types": ["gcp-gke-autopilot-cluster", "gcp-bucket", "gcp-cloudsql-postgres"], + "filters": { + "tags": { + "environment": "production" + } + } +} +``` + +#### List Discovered Resources +```http +GET /api/v1/organizations/{org_id}/resources +Authorization: Bearer +Query Parameters: + - cloud_provider (optional): Filter by provider + - resource_type (optional): Filter by type + - managed_by_sc (optional): Filter by SC management status + - region (optional): Filter by region +``` + +#### Adopt Existing Resource +```http +POST /api/v1/organizations/{org_id}/resources/{resource_id}/adopt +Authorization: Bearer +Content-Type: application/json + +{ + "parent_stack_id": "parent_stack_456", + "stack_environment": "production", + "configuration_overrides": { + // Any configuration adjustments needed for adoption + } +} +``` + +### 8. 
Secrets Management + +#### List Stack Secrets (Metadata Only) +```http +GET /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/{stack_id}/secrets +Authorization: Bearer +Query Parameters: + - environment (optional): Filter by environment +``` + +#### Update Stack Secrets +```http +PUT /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/{stack_id}/secrets/{environment} +Authorization: Bearer +Content-Type: application/json + +{ + "encrypted_secrets": { + "schemaVersion": "1.0", + "auth": { + "gcloud": { + "type": "gcp-service-account", + "config": { + "projectId": "my-gcp-project", + "credentials": "encrypted_service_account_json" + } + } + }, + "values": { + "DATABASE_PASSWORD": "encrypted_password_value", + "API_KEY": "encrypted_api_key_value" + } + } +} +``` + +### 9. GitHub Integration Management + +#### Authorize GitHub Repository +```http +POST /api/v1/organizations/{org_id}/github/repositories/authorize +Authorization: Bearer +Content-Type: application/json + +{ + "repository_owner": "organization", + "repository_name": "infrastructure-stack", + "purpose": "infrastructure", + "permissions": ["contents:write", "actions:write"] +} +``` + +#### List Authorized Repositories +```http +GET /api/v1/organizations/{org_id}/github/repositories +Authorization: Bearer +Query Parameters: + - purpose (optional): Filter by purpose (infrastructure/deployment) +``` + +#### Get Workflow Status +```http +GET /api/v1/github/repositories/{repo_owner}/{repo_name}/workflows/{workflow_run_id} +Authorization: Bearer +``` + +**Response:** +```json +{ + "success": true, + "data": { + "workflow_run_id": "123456", + "status": "completed", + "conclusion": "success", + "started_at": "2024-01-15T10:30:00Z", + "completed_at": "2024-01-15T10:45:00Z", + "html_url": "https://github.com/organization/infrastructure-stack/actions/runs/123456", + "workflow_name": "Provision Infrastructure", + "event": "repository_dispatch" + } +} +``` + +#### Download Stack 
Configuration (For Workflows)
+```http
+GET /api/v1/workflows/stacks/{stack_id}/config
+Authorization: Bearer
+Query Parameters:
+  - environment: Target environment
+  - stack_type: parent or client
+```
+
+**Response:**
+```json
+{
+  "success": true,
+  "data": {
+    "stack_name": "infrastructure",
+    "environment": "production",
+    "server_config": {
+      "schemaVersion": "1.0",
+      "provisioner": {
+        "type": "pulumi"
+      },
+      "resources": {
+        // Complete server.yaml configuration
+      }
+    },
+    "secrets": {
+      // Decrypted secrets for the environment
+    }
+  }
+}
+```
+
+#### Report Workflow Progress (From Workflows)
+```http
+POST /api/v1/workflows/operations/{operation_id}/progress
+Authorization: Bearer
+Content-Type: application/json
+
+{
+  "phase": "provisioning",
+  "current_step": "creating_gke_cluster",
+  "completion_percent": 45,
+  "message": "Creating GKE Autopilot cluster in us-central1",
+  "workflow_run_id": "123456"
+}
+```
+
+### 10. Cloud Account Management
+
+#### List Cloud Accounts
+```http
+GET /api/v1/organizations/{org_id}/cloud-accounts
+Authorization: Bearer
+```
+
+#### Get Cloud Account Status
+```http
+GET /api/v1/organizations/{org_id}/cloud-accounts/{account_id}
+Authorization: Bearer
+```
+
+#### Refresh Cloud Account Credentials
+```http
+POST /api/v1/organizations/{org_id}/cloud-accounts/{account_id}/refresh
+Authorization: Bearer
+```
+
+### 11. 
Audit & Monitoring + +#### Get Audit Logs +```http +GET /api/v1/organizations/{org_id}/audit-logs +Authorization: Bearer +Query Parameters: + - event_type (optional): Filter by event type + - actor_id (optional): Filter by user ID + - start_date (optional): Filter from date (ISO 8601) + - end_date (optional): Filter to date (ISO 8601) + - page, per_page: Pagination +``` + +#### Get Organization Usage Statistics +```http +GET /api/v1/organizations/{org_id}/usage +Authorization: Bearer +Query Parameters: + - period (optional): "day", "week", "month" (default: "month") + - start_date (optional): Custom period start + - end_date (optional): Custom period end +``` + +**Response:** +```json +{ + "success": true, + "data": { + "period": { + "start": "2024-01-01T00:00:00Z", + "end": "2024-01-31T23:59:59Z" + }, + "metrics": { + "api_requests": 45230, + "provisioning_operations": 123, + "deployment_operations": 456, + "active_parent_stacks": 5, + "active_client_stacks": 23, + "cloud_resources_managed": 87, + "estimated_monthly_cost": 1250.75 + }, + "breakdown": { + "by_user": [ + { + "user_id": "user_123", + "user_name": "John Doe", + "api_requests": 12340, + "operations": 45 + } + ], + "by_cloud_provider": { + "gcp": { + "resources": 52, + "estimated_cost": 890.25 + }, + "aws": { + "resources": 35, + "estimated_cost": 360.50 + } + } + } + } +} +``` + +## Error Codes + +### Authentication Errors +- `AUTH_REQUIRED`: Authentication token required +- `AUTH_INVALID`: Invalid or expired token +- `AUTH_INSUFFICIENT`: Insufficient permissions + +### Validation Errors +- `VALIDATION_ERROR`: General validation error +- `INVALID_FORMAT`: Invalid request format +- `REQUIRED_FIELD`: Required field missing +- `INVALID_VALUE`: Invalid field value + +### Business Logic Errors +- `RESOURCE_NOT_FOUND`: Resource not found +- `RESOURCE_CONFLICT`: Resource name conflict +- `DEPENDENCY_ERROR`: Resource dependency conflict +- `OPERATION_IN_PROGRESS`: Conflicting operation in progress + +### 
System Errors +- `INTERNAL_ERROR`: Server-side error +- `SERVICE_UNAVAILABLE`: Service temporarily unavailable +- `RATE_LIMIT_EXCEEDED`: Too many requests + +## WebSocket API for Real-Time Updates + +### Connection +```javascript +const ws = new WebSocket('wss://api.simple-container.com/ws?token='); +``` + +### Event Types +```json +// Operation progress updates +{ + "type": "operation_progress", + "data": { + "operation_id": "op_789def456", + "status": "running", + "progress": { + "completion_percent": 45, + "current_step": "configuring_networking", + "message": "Setting up VPC networking..." + } + } +} + +// Stack status changes +{ + "type": "stack_status_change", + "data": { + "stack_id": "stack_123", + "stack_type": "parent", + "environment": "production", + "old_status": "deploying", + "new_status": "deployed" + } +} + +// Resource health updates +{ + "type": "resource_health_update", + "data": { + "resource_id": "resource_456", + "resource_type": "gcp-gke-autopilot-cluster", + "old_status": "unknown", + "new_status": "healthy", + "timestamp": "2024-01-15T10:30:00Z" + } +} +``` + +This comprehensive REST API specification provides all the necessary endpoints for managing Simple Container deployments through a web interface while maintaining compatibility with the existing CLI-based workflow. diff --git a/docs/design/cloud-api/05-stack-management-apis.md b/docs/design/cloud-api/05-stack-management-apis.md new file mode 100644 index 00000000..9ac72d6b --- /dev/null +++ b/docs/design/cloud-api/05-stack-management-apis.md @@ -0,0 +1,784 @@ +# Simple Container Cloud API - Stack Management APIs + +## Overview + +This document provides detailed specifications for stack lifecycle management operations, covering both parent stacks (infrastructure) and client stacks (applications). 
These APIs orchestrate GitHub Actions workflows to execute Simple Container provisioning and deployment operations while providing centralized configuration management and multi-tenant capabilities. + +## Stack Lifecycle Management + +### Parent Stack Operations + +Parent stacks represent infrastructure definitions managed by DevOps teams. They follow a structured lifecycle from creation through deployment and maintenance. + +```mermaid +graph TB + DRAFT[Draft] --> VALIDATING[Validating] + VALIDATING --> VALID[Valid] + VALIDATING --> INVALID[Invalid] + VALID --> PROVISIONING[Provisioning] + PROVISIONING --> DEPLOYED[Deployed] + PROVISIONING --> FAILED[Failed] + DEPLOYED --> UPDATING[Updating] + UPDATING --> DEPLOYED + UPDATING --> FAILED + DEPLOYED --> DESTROYING[Destroying] + DESTROYING --> DESTROYED[Destroyed] + FAILED --> PROVISIONING + INVALID --> DRAFT +``` + +#### Create Parent Stack + +**Endpoint:** `POST /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks` + +```go +type CreateParentStackRequest struct { + Name string `json:"name" validate:"required,alphanum_dash"` + DisplayName string `json:"display_name" validate:"required"` + Description string `json:"description"` + ServerConfig api.ServerDescriptor `json:"server_config" validate:"required"` + GitConfig *GitIntegrationConfig `json:"git_config,omitempty"` + + // Access control during creation + Owners []string `json:"owners,omitempty"` // User IDs + Editors []string `json:"editors,omitempty"` // User IDs + Viewers []string `json:"viewers,omitempty"` // User IDs +} + +type GitIntegrationConfig struct { + RepositoryURL string `json:"repository_url"` + Branch string `json:"branch"` + Path string `json:"path"` // Path within repo to server.yaml + AutoSync bool `json:"auto_sync"` +} +``` + +**Implementation Logic:** +```go +func (s *StackService) CreateParentStack(ctx context.Context, orgID, projectID string, req *CreateParentStackRequest) (*ParentStack, error) { + // 1. 
Validate user permissions + if !s.rbac.HasPermission(ctx, "parent_stacks.create", orgID, &projectID) { + return nil, ErrInsufficientPermissions + } + + // 2. Validate server configuration using SC's validation + if err := api.ValidateServerDescriptor(&req.ServerConfig); err != nil { + return nil, fmt.Errorf("invalid server configuration: %w", err) + } + + // 3. Check for name conflicts within project + if exists, err := s.repo.ParentStackExists(ctx, orgID, projectID, req.Name); err != nil { + return nil, err + } else if exists { + return nil, ErrParentStackNameExists + } + + // 4. Validate resource references and dependencies + if err := s.validateResourceDependencies(ctx, &req.ServerConfig); err != nil { + return nil, fmt.Errorf("resource validation failed: %w", err) + } + + // 5. Create stack record with initial status + stack := &ParentStack{ + OrganizationID: orgID, + ProjectID: projectID, + Name: req.Name, + DisplayName: req.DisplayName, + Description: req.Description, + ServerConfig: req.ServerConfig, + GitConfig: req.GitConfig, + Status: StatusDraft, + Owners: append(req.Owners, getCurrentUserID(ctx)), + Editors: req.Editors, + Viewers: req.Viewers, + CreatedBy: getCurrentUserID(ctx), + CreatedAt: time.Now(), + Version: 1, + } + + // 6. Store in database with transaction + if err := s.repo.CreateParentStack(ctx, stack); err != nil { + return nil, err + } + + // 7. 
Log audit event + s.audit.LogEvent(ctx, &AuditEvent{ + EventType: "parent_stack_created", + EventCategory: "stack_management", + TargetID: stack.ID, + TargetName: stack.Name, + Changes: map[string]interface{}{"action": "create"}, + }) + + return stack, nil +} +``` + +#### Validate Parent Stack Configuration + +**Endpoint:** `POST /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/validate` + +```go +type ValidateParentStackRequest struct { + ServerConfig api.ServerDescriptor `json:"server_config" validate:"required"` + Environment string `json:"environment,omitempty"` // Validate specific environment +} + +type ValidationResult struct { + Valid bool `json:"valid"` + Errors []ValidationError `json:"errors,omitempty"` + Warnings []ValidationWarning `json:"warnings,omitempty"` + + // Resource analysis + ResourceAnalysis struct { + TotalResources int `json:"total_resources"` + ResourcesByType map[string]int `json:"resources_by_type"` + EstimatedCost *CostEstimate `json:"estimated_cost,omitempty"` + Dependencies []ResourceDependency `json:"dependencies"` + } `json:"resource_analysis"` + + // Template analysis + TemplateAnalysis struct { + TemplateCount int `json:"template_count"` + SupportedTypes []string `json:"supported_types"` + CloudProviders []string `json:"cloud_providers"` + } `json:"template_analysis"` +} + +type ValidationError struct { + Code string `json:"code"` + Message string `json:"message"` + Path string `json:"path"` // JSON path to problematic field + Details interface{} `json:"details,omitempty"` +} +``` + +#### Provision Parent Stack + +**Endpoint:** `POST /api/v1/organizations/{org_id}/projects/{project_id}/parent-stacks/{stack_id}/provision` + +```go +type ProvisionParentStackRequest struct { + Environment string `json:"environment" validate:"required"` + SkipPreview bool `json:"skip_preview"` + TimeoutMinutes int `json:"timeout_minutes" validate:"min=5,max=120"` + ForceRecreate bool `json:"force_recreate"` // Destroy and recreate 
resources + + // Resource selection + ResourceFilter *ResourceFilter `json:"resource_filter,omitempty"` + + // Approval workflow + RequireApproval bool `json:"require_approval"` + ApproverUserIDs []string `json:"approver_user_ids,omitempty"` +} + +type ResourceFilter struct { + IncludeResources []string `json:"include_resources,omitempty"` // Resource names to include + ExcludeResources []string `json:"exclude_resources,omitempty"` // Resource names to exclude + ResourceTypes []string `json:"resource_types,omitempty"` // Resource types to filter +} +``` + +**Implementation with GitHub Actions Orchestration:** +```go +func (s *StackService) ProvisionParentStack(ctx context.Context, stackID string, req *ProvisionParentStackRequest) (*ProvisionOperation, error) { + // 1. Load and validate stack + stack, err := s.repo.GetParentStack(ctx, stackID) + if err != nil { + return nil, err + } + + // 2. Check permissions + if !s.rbac.HasPermission(ctx, "parent_stacks.provision", stack.OrganizationID, &stack.ProjectID) { + return nil, ErrInsufficientPermissions + } + + // 3. Ensure infrastructure repository exists + infraRepo, err := s.github.EnsureInfrastructureRepository(ctx, stack) + if err != nil { + return nil, fmt.Errorf("failed to setup infrastructure repository: %w", err) + } + + // 4. Create operation record + operation := &ProvisionOperation{ + ID: generateOperationID(), + Type: "provision_parent", + StackID: stackID, + Environment: req.Environment, + Status: StatusPending, + StartedAt: time.Now(), + StartedBy: getCurrentUserID(ctx), + Parameters: req, + EstimatedDuration: s.estimateProvisioningDuration(stack), + GitHubRepository: infraRepo.FullName, + } + + if err := s.repo.CreateOperation(ctx, operation); err != nil { + return nil, err + } + + // 5. 
Generate short-lived token for GitHub workflow + token, err := s.tokenService.GenerateWorkflowToken(ctx, &WorkflowTokenRequest{ + Purpose: "infrastructure", + StackID: stackID, + Environment: req.Environment, + OperationID: operation.ID, + Permissions: []string{"parent_stacks.read", "stack_secrets.read", "operations.report"}, + WorkflowRunID: 0, // Will be updated by GitHub webhook + }) + if err != nil { + return nil, fmt.Errorf("failed to generate workflow token: %w", err) + } + + // 6. Trigger GitHub Actions workflow + err = s.github.DispatchWorkflow(ctx, &WorkflowDispatchRequest{ + Repository: infraRepo, + EventType: "provision-infrastructure", + Environment: req.Environment, + Payload: map[string]interface{}{ + "operation_id": operation.ID, + "stack_id": stackID, + "environment": req.Environment, + "skip_preview": req.SkipPreview, + "timeout_minutes": req.TimeoutMinutes, + }, + Token: token.Token, + }) + + if err != nil { + // Update operation status on failure + s.updateOperationStatus(ctx, operation.ID, StatusFailed, fmt.Sprintf("Failed to trigger GitHub workflow: %v", err)) + return nil, fmt.Errorf("failed to trigger provisioning workflow: %w", err) + } + + // 7. Update operation status to indicate workflow dispatched + s.updateOperationStatus(ctx, operation.ID, StatusRunning, "GitHub Actions workflow dispatched") + + return operation, nil +} + +// Configuration and secrets delivery for GitHub workflows +func (s *ConfigService) GetStackConfigForWorkflow(ctx context.Context, token, stackID, environment string) (*StackWorkflowConfig, error) { + // 1. Validate workflow token + claims, err := s.validateWorkflowToken(ctx, token) + if err != nil { + return nil, ErrInvalidWorkflowToken + } + + // 2. Check token scope + if claims.StackID != stackID || claims.Environment != environment { + return nil, ErrInsufficientPermissions + } + + // 3. Load stack configuration + stack, err := s.repo.GetParentStack(ctx, stackID) + if err != nil { + return nil, err + } + + // 4. 
Load secrets for the environment + secrets, err := s.secretsService.GetStackSecrets(ctx, stackID, environment) + if err != nil { + return nil, err + } + + // 5. Return configuration package for GitHub workflow + return &StackWorkflowConfig{ + ServerConfig: stack.ServerConfig, + Secrets: secrets, + Environment: environment, + StackName: stack.Name, + }, nil +} + +// Operation status reporting from GitHub workflows +func (s *OperationService) ReportWorkflowProgress(ctx context.Context, token, operationID string, progress *WorkflowProgress) error { + // 1. Validate workflow token + claims, err := s.validateWorkflowToken(ctx, token) + if err != nil { + return ErrInvalidWorkflowToken + } + + // 2. Update operation progress + operation, err := s.repo.GetOperation(ctx, operationID) + if err != nil { + return err + } + + // 3. Update progress in database + operation.Progress = &OperationProgress{ + Phase: progress.Phase, + CurrentStep: progress.CurrentStep, + CompletionPercent: progress.CompletionPercent, + Message: progress.Message, + LastUpdated: time.Now(), + } + + return s.repo.UpdateOperation(ctx, operation) +} +``` + +### Client Stack Operations + +Client stacks represent application deployments that consume infrastructure from parent stacks. 
+ +#### Create Client Stack + +**Endpoint:** `POST /api/v1/organizations/{org_id}/projects/{project_id}/client-stacks` + +```go +type CreateClientStackRequest struct { + Name string `json:"name" validate:"required,alphanum_dash"` + DisplayName string `json:"display_name" validate:"required"` + Description string `json:"description"` + + // Parent stack relationship + ParentStackID string `json:"parent_stack_id" validate:"required"` + ParentEnvironment string `json:"parent_environment"` // Optional override + + // SC configuration + ClientConfig api.ClientDescriptor `json:"client_config" validate:"required"` + DockerCompose map[string]interface{} `json:"docker_compose"` + DockerfileContent string `json:"dockerfile_content"` + + // Git integration + GitRepository *GitRepositoryConfig `json:"git_repository,omitempty"` + + // Access control + Owners []string `json:"owners,omitempty"` + Collaborators []CollaboratorConfig `json:"collaborators,omitempty"` +} + +type GitRepositoryConfig struct { + URL string `json:"url"` + Branch string `json:"branch"` + DockerfilePath string `json:"dockerfile_path"` + ComposePath string `json:"compose_path"` +} + +type CollaboratorConfig struct { + UserID string `json:"user_id"` + Role string `json:"role"` // "read", "write" + Permissions []string `json:"permissions,omitempty"` +} +``` + +**Implementation:** +```go +func (s *StackService) CreateClientStack(ctx context.Context, orgID, projectID string, req *CreateClientStackRequest) (*ClientStack, error) { + // 1. Validate permissions + if !s.rbac.HasPermission(ctx, "client_stacks.create", orgID, &projectID) { + return nil, ErrInsufficientPermissions + } + + // 2. 
Validate parent stack exists and user has access + parentStack, err := s.repo.GetParentStack(ctx, req.ParentStackID) + if err != nil { + return nil, fmt.Errorf("parent stack not found: %w", err) + } + + if !s.rbac.HasPermission(ctx, "parent_stacks.read", parentStack.OrganizationID, &parentStack.ProjectID) { + return nil, ErrInsufficientPermissions + } + + // 3. Validate client configuration + if err := api.ValidateClientDescriptor(&req.ClientConfig); err != nil { + return nil, fmt.Errorf("invalid client configuration: %w", err) + } + + // 4. Validate docker-compose configuration + if err := s.validateDockerCompose(req.DockerCompose); err != nil { + return nil, fmt.Errorf("invalid docker-compose configuration: %w", err) + } + + // 5. Check resource references against parent stack + if err := s.validateClientResourceUsage(ctx, parentStack, &req.ClientConfig); err != nil { + return nil, fmt.Errorf("resource validation failed: %w", err) + } + + // 6. Create client stack record + stack := &ClientStack{ + OrganizationID: orgID, + ProjectID: projectID, + Name: req.Name, + DisplayName: req.DisplayName, + Description: req.Description, + ParentStackID: req.ParentStackID, + ParentEnvironment: req.ParentEnvironment, + ClientConfig: req.ClientConfig, + DockerCompose: req.DockerCompose, + DockerfileContent: req.DockerfileContent, + GitRepository: req.GitRepository, + Owners: append(req.Owners, getCurrentUserID(ctx)), + Collaborators: req.Collaborators, + CreatedBy: getCurrentUserID(ctx), + CreatedAt: time.Now(), + Version: 1, + Status: StatusDraft, + } + + return s.repo.CreateClientStack(ctx, stack) +} +``` + +#### Deploy Client Stack + +**Endpoint:** `POST /api/v1/organizations/{org_id}/projects/{project_id}/client-stacks/{stack_id}/deploy` + +```go +type DeployClientStackRequest struct { + Environment string `json:"environment" validate:"required"` + GitCommit string `json:"git_commit,omitempty"` + TimeoutMinutes int `json:"timeout_minutes" validate:"min=5,max=60"` + 
RollbackOnFailure bool `json:"rollback_on_failure"` + + // Deployment strategy + Strategy DeploymentStrategy `json:"strategy"` + + // Environment overrides + EnvironmentOverrides map[string]string `json:"environment_overrides,omitempty"` + + // Resource requirements + ResourceRequirements *ResourceRequirements `json:"resource_requirements,omitempty"` +} + +type DeploymentStrategy struct { + Type string `json:"type"` // "rolling", "blue_green", "canary" + MaxUnavailable string `json:"max_unavailable,omitempty"` // "25%" or "1" + MaxSurge string `json:"max_surge,omitempty"` // "25%" or "1" + ProgressDeadline int `json:"progress_deadline,omitempty"` // seconds +} + +type ResourceRequirements struct { + CPU string `json:"cpu"` // "100m", "0.5", "2" + Memory string `json:"memory"` // "128Mi", "256Mi", "1Gi" + Storage string `json:"storage,omitempty"` // "1Gi", "10Gi" +} +``` + +**GitHub Actions Orchestration for Client Deployment:** +```go +func (s *StackService) DeployClientStack(ctx context.Context, stackID string, req *DeployClientStackRequest) (*DeploymentOperation, error) { + // 1. Load client stack and validate permissions + clientStack, err := s.repo.GetClientStack(ctx, stackID) + if err != nil { + return nil, err + } + + if !s.rbac.HasPermission(ctx, "client_stacks.deploy", clientStack.OrganizationID, &clientStack.ProjectID) { + return nil, ErrInsufficientPermissions + } + + // 2. Get application repository information + appRepo, err := s.github.GetApplicationRepository(ctx, clientStack.GitHubRepositoryID) + if err != nil { + return nil, fmt.Errorf("application repository not found: %w", err) + } + + // 3. 
Create deployment operation + operation := &DeploymentOperation{ + ID: generateOperationID(), + Type: "deploy_client", + StackID: stackID, + Environment: req.Environment, + GitCommit: req.GitCommit, + Strategy: req.Strategy, + Status: StatusPending, + StartedAt: time.Now(), + StartedBy: getCurrentUserID(ctx), + GitHubRepository: appRepo.FullName, + } + + if err := s.repo.CreateOperation(ctx, operation); err != nil { + return nil, err + } + + // 4. Generate short-lived token for deployment workflow + token, err := s.tokenService.GenerateWorkflowToken(ctx, &WorkflowTokenRequest{ + Purpose: "deployment", + StackID: stackID, + Environment: req.Environment, + OperationID: operation.ID, + Permissions: []string{"client_stacks.read", "client_stacks.deploy", "deployments.report"}, + WorkflowRunID: 0, + }) + if err != nil { + return nil, fmt.Errorf("failed to generate workflow token: %w", err) + } + + // 5. Trigger GitHub Actions deployment workflow + err = s.github.DispatchWorkflow(ctx, &WorkflowDispatchRequest{ + Repository: appRepo, + EventType: "deploy-service", + Environment: req.Environment, + Payload: map[string]interface{}{ + "operation_id": operation.ID, + "stack_id": stackID, + "environment": req.Environment, + "git_commit": req.GitCommit, + "deployment_strategy": req.Strategy, + "environment_overrides": req.EnvironmentOverrides, + }, + Token: token.Token, + }) + + if err != nil { + s.updateOperationStatus(ctx, operation.ID, StatusFailed, fmt.Sprintf("Failed to trigger deployment workflow: %v", err)) + return nil, fmt.Errorf("failed to trigger deployment workflow: %w", err) + } + + // 6. Update operation status + s.updateOperationStatus(ctx, operation.ID, StatusRunning, "GitHub Actions deployment workflow dispatched") + + return operation, nil +} + +// Client stack configuration delivery for deployment workflows +func (s *ConfigService) GetClientStackConfigForWorkflow(ctx context.Context, token, stackID, environment string) (*ClientWorkflowConfig, error) { + // 1. 
Validate workflow token + claims, err := s.validateWorkflowToken(ctx, token) + if err != nil { + return nil, ErrInvalidWorkflowToken + } + + // 2. Check token scope + if claims.StackID != stackID || claims.Environment != environment { + return nil, ErrInsufficientPermissions + } + + // 3. Load client stack and parent stack + clientStack, err := s.repo.GetClientStack(ctx, stackID) + if err != nil { + return nil, err + } + + parentStack, err := s.repo.GetParentStack(ctx, clientStack.ParentStackID) + if err != nil { + return nil, err + } + + // 4. Load merged configuration + mergedConfig, err := s.mergeStackConfigurations(ctx, clientStack, parentStack, environment) + if err != nil { + return nil, err + } + + // 5. Return configuration package for deployment workflow + return &ClientWorkflowConfig{ + ClientConfig: clientStack.ClientConfig, + ParentStackConfig: parentStack.ServerConfig, + MergedConfiguration: mergedConfig, + DockerCompose: clientStack.DockerCompose, + Environment: environment, + StackName: clientStack.Name, + ParentStackName: parentStack.Name, + }, nil +} + +// Deployment status reporting from GitHub workflows +func (s *DeploymentService) ReportDeploymentStatus(ctx context.Context, token, operationID string, status *DeploymentStatus) error { + // 1. Validate workflow token + claims, err := s.validateWorkflowToken(ctx, token) + if err != nil { + return ErrInvalidWorkflowToken + } + + // 2. Update deployment operation + operation, err := s.repo.GetDeploymentOperation(ctx, operationID) + if err != nil { + return err + } + + // 3. 
Update deployment status and endpoints + operation.Status = status.Status + operation.CompletedAt = &status.CompletedAt + operation.DeployedEndpoints = status.Endpoints + operation.GitCommit = status.GitCommit + operation.WorkflowRunID = status.WorkflowRunID + + if status.Status == StatusCompleted { + // Update client stack with deployment information + clientStack, _ := s.repo.GetClientStack(ctx, operation.StackID) + clientStack.LastDeployment = &LastDeployment{ + Environment: operation.Environment, + GitCommit: status.GitCommit, + DeployedAt: status.CompletedAt, + Endpoints: status.Endpoints, + } + s.repo.UpdateClientStack(ctx, clientStack) + } + + return s.repo.UpdateDeploymentOperation(ctx, operation) +} +``` + +## Stack Configuration Management + +### Configuration Validation + +The API provides comprehensive validation of Simple Container configurations before deployment: + +```go +type ConfigurationValidator struct { + scValidator *api.ConfigValidator + cloudValidator map[string]CloudValidator +} + +func (cv *ConfigurationValidator) ValidateParentStackConfig(ctx context.Context, config *api.ServerDescriptor) (*ValidationResult, error) { + result := &ValidationResult{ + Valid: true, + Errors: []ValidationError{}, + Warnings: []ValidationWarning{}, + } + + // 1. Basic schema validation using SC's validator + if err := cv.scValidator.ValidateServerDescriptor(config); err != nil { + result.Valid = false + result.Errors = append(result.Errors, ValidationError{ + Code: "SCHEMA_VALIDATION_ERROR", + Message: err.Error(), + Path: extractJSONPath(err), + }) + } + + // 2. Resource dependency validation + if depErrors := cv.validateResourceDependencies(config); len(depErrors) > 0 { + result.Valid = false + result.Errors = append(result.Errors, depErrors...) + } + + // 3. 
Cloud provider configuration validation + for provider, validator := range cv.cloudValidator { + if providerErrors := validator.ValidateResources(config, provider); len(providerErrors) > 0 { + result.Errors = append(result.Errors, providerErrors...) + result.Valid = false + } + } + + // 4. Cost estimation + if costEstimate, err := cv.estimateStackCost(config); err == nil { + result.ResourceAnalysis.EstimatedCost = costEstimate + } + + // 5. Resource analysis + cv.analyzeResources(config, result) + cv.analyzeTemplates(config, result) + + return result, nil +} +``` + +### Configuration Merging + +For client stack deployments, configurations are merged from parent and client stacks: + +```go +func (s *StackService) mergeStackConfigurations(ctx context.Context, clientStack *ClientStack, parentStack *ParentStack, environment string) (*api.MergedConfiguration, error) { + // 1. Extract parent configuration for the environment + parentEnvConfig := s.extractParentEnvironmentConfig(parentStack, environment) + + // 2. Extract client configuration for the environment + clientEnvConfig, exists := clientStack.ClientConfig.Stacks[environment] + if !exists { + return nil, fmt.Errorf("client stack has no configuration for environment %s", environment) + } + + // 3. Validate resource usage (client can only use resources from parent) + if err := s.validateResourceUsage(clientEnvConfig.Config.Uses, parentEnvConfig.Resources); err != nil { + return nil, err + } + + // 4. Merge configurations using SC's merger + merger := api.NewConfigurationMerger() + mergedConfig, err := merger.MergeConfigurations(&api.MergeRequest{ + ParentConfig: parentEnvConfig, + ClientConfig: clientEnvConfig, + Environment: environment, + ResolveSecrets: true, + }) + + if err != nil { + return nil, fmt.Errorf("failed to merge configurations: %w", err) + } + + // 5. 
Apply client-specific overrides + s.applyClientOverrides(mergedConfig, clientStack) + + return mergedConfig, nil +} +``` + +## Operation Monitoring and Management + +### Operation Status Tracking + +All long-running operations (provisioning, deployment) provide real-time status updates: + +```go +type Operation struct { + ID string `json:"id"` + Type string `json:"type"` + StackID string `json:"stack_id"` + Environment string `json:"environment"` + Status string `json:"status"` + Progress *OperationProgress `json:"progress,omitempty"` + StartedAt time.Time `json:"started_at"` + CompletedAt *time.Time `json:"completed_at,omitempty"` + Duration string `json:"duration"` + StartedBy string `json:"started_by"` + Result string `json:"result,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Logs []OperationLog `json:"logs"` + ResourcesAffected []ResourceChange `json:"resources_affected"` +} + +type OperationProgress struct { + Phase string `json:"phase"` + CurrentStep string `json:"current_step"` + CompletionPercent int `json:"completion_percent"` + Message string `json:"message"` + EstimatedTimeLeft string `json:"estimated_time_left,omitempty"` + LastUpdated time.Time `json:"last_updated"` +} + +type OperationLog struct { + Timestamp time.Time `json:"timestamp"` + Level string `json:"level"` + Message string `json:"message"` + Source string `json:"source"` + Details map[string]interface{} `json:"details,omitempty"` +} +``` + +### Operation Cancellation + +**Endpoint:** `POST /api/v1/operations/{operation_id}/cancel` + +```go +func (s *OperationService) CancelOperation(ctx context.Context, operationID string) error { + operation, err := s.repo.GetOperation(ctx, operationID) + if err != nil { + return err + } + + // Check if operation is cancellable + if !s.isCancellable(operation.Status) { + return ErrOperationNotCancellable + } + + // Check permissions + if !s.rbac.CanCancelOperation(ctx, operation) { + return ErrInsufficientPermissions + } + + // Send 
cancellation signal to running operation + if err := s.signalCancellation(ctx, operationID); err != nil { + return err + } + + // Update operation status + return s.repo.UpdateOperationStatus(ctx, operationID, StatusCancelling, "Cancellation requested by user") +} +``` + +This comprehensive stack management API provides full lifecycle control over Simple Container deployments while maintaining compatibility with existing CLI workflows and ensuring proper multi-tenant isolation and access control. diff --git a/docs/design/cloud-api/06-cloud-integrations.md b/docs/design/cloud-api/06-cloud-integrations.md new file mode 100644 index 00000000..8e550e7c --- /dev/null +++ b/docs/design/cloud-api/06-cloud-integrations.md @@ -0,0 +1,754 @@ +# Simple Container Cloud API - Cloud Integrations + +## Overview + +The Simple Container Cloud API provides seamless integration with major cloud providers (AWS, GCP) through automated service account provisioning, resource discovery, and lifecycle management. The system automatically creates and manages cloud credentials for users upon authentication, enabling immediate infrastructure management capabilities. + +## Google Cloud Platform Integration + +### Automatic Service Account Provisioning + +When a user authenticates with Google OAuth for the first time, the system automatically provisions a GCP service account with appropriate permissions for Simple Container operations. + +```mermaid +sequenceDiagram + participant User + participant API + participant Google OAuth + participant GCP IAM + participant MongoDB + + User->>API: First-time login + API->>Google OAuth: Validate token + Google OAuth->>API: User profile + project access + + API->>GCP IAM: Create service account + Note over GCP IAM: Service Account: sc-user-{hash} + GCP IAM->>API: Service account created + + API->>GCP IAM: Assign predefined roles + Note over GCP IAM: Roles: storage.admin, container.admin, etc. 
+ GCP IAM->>API: Roles assigned + + API->>GCP IAM: Create service account key + GCP IAM->>API: JSON key file + + API->>MongoDB: Store encrypted key + metadata + API->>User: Authentication complete +``` + +#### Service Account Creation Logic + +```go +type GCPIntegrationService struct { + iamClient *admin.Service + rmClient *cloudresourcemanager.Service + encryptor *crypto.AESEncryptor + db *mongo.Database + logger *logrus.Logger +} + +func (g *GCPIntegrationService) ProvisionServiceAccount(ctx context.Context, user *User, projectID string) (*GCPServiceAccount, error) { + // 1. Generate unique service account ID + saID := g.generateServiceAccountID(user.Email) + + // 2. Create service account + sa := &admin.ServiceAccount{ + AccountId: saID, + DisplayName: fmt.Sprintf("Simple Container - %s", user.Name), + Description: "Auto-provisioned service account for Simple Container Cloud API", + } + + createdSA, err := g.iamClient.Projects.ServiceAccounts.Create( + fmt.Sprintf("projects/%s", projectID), + &admin.CreateServiceAccountRequest{ServiceAccount: sa}, + ).Context(ctx).Do() + + if err != nil { + return nil, fmt.Errorf("failed to create service account: %w", err) + } + + // 3. Assign predefined roles for Simple Container operations + roles := g.getSimpleContainerRoles() + for _, role := range roles { + if err := g.assignRole(ctx, createdSA.Email, projectID, role); err != nil { + g.logger.Warnf("Failed to assign role %s to %s: %v", role, createdSA.Email, err) + // Continue with other roles rather than failing entirely + } + } + + // 4. Create service account key + key, err := g.iamClient.Projects.ServiceAccounts.Keys.Create( + createdSA.Name, + &admin.CreateServiceAccountKeyRequest{ + KeyAlgorithm: "KEY_ALG_RSA_2048", + }, + ).Context(ctx).Do() + + if err != nil { + // Clean up created service account + g.iamClient.Projects.ServiceAccounts.Delete(createdSA.Name).Context(ctx).Do() + return nil, fmt.Errorf("failed to create service account key: %w", err) + } + + // 5. 
Encrypt and store key data + keyData, err := base64.StdEncoding.DecodeString(key.PrivateKeyData) + if err != nil { + return nil, fmt.Errorf("failed to decode key data: %w", err) + } + + encryptedKey, err := g.encryptor.Encrypt(keyData) + if err != nil { + return nil, fmt.Errorf("failed to encrypt key data: %w", err) + } + + // 6. Store service account information + gcpAccount := &GCPServiceAccount{ + UserID: user.ID, + OrganizationID: user.OrganizationID, + ProjectID: projectID, + ServiceAccount: ServiceAccountDetails{ + Email: createdSA.Email, + UniqueID: createdSA.UniqueId, + KeyID: extractKeyID(key.Name), + EncryptedKeyData: encryptedKey, + }, + Permissions: ServiceAccountPermissions{ + Roles: roles, + LastValidated: time.Now(), + Status: "active", + }, + CreatedAt: time.Now(), + } + + if err := g.db.Collection("cloud_accounts").InsertOne(ctx, gcpAccount); err != nil { + return nil, fmt.Errorf("failed to store service account: %w", err) + } + + return gcpAccount, nil +} + +func (g *GCPIntegrationService) getSimpleContainerRoles() []string { + return []string{ + "roles/storage.admin", // GCS bucket management + "roles/container.admin", // GKE cluster management + "roles/compute.admin", // Compute Engine resources + "roles/cloudsql.admin", // Cloud SQL management + "roles/redis.admin", // Redis memorystore + "roles/pubsub.admin", // Pub/Sub topics/subscriptions + "roles/artifactregistry.admin", // Artifact Registry + "roles/dns.admin", // Cloud DNS (if used) + "roles/secretmanager.admin", // Secret Manager + "roles/iam.serviceAccountTokenCreator", // Token creation for workload identity + } +} +``` + +#### Custom IAM Role for Fine-Grained Permissions + +For enhanced security, the system can create custom IAM roles with minimal required permissions: + +```go +func (g *GCPIntegrationService) createCustomSimpleContainerRole(ctx context.Context, projectID string) error { + role := &admin.Role{ + RoleId: "simple_container_operator", + Title: "Simple Container 
Operator", + Description: "Custom role for Simple Container operations with minimal required permissions", + Stage: "GA", + IncludedPermissions: []string{ + // Storage permissions + "storage.buckets.create", + "storage.buckets.delete", + "storage.buckets.get", + "storage.buckets.list", + "storage.buckets.update", + "storage.objects.*", + + // Container/GKE permissions + "container.clusters.create", + "container.clusters.delete", + "container.clusters.get", + "container.clusters.list", + "container.clusters.update", + "container.operations.*", + + // Compute permissions (for networking, service accounts) + "compute.networks.create", + "compute.subnetworks.create", + "compute.firewalls.create", + "compute.addresses.create", + "compute.routers.create", + + // Cloud SQL permissions + "cloudsql.instances.create", + "cloudsql.instances.delete", + "cloudsql.instances.get", + "cloudsql.instances.list", + "cloudsql.instances.update", + + // Redis permissions + "redis.instances.create", + "redis.instances.delete", + "redis.instances.get", + "redis.instances.list", + "redis.instances.update", + }, + } + + _, err := g.iamClient.Projects.Roles.Create( + fmt.Sprintf("projects/%s", projectID), + &admin.CreateRoleRequest{Role: role}, + ).Context(ctx).Do() + + return err +} +``` + +### GCP Resource Discovery + +The system can discover existing GCP resources for adoption into Simple Container management: + +```go +type GCPResourceDiscovery struct { + clients map[string]interface{} // Service clients for different GCP services +} + +func (d *GCPResourceDiscovery) DiscoverResources(ctx context.Context, request *ResourceDiscoveryRequest) ([]*DiscoveredResource, error) { + var resources []*DiscoveredResource + + // Discover GKE clusters + if contains(request.ResourceTypes, "gcp-gke-autopilot-cluster") { + gkeResources, err := d.discoverGKEClusters(ctx, request.ProjectID, request.Regions) + if err != nil { + return nil, err + } + resources = append(resources, gkeResources...) 
+ } + + // Discover GCS buckets + if contains(request.ResourceTypes, "gcp-bucket") { + bucketResources, err := d.discoverGCSBuckets(ctx, request.ProjectID) + if err != nil { + return nil, err + } + resources = append(resources, bucketResources...) + } + + // Discover Cloud SQL instances + if contains(request.ResourceTypes, "gcp-cloudsql-postgres") { + sqlResources, err := d.discoverCloudSQLInstances(ctx, request.ProjectID, request.Regions) + if err != nil { + return nil, err + } + resources = append(resources, sqlResources...) + } + + // Apply filters + return d.applyFilters(resources, request.Filters), nil +} + +func (d *GCPResourceDiscovery) discoverGKEClusters(ctx context.Context, projectID string, regions []string) ([]*DiscoveredResource, error) { + containerClient, err := container.NewService(ctx) + if err != nil { + return nil, err + } + + var resources []*DiscoveredResource + + for _, region := range regions { + // List clusters in region + clusters, err := containerClient.Projects.Locations.Clusters.List( + fmt.Sprintf("projects/%s/locations/%s", projectID, region), + ).Context(ctx).Do() + + if err != nil { + continue // Skip region on error, don't fail entire discovery + } + + for _, cluster := range clusters.Clusters { + resource := &DiscoveredResource{ + CloudProvider: "gcp", + ResourceID: cluster.Name, + ResourceType: "gcp-gke-autopilot-cluster", + ResourceName: cluster.Name, + Region: region, + Status: strings.ToLower(cluster.Status), + Configuration: map[string]interface{}{ + "location": cluster.Location, + "node_version": cluster.CurrentNodeVersion, + "master_version": cluster.CurrentMasterVersion, + "autopilot": cluster.Autopilot.Enabled, + "network": cluster.Network, + "subnetwork": cluster.Subnetwork, + }, + Tags: convertGCPLabels(cluster.ResourceLabels), + DiscoveredAt: time.Now(), + DiscoveredBy: "api_discovery", + } + + // Determine if cluster can be managed by Simple Container + resource.Metadata = map[string]interface{}{ + "manageable": 
d.isClusterManageable(cluster), + "adoption_requirements": d.getAdoptionRequirements(cluster), + } + + resources = append(resources, resource) + } + } + + return resources, nil +} +``` + +## AWS Integration + +### IAM User/Role Provisioning + +For AWS integration, the system creates IAM users or assumes roles with appropriate policies: + +```go +type AWSIntegrationService struct { + iamClient *iam.IAM + stsClient *sts.STS + encryptor *crypto.AESEncryptor + db *mongo.Database +} + +func (a *AWSIntegrationService) ProvisionIAMUser(ctx context.Context, user *User) (*AWSAccount, error) { + // 1. Create IAM user + userName := a.generateIAMUserName(user.Email) + + createUserInput := &iam.CreateUserInput{ + UserName: aws.String(userName), + Tags: []*iam.Tag{ + { + Key: aws.String("Purpose"), + Value: aws.String("SimpleContainerCloudAPI"), + }, + { + Key: aws.String("UserEmail"), + Value: aws.String(user.Email), + }, + { + Key: aws.String("CreatedAt"), + Value: aws.String(time.Now().Format(time.RFC3339)), + }, + }, + } + + iamUser, err := a.iamClient.CreateUserWithContext(ctx, createUserInput) + if err != nil { + return nil, fmt.Errorf("failed to create IAM user: %w", err) + } + + // 2. Create and attach policy + policyARN, err := a.createSimpleContainerPolicy(ctx) + if err != nil { + // Clean up user + a.iamClient.DeleteUserWithContext(ctx, &iam.DeleteUserInput{UserName: aws.String(userName)}) + return nil, fmt.Errorf("failed to create policy: %w", err) + } + + // Attach policy to user + _, err = a.iamClient.AttachUserPolicyWithContext(ctx, &iam.AttachUserPolicyInput{ + UserName: aws.String(userName), + PolicyArn: aws.String(policyARN), + }) + if err != nil { + return nil, fmt.Errorf("failed to attach policy: %w", err) + } + + // 3. 
Create access keys + accessKeyResult, err := a.iamClient.CreateAccessKeyWithContext(ctx, &iam.CreateAccessKeyInput{ + UserName: aws.String(userName), + }) + if err != nil { + return nil, fmt.Errorf("failed to create access key: %w", err) + } + + // 4. Encrypt and store credentials + encryptedSecretKey, err := a.encryptor.Encrypt([]byte(*accessKeyResult.AccessKey.SecretAccessKey)) + if err != nil { + return nil, fmt.Errorf("failed to encrypt secret key: %w", err) + } + + awsAccount := &AWSAccount{ + UserID: user.ID, + OrganizationID: user.OrganizationID, + IAMUser: IAMUserDetails{ + UserName: userName, + UserARN: *iamUser.User.Arn, + AccessKeyID: *accessKeyResult.AccessKey.AccessKeyId, + EncryptedSecret: encryptedSecretKey, + PolicyARN: policyARN, + }, + Permissions: IAMPermissions{ + Policies: []string{policyARN}, + LastValidated: time.Now(), + Status: "active", + }, + CreatedAt: time.Now(), + } + + return awsAccount, nil +} + +func (a *AWSIntegrationService) createSimpleContainerPolicy(ctx context.Context) (string, error) { + policyName := "SimpleContainerCloudAPIPolicy" + policyDocument := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:*", + "ecs:*", + "ecr:*", + "rds:*", + "lambda:*", + "cloudfront:*", + "route53:*", + "secretsmanager:*", + "kms:*", + "iam:GetRole", + "iam:PassRole", + "iam:CreateRole", + "iam:AttachRolePolicy", + "logs:*", + "cloudwatch:*" + ], + "Resource": "*" + } + ] + }` + + createPolicyInput := &iam.CreatePolicyInput{ + PolicyName: aws.String(policyName), + PolicyDocument: aws.String(policyDocument), + Description: aws.String("Policy for Simple Container Cloud API operations"), + } + + policy, err := a.iamClient.CreatePolicyWithContext(ctx, createPolicyInput) + if err != nil { + return "", err + } + + return *policy.Policy.Arn, nil +} +``` + +### AWS Resource Discovery + +```go +func (d *AWSResourceDiscovery) DiscoverECSClusters(ctx context.Context, region string) ([]*DiscoveredResource, error) 
{ + ecsClient := ecs.New(session.New(&aws.Config{Region: aws.String(region)})) + + // List all clusters + clusterARNs, err := ecsClient.ListClustersWithContext(ctx, &ecs.ListClustersInput{}) + if err != nil { + return nil, err + } + + if len(clusterARNs.ClusterArns) == 0 { + return nil, nil + } + + // Get cluster details + clusters, err := ecsClient.DescribeClustersWithContext(ctx, &ecs.DescribeClustersInput{ + Clusters: clusterARNs.ClusterArns, + }) + if err != nil { + return nil, err + } + + var resources []*DiscoveredResource + for _, cluster := range clusters.Clusters { + resource := &DiscoveredResource{ + CloudProvider: "aws", + ResourceID: *cluster.ClusterName, + ResourceType: "aws-ecs-cluster", + ResourceName: *cluster.ClusterName, + Region: region, + Status: strings.ToLower(*cluster.Status), + Configuration: map[string]interface{}{ + "capacity_providers": cluster.CapacityProviders, + "running_tasks": *cluster.RunningTasksCount, + "pending_tasks": *cluster.PendingTasksCount, + "active_services": *cluster.ActiveServicesCount, + }, + Tags: convertECSTags(cluster.Tags), + DiscoveredAt: time.Now(), + DiscoveredBy: "api_discovery", + } + + resources = append(resources, resource) + } + + return resources, nil +} +``` + +## Multi-Cloud Resource Management + +### Unified Resource Interface + +The system provides a unified interface for managing resources across cloud providers: + +```go +type CloudResourceManager struct { + gcpService *GCPIntegrationService + awsService *AWSIntegrationService + db *mongo.Database +} + +func (crm *CloudResourceManager) ProvisionResource(ctx context.Context, request *ResourceProvisionRequest) (*ProvisionOperation, error) { + switch request.CloudProvider { + case "gcp": + return crm.provisionGCPResource(ctx, request) + case "aws": + return crm.provisionAWSResource(ctx, request) + default: + return nil, fmt.Errorf("unsupported cloud provider: %s", request.CloudProvider) + } +} + +func (crm *CloudResourceManager) 
AdoptExistingResource(ctx context.Context, request *ResourceAdoptionRequest) error { + // 1. Validate resource exists and is accessible + resource, err := crm.validateResourceForAdoption(ctx, request) + if err != nil { + return err + } + + // 2. Create Simple Container configuration for the resource + scConfig, err := crm.generateSCConfigForResource(resource, request.AdoptionConfig) + if err != nil { + return err + } + + // 3. Update parent stack to include adopted resource + parentStack, err := crm.db.Collection("parent_stacks").FindOne(ctx, bson.M{ + "_id": request.ParentStackID, + }).Decode(&ParentStack{}) + if err != nil { + return err + } + + // Add resource to parent stack configuration + if parentStack.ServerConfig.Resources.Resources == nil { + parentStack.ServerConfig.Resources.Resources = make(map[string]api.PerEnvResourcesDescriptor) + } + + envConfig := parentStack.ServerConfig.Resources.Resources[request.Environment] + if envConfig.Resources == nil { + envConfig.Resources = make(map[string]api.ResourceDescriptor) + } + + envConfig.Resources[resource.ResourceName] = api.ResourceDescriptor{ + Type: resource.ResourceType, + Config: api.Config{Config: scConfig}, + } + + parentStack.ServerConfig.Resources.Resources[request.Environment] = envConfig + + // 4. 
Update database + _, err = crm.db.Collection("parent_stacks").UpdateOne(ctx, + bson.M{"_id": request.ParentStackID}, + bson.M{"$set": bson.M{ + "server_config": parentStack.ServerConfig, + "updated_at": time.Now(), + "version": parentStack.Version + 1, + }}, + ) + + return err +} +``` + +## Cloud Cost Monitoring + +### Cost Estimation and Tracking + +```go +type CloudCostService struct { + gcpBilling *cloudbilling.Service + awsCostExpl *costexplorer.CostExplorer + db *mongo.Database +} + +func (ccs *CloudCostService) EstimateStackCost(ctx context.Context, stackConfig *api.ServerDescriptor) (*CostEstimate, error) { + estimate := &CostEstimate{ + Currency: "USD", + Period: "monthly", + } + + // Analyze each resource in the stack + for envName, envConfig := range stackConfig.Resources.Resources { + for resName, resConfig := range envConfig.Resources { + resourceCost, err := ccs.estimateResourceCost(ctx, resConfig.Type, resConfig.Config) + if err != nil { + // Log warning but don't fail the estimation + continue + } + + estimate.Total += resourceCost.MonthlyEstimate + estimate.Breakdown = append(estimate.Breakdown, CostBreakdown{ + ResourceName: resName, + ResourceType: resConfig.Type, + Environment: envName, + MonthlyCost: resourceCost.MonthlyEstimate, + Details: resourceCost.Details, + }) + } + } + + return estimate, nil +} + +func (ccs *CloudCostService) GetActualCosts(ctx context.Context, organizationID string, period CostPeriod) (*ActualCostReport, error) { + // Get all cloud accounts for organization + cloudAccounts, err := ccs.getOrganizationCloudAccounts(ctx, organizationID) + if err != nil { + return nil, err + } + + report := &ActualCostReport{ + OrganizationID: organizationID, + Period: period, + GeneratedAt: time.Now(), + } + + // Collect costs from each cloud provider + for _, account := range cloudAccounts { + switch account.Provider { + case "gcp": + gcpCosts, err := ccs.getGCPCosts(ctx, account, period) + if err != nil { + continue // Log error but 
continue with other accounts + } + report.ProviderCosts = append(report.ProviderCosts, gcpCosts) + + case "aws": + awsCosts, err := ccs.getAWSCosts(ctx, account, period) + if err != nil { + continue + } + report.ProviderCosts = append(report.ProviderCosts, awsCosts) + } + } + + // Calculate totals + for _, providerCost := range report.ProviderCosts { + report.TotalCost += providerCost.Total + } + + return report, nil +} +``` + +## Security and Compliance + +### Credential Rotation + +```go +func (g *GCPIntegrationService) RotateServiceAccountKey(ctx context.Context, userID, accountID string) error { + // 1. Get current service account details + account, err := g.getServiceAccount(ctx, userID, accountID) + if err != nil { + return err + } + + // 2. Create new service account key + newKey, err := g.iamClient.Projects.ServiceAccounts.Keys.Create( + account.ServiceAccount.Email, + &admin.CreateServiceAccountKeyRequest{ + KeyAlgorithm: "KEY_ALG_RSA_2048", + }, + ).Context(ctx).Do() + if err != nil { + return fmt.Errorf("failed to create new service account key: %w", err) + } + + // 3. Encrypt new key data + keyData, _ := base64.StdEncoding.DecodeString(newKey.PrivateKeyData) + encryptedKey, err := g.encryptor.Encrypt(keyData) + if err != nil { + return fmt.Errorf("failed to encrypt new key: %w", err) + } + + // 4. Update database with new key + _, err = g.db.Collection("cloud_accounts").UpdateOne(ctx, + bson.M{"_id": accountID}, + bson.M{"$set": bson.M{ + "service_account.key_id": extractKeyID(newKey.Name), + "service_account.encrypted_key_data": encryptedKey, + "service_account.last_rotated": time.Now(), + "updated_at": time.Now(), + }}, + ) + if err != nil { + return err + } + + // 5. 
Delete old key (after a grace period) + go func() { + time.Sleep(5 * time.Minute) // Grace period for existing operations + g.iamClient.Projects.ServiceAccounts.Keys.Delete( + fmt.Sprintf("%s/keys/%s", account.ServiceAccount.Email, account.ServiceAccount.KeyID), + ).Context(context.Background()).Do() + }() + + return nil +} +``` + +### Compliance Monitoring + +```go +type ComplianceMonitor struct { + gcpService *GCPIntegrationService + awsService *AWSIntegrationService + db *mongo.Database +} + +func (cm *ComplianceMonitor) CheckCompliance(ctx context.Context, organizationID string) (*ComplianceReport, error) { + report := &ComplianceReport{ + OrganizationID: organizationID, + CheckedAt: time.Now(), + Standards: []string{"SOC2", "GDPR", "HIPAA"}, + } + + // Check cloud account compliance + accounts, err := cm.getCloudAccounts(ctx, organizationID) + if err != nil { + return nil, err + } + + for _, account := range accounts { + accountCompliance := cm.checkAccountCompliance(ctx, account) + report.AccountCompliance = append(report.AccountCompliance, accountCompliance) + + if !accountCompliance.Compliant { + report.OverallCompliant = false + } + } + + // Check resource compliance + resources, err := cm.getOrganizationResources(ctx, organizationID) + if err != nil { + return nil, err + } + + for _, resource := range resources { + resourceCompliance := cm.checkResourceCompliance(ctx, resource) + report.ResourceCompliance = append(report.ResourceCompliance, resourceCompliance) + + if !resourceCompliance.Compliant { + report.OverallCompliant = false + } + } + + return report, nil +} +``` + +This comprehensive cloud integration system provides seamless, secure, and compliant management of multi-cloud resources while maintaining the simplicity that Simple Container users expect. 
diff --git a/docs/design/cloud-api/07-security-compliance.md b/docs/design/cloud-api/07-security-compliance.md new file mode 100644 index 00000000..4885181a --- /dev/null +++ b/docs/design/cloud-api/07-security-compliance.md @@ -0,0 +1,201 @@ +# Simple Container Cloud API - Security & Compliance + +## Overview + +The Simple Container Cloud API implements enterprise-grade security controls and compliance measures to protect sensitive infrastructure configurations, credentials, and operational data. + +## Security Architecture + +### Data Protection + +#### Encryption at Rest +All sensitive data encrypted using AES-256-GCM with automatic key rotation: + +```go +type EncryptionService struct { + currentKey *crypto.AESKey + previousKeys map[string]*crypto.AESKey +} + +func (es *EncryptionService) Encrypt(plaintext []byte) (*EncryptedData, error) { + // Generate random nonce, encrypt with AES-256-GCM + // Return encrypted data with key ID for rotation support +} +``` + +#### Database Field-Level Encryption +- User PII and credentials automatically encrypted +- Cloud service account keys encrypted with separate key rotation +- Stack secrets double-encrypted (application + database layer) + +### Authentication Security + +#### Multi-Factor Authentication (MFA) +- TOTP support with encrypted secret storage +- WebAuthn/FIDO2 hardware security keys +- Backup codes for recovery scenarios +- MFA required for administrative operations + +```go +type MFAService struct { + db *mongo.Database + encryptor *EncryptionService +} + +func (mfa *MFAService) VerifyTOTP(ctx context.Context, userID, token string) (bool, error) { + // Decrypt TOTP secret, verify token with time window tolerance +} +``` + +#### Session Security +- JWT tokens with short expiration (1 hour) +- Refresh token rotation +- Concurrent session limits per user +- Geographic anomaly detection + +### Authorization & Access Control + +#### Secure RBAC Implementation +- Rate-limited permission checks +- Privilege 
escalation detection +- Comprehensive audit logging of all permission checks +- Dynamic permission evaluation with context awareness + +```go +func (srbac *SecureRBACService) CheckPermission(ctx context.Context, userID, resource, action string) (bool, error) { + // Rate limiting, input validation, escalation detection + // Audit all permission checks +} +``` + +#### API Security +- Input validation and sanitization +- SQL injection and XSS pattern detection +- Request size limits and content-type validation +- WAF integration for advanced threat detection + +## Infrastructure Security + +### Container Security +- Automated vulnerability scanning of container images +- Runtime security monitoring with Falco +- Non-root container enforcement +- Resource limits and security contexts + +### Network Security +- TLS 1.3 for all communications +- VPC isolation with private subnets +- Network segmentation between services +- DDoS protection and geographic filtering + +## Compliance Framework + +### SOC 2 Type II Compliance + +#### Security Controls Implementation +- **CC6.1**: Logical and physical access controls with MFA +- **CC6.2**: Role-based authentication and authorization +- **CC6.3**: Network security with encryption and monitoring +- **CC7.1**: Continuous system monitoring and alerting +- **CC7.2**: Encrypted backups with tested recovery procedures + +#### Audit and Monitoring +```go +type SOC2ComplianceService struct { + auditLogger *AuditLogger + accessManager *AccessManager +} + +func (soc *SOC2ComplianceService) GenerateComplianceReport() (*SOC2Report, error) { + // Assess all SOC 2 controls, generate compliance score +} +``` + +### GDPR Compliance + +#### Data Subject Rights +- **Right of Access**: Automated data export functionality +- **Right to Rectification**: Secure data update mechanisms +- **Right to Erasure**: Complete data deletion with referential integrity +- **Data Portability**: Structured data export in common formats + +```go +type 
GDPRComplianceService struct { + db *mongo.Database + anonymizer *DataAnonymizer +} + +func (gdpr *GDPRComplianceService) ProcessDataSubjectRequest(ctx context.Context, request *DataSubjectRequest) error { + // Handle GDPR requests with identity verification +} +``` + +#### Privacy by Design +- Data minimization in collection and storage +- Purpose limitation for all data processing +- Consent management for optional data collection +- Regular data retention policy enforcement + +### Additional Compliance Standards + +#### HIPAA Compliance (Healthcare customers) +- PHI encryption with FIPS 140-2 Level 3 validated modules +- Access controls with minimum necessary principle +- Audit logs with tamper protection +- Business Associate Agreement (BAA) support + +#### ISO 27001 Alignment +- Information Security Management System (ISMS) +- Risk assessment and treatment procedures +- Incident response and business continuity planning +- Regular security awareness training requirements + +## Security Monitoring & Incident Response + +### Real-Time Monitoring +```go +type SecurityMonitoringService struct { + falcoClient *falco.Client + alertManager *AlertManager + responseEngine *IncidentResponseEngine +} + +func (sms *SecurityMonitoringService) HandleSecurityEvent(ctx context.Context, event *SecurityEvent) { + // Categorize severity, trigger automated response, escalate if needed +} +``` + +### Incident Response Framework +- **Detection**: Automated security event correlation +- **Analysis**: Threat intelligence integration +- **Containment**: Automated quarantine capabilities +- **Recovery**: Rollback and restoration procedures +- **Lessons Learned**: Post-incident review and improvement + +### Vulnerability Management +- **Scanning**: Automated vulnerability assessment +- **Assessment**: Risk-based prioritization +- **Remediation**: Automated patching where possible +- **Verification**: Continuous validation of fixes + +## Key Security Features + +### Secrets Management +- 
Integration with HashiCorp Vault for enterprise deployments +- Automatic rotation of service account credentials +- Encrypted storage of all sensitive configuration data +- Secure distribution to Simple Container provisioning engine + +### Backup & Recovery +- Encrypted backups with geographically distributed storage +- Point-in-time recovery capabilities +- Regular backup testing and validation +- RTO/RPO targets: 4 hours/1 hour respectively + +### Security Hardening +- Container images based on distroless/minimal base images +- Regular security updates with automated testing +- Principle of least privilege for all service accounts +- Network policies restricting inter-service communication + +This comprehensive security and compliance framework ensures that the Simple Container Cloud API meets enterprise security requirements while maintaining the simplicity and usability that users expect from Simple Container. diff --git a/docs/design/cloud-api/08-deployment-architecture.md b/docs/design/cloud-api/08-deployment-architecture.md new file mode 100644 index 00000000..01ac72bd --- /dev/null +++ b/docs/design/cloud-api/08-deployment-architecture.md @@ -0,0 +1,508 @@ +# Simple Container Cloud API - Deployment Architecture + +## Overview + +This document outlines the deployment architecture for the Simple Container Cloud API using Simple Container's existing GitHub Actions CI/CD integration. The SC Cloud API follows SC's standard parent-client stack pattern and uses real SC CLI commands for deployment automation. 
+ +## Simple Container Configuration Structure + +The SC Cloud API follows the standard SC project structure: + +``` +.sc/ +└── stacks/ + └── sc-cloud-api/ + ├── server.yaml # Infrastructure configuration + ├── client.yaml # Application configuration + └── secrets.yaml # Encrypted secrets +``` + +### Infrastructure Stack - server.yaml + +Based on existing SC resource types and patterns from the documentation: + +```yaml +schemaVersion: 1.0 + +provisioner: + type: pulumi + config: + state-storage: + type: gcp-bucket + config: + credentials: "${auth:gcloud}" + projectId: "${auth:gcloud.projectId}" + provision: true + bucketName: sc-cloud-api-state + location: us-central1 + secrets-provider: + type: gcp-kms + config: + provision: true + projectId: "${auth:gcloud.projectId}" + keyName: sc-cloud-api-kms-key + keyLocation: global + credentials: "${auth:gcloud}" + +templates: + cloud-api: + type: gcp-gke-autopilot + config: + projectId: "${auth:gcloud.projectId}" + credentials: "${auth:gcloud}" + gkeClusterResource: gke-cluster + artifactRegistryResource: artifact-registry + +cicd: + type: github-actions + config: + organization: "simple-container-org" + environments: + staging: + type: staging + protection: false + auto-deploy: true + runner: "ubuntu-latest" + deploy-flags: ["--skip-preview"] + secrets: ["MONGODB_CONNECTION_STRING", "REDIS_URL", "JWT_SECRET"] + variables: + NODE_ENV: "staging" + LOG_LEVEL: "debug" + production: + type: production + protection: true + reviewers: ["devops-team", "senior-dev"] + auto-deploy: false + runner: "ubuntu-latest" + deploy-flags: ["--skip-preview", "--timeout", "30m"] + secrets: ["MONGODB_CONNECTION_STRING", "REDIS_URL", "JWT_SECRET"] + variables: + NODE_ENV: "production" + LOG_LEVEL: "warn" + notifications: + slack: + enabled: true + webhook-url: "${secret:SLACK_WEBHOOK_URL}" + discord: + enabled: false + webhook-url: "" + telegram: + enabled: false + bot-token: "" + chat-id: "" + workflow-generation: + enabled: true + templates: 
["deploy", "destroy"] + auto-update: true + custom-actions: {} + output-path: ".github/workflows/" + sc-version: "latest" + +resources: + registrar: + type: cloudflare + config: + credentials: "${secret:CLOUDFLARE_API_TOKEN}" + accountId: "${secret:CLOUDFLARE_ACCOUNT_ID}" + zoneName: simple-container.com + resources: + staging: + template: cloud-api + resources: + mongodb: + type: mongodb-atlas + config: + admins: ["admin"] + developers: [] + instanceSize: "M2" + orgId: "${secret:MONGODB_ATLAS_ORG_ID}" + region: "US_CENTRAL" + cloudProvider: GCP + privateKey: "${secret:MONGODB_ATLAS_PRIVATE_KEY}" + publicKey: "${secret:MONGODB_ATLAS_PUBLIC_KEY}" + redis: + type: gcp-redis + config: + projectId: "${auth:gcloud.projectId}" + credentials: "${auth:gcloud}" + memorySizeGb: 2 + region: us-central1 + redisConfig: + maxmemory-policy: noeviction + gke-cluster: + type: gcp-gke-autopilot-cluster + config: + projectId: "${auth:gcloud.projectId}" + credentials: "${auth:gcloud}" + location: us-central1 + caddy: + enable: true + namespace: caddy + replicas: 1 + artifact-registry: + type: gcp-artifact-registry + config: + projectId: "${auth:gcloud.projectId}" + credentials: "${auth:gcloud}" + location: us-central1 + docker: + immutableTags: false + production: + template: cloud-api + resources: + mongodb: + type: mongodb-atlas + config: + admins: ["admin"] + developers: [] + instanceSize: "M10" + orgId: "${secret:MONGODB_ATLAS_ORG_ID}" + region: "US_CENTRAL" + cloudProvider: GCP + privateKey: "${secret:MONGODB_ATLAS_PRIVATE_KEY}" + publicKey: "${secret:MONGODB_ATLAS_PUBLIC_KEY}" + redis: + type: gcp-redis + config: + projectId: "${auth:gcloud.projectId}" + credentials: "${auth:gcloud}" + memorySizeGb: 5 + region: us-central1 + redisConfig: + maxmemory-policy: noeviction + gke-cluster: + type: gcp-gke-autopilot-cluster + config: + projectId: "${auth:gcloud.projectId}" + credentials: "${auth:gcloud}" + location: us-central1 + caddy: + enable: true + namespace: caddy + replicas: 2 + 
artifact-registry: + type: gcp-artifact-registry + config: + projectId: "${auth:gcloud.projectId}" + credentials: "${auth:gcloud}" + location: us-central1 + docker: + immutableTags: false + +secrets: + type: fs-passphrase + config: + inherit: "" + +variables: {} +``` + +### Application Stack - client.yaml + +```yaml +schemaVersion: 1.0 + +stacks: + production: + type: cloud-compose + parent: simple-container-org/sc-cloud-infra + parentEnv: production + template: cloud-api + config: + uses: [mongodb, redis, gke-cluster, artifact-registry] + domain: api.simple-container.com + runs: [cloud-api] + scale: + min: 3 + max: 10 + env: + NODE_ENV: "production" + LOG_LEVEL: "warn" + + staging: + type: cloud-compose + parent: simple-container-org/sc-cloud-infra + parentEnv: staging + template: cloud-api + config: + uses: [mongodb, redis, gke-cluster, artifact-registry] + domain: api-staging.simple-container.com + runs: [cloud-api] + scale: + min: 1 + max: 3 + env: + NODE_ENV: "staging" + LOG_LEVEL: "debug" +``` + +### Secrets Configuration - secrets.yaml + +Based on SC's real secrets management pattern: + +```yaml +schemaVersion: 1.0 + +# Cloud provider authentication (as shown in SC documentation) +auth: + aws: + type: aws-token + config: + account: "123456789012" + accessKey: "${secret:aws-access-key}" + secretAccessKey: "${secret:aws-secret-key}" + region: us-east-1 + +values: + # Cloud provider credentials + aws-access-key: your-aws-access-key-here + aws-secret-key: your-aws-secret-key-here + + # Application secrets for deployment + MONGODB_CONNECTION_STRING: mongodb+srv://user:pass@cluster.mongodb.net/db + REDIS_URL: redis://redis-cluster:6379 + JWT_SECRET: your-jwt-secret-key + + # GitHub App credentials for API functionality + GITHUB_APP_ID: your-github-app-id + GITHUB_APP_PRIVATE_KEY: your-github-app-private-key + GITHUB_WEBHOOK_SECRET: your-github-webhook-secret + + # Google OAuth credentials + GOOGLE_CLIENT_ID: your-google-client-id + GOOGLE_CLIENT_SECRET: 
your-google-client-secret +``` + +### Docker Compose Configuration + +```yaml +# docker-compose.yaml for SC Cloud API +version: '3.8' + +services: + cloud-api: + build: . + ports: + - "8080:8080" + - "9090:9090" # metrics + environment: + - DATABASE_URL=${MONGODB_CONNECTION_STRING} + - REDIS_URL=${REDIS_URL} + - JWT_SECRET=${JWT_SECRET} + - GITHUB_APP_ID=${GITHUB_APP_ID} + - GITHUB_APP_PRIVATE_KEY=${GITHUB_APP_PRIVATE_KEY} + - GITHUB_WEBHOOK_SECRET=${GITHUB_WEBHOOK_SECRET} + - GOOGLE_CLIENT_ID=${GOOGLE_CLIENT_ID} + - GOOGLE_CLIENT_SECRET=${GOOGLE_CLIENT_SECRET} +``` + +## Real SC CI/CD Deployment Workflow + +### Setup and Configuration + +Following the actual SC GitHub Actions workflow: + +```bash +# 1. Clone and setup project structure +git clone https://github.com/simple-container-org/sc-cloud-api.git +cd sc-cloud-api + +# Create SC project structure +mkdir -p .sc/stacks/sc-cloud-api +``` + +### Secrets Management (Real SC Commands) + +```bash +# Initialize secrets management +sc secrets init + +# Add your public key +sc secrets allow your-public-key + +# Encrypt the secrets file +sc secrets hide +``` + +### Generate GitHub Actions Workflows (Real SC Command) + +```bash +# Generate workflows using actual SC CLI command +sc cicd generate --stack sc-cloud-api --output .github/workflows/ + +# Validate the generated configuration +sc cicd validate --stack sc-cloud-api + +# Preview workflows before committing +sc cicd preview --stack sc-cloud-api --show-content +``` + +### Generated Workflows + +SC automatically generates these workflow files based on the actual patterns from SC documentation: + +#### `.github/workflows/deploy-sc-cloud-api.yml` + +```yaml +name: Deploy SC Cloud API +on: + push: + branches: [main] + workflow_dispatch: + inputs: + environment: + description: 'Environment to deploy' + required: true + default: 'staging' + type: choice + options: ['staging', 'production'] + +jobs: + deploy-staging: + if: github.ref == 'refs/heads/main' && 
github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && github.event.inputs.environment == 'staging')
+    runs-on: ubuntu-latest
+    environment: staging
+    steps:
+      - name: Deploy to Staging
+        uses: simple-container-com/api/.github/actions/deploy@v2025.10.4
+        with:
+          stack-name: sc-cloud-api
+          environment: staging
+          sc-config: ${{ secrets.SC_CONFIG }}
+
+  deploy-production:
+    if: github.event_name == 'workflow_dispatch' && github.event.inputs.environment == 'production'
+    runs-on: ubuntu-latest
+    environment: production
+    steps:
+      - name: Deploy to Production
+        uses: simple-container-com/api/.github/actions/deploy@v2025.10.4
+        with:
+          stack-name: sc-cloud-api
+          environment: production
+          sc-config: ${{ secrets.SC_CONFIG }}
+```
+
+#### `.github/workflows/destroy-sc-cloud-api.yml`
+
+```yaml
+name: Destroy SC Cloud API
+on:
+  workflow_dispatch:
+    inputs:
+      environment:
+        description: 'Environment to destroy'
+        required: true
+        type: choice
+        options: ['staging', 'production']
+      confirm:
+        description: 'Type "destroy" to confirm'
+        required: true
+
+jobs:
+  destroy:
+    if: github.event.inputs.confirm == 'destroy'
+    runs-on: ubuntu-latest
+    environment: ${{ github.event.inputs.environment }}
+    steps:
+      - name: Destroy Stack
+        uses: simple-container-com/api/.github/actions/destroy@v2025.10.4
+        with:
+          stack-name: sc-cloud-api
+          environment: ${{ github.event.inputs.environment }}
+          sc-config: ${{ secrets.SC_CONFIG }}
+```
+
+### Deployment Process
+
+```bash
+# 1. Commit and push configuration
+git add .
+git commit -m "Add SC Cloud API configuration"
+git push origin main
+
+# 2. Automatic staging deployment triggers on main branch push
+# 3. Manual production deployment via GitHub UI workflow_dispatch
+# 4. 
Monitor via GitHub Actions UI +``` + +## Monitoring & Operations + +### Built-in SC Features + +Simple Container provides these operational capabilities automatically: + +- **Resource monitoring** via cloud provider dashboards (GCP Console, AWS CloudWatch) +- **Application logs** aggregated to cloud logging services +- **Basic health checks** defined in docker-compose.yaml +- **GitHub Actions workflow monitoring** in the Actions tab +- **Stack status** via existing SC CLI commands + +### Operational Commands + +```bash +# Check deployment status (if available in SC CLI) +sc status --stack sc-cloud-api + +# View logs (if available in SC CLI) +sc logs --stack sc-cloud-api --follow + +# Update configuration and sync workflows +sc cicd sync --stack sc-cloud-api --dry-run +sc cicd sync --stack sc-cloud-api +``` + +### Application Health Monitoring + +Health check endpoint in the Go application: + +```go +// /health endpoint for basic health checking +func healthHandler(w http.ResponseWriter, r *http.Request) { + // Basic health check logic + w.WriteHeader(http.StatusOK) + w.Write([]byte("OK")) +} +``` + +## Operational Workflows + +### GitHub Actions Deployment Workflow + +The actual deployment process using SC's GitHub Actions integration: + +```bash +# 1. Development workflow +git add . +git commit -m "Update SC Cloud API configuration" + +# 2. Push to main triggers automatic staging deployment +git push origin main + +# 3. Production deployment via GitHub UI +# Navigate to Actions -> Deploy SC Cloud API -> Run workflow +# Select "production" environment -> Run workflow + +# 4. 
Monitor via GitHub Actions dashboard +# View logs and status in GitHub Actions UI +``` + +## Summary + +The SC Cloud API deployment architecture uses Simple Container's existing features: + +### **Real SC Components Used** +- **CI/CD Integration**: `sc cicd generate` command for GitHub Actions workflows +- **Stack Structure**: Parent-client pattern with server.yaml and client.yaml +- **Secrets Management**: `sc secrets` commands for encrypted secrets.yaml +- **Resource Types**: Actual SC resource types like `gcp-gke-autopilot-cluster`, `mongodb-atlas` +- **GitHub Actions**: SC's built-in actions for deployment automation + +### **Key Benefits** +- **Simplified Operations**: SC abstracts away Kubernetes complexity +- **Automated Workflows**: Generated GitHub Actions handle CI/CD +- **Secure Secrets**: Built-in secrets encryption and management +- **Multi-Environment**: Staging and production environments with approval workflows +- **Cloud Provider Integration**: Native support for GCP, AWS resources + +This approach leverages SC's philosophy of simplifying cloud deployments while providing the orchestration capabilities needed for the SC Cloud API service. diff --git a/docs/design/cloud-api/09-cicd-integration.md b/docs/design/cloud-api/09-cicd-integration.md new file mode 100644 index 00000000..ba5a5e6f --- /dev/null +++ b/docs/design/cloud-api/09-cicd-integration.md @@ -0,0 +1,339 @@ +# Simple Container Cloud API - CI/CD Integration + +## Overview + +The Simple Container Cloud API integrates with GitHub to orchestrate CI/CD workflows while maintaining centralized configuration management. This hybrid approach enables GitHub Actions to execute deployments using configurations and secrets managed by the SC Cloud API. 
+
+## Architecture Flow
+
+```mermaid
+sequenceDiagram
+    participant DevOps
+    participant Developer
+    participant SCCloud as SC Cloud API
+    participant GitHub
+    participant SCEngine as SC Engine (CI/CD)
+    participant Cloud as GCP/AWS
+
+    Note over DevOps, Cloud: Infrastructure Management
+    DevOps->>SCCloud: Configure parent stack
+    DevOps->>SCCloud: Connect GitHub repository
+    SCCloud->>GitHub: Create infrastructure repo + workflows
+    DevOps->>SCCloud: Trigger provision
+    SCCloud->>GitHub: Dispatch workflow with token
+    GitHub->>SCEngine: Execute with short-lived token
+    SCEngine->>SCCloud: Fetch server.yaml + secrets
+    SCEngine->>Cloud: Provision infrastructure
+
+    Note over DevOps, Cloud: Application Deployment
+    Developer->>SCCloud: Configure client stack
+    SCCloud->>GitHub: Create PR with deployment workflows
+    Developer->>GitHub: Merge PR
+    SCCloud->>GitHub: Dispatch deployment
+    GitHub->>SCEngine: Execute with deployment token
+    SCEngine->>SCCloud: Fetch client.yaml + compose overrides
+    SCEngine->>Cloud: Deploy application
+```
+
+## GitHub Integration
+
+### GitHub App Setup
+
+SC Cloud uses a GitHub App for repository access and workflow management:
+
+```go
+type GitHubIntegrationService struct {
+    client *github.Client
+    appTransport *ghinstallation.Transport
+    db *mongo.Database
+}
+
+func (gis *GitHubIntegrationService) AuthorizeRepository(ctx context.Context, userID, repoOwner, repoName, purpose string) (*GitHubRepository, error) {
+    // Verify repository access, store authorization
+    // Purpose: "infrastructure" or "deployment"
+}
+```
+
+### Infrastructure Repository Management
+
+For DevOps teams, SC Cloud creates infrastructure repositories:
+
+```go
+func (gis *GitHubIntegrationService) CreateInfrastructureRepository(ctx context.Context, parentStack *ParentStack) error {
+    // 1. Create private repository: {stack-name}-infrastructure
+    // 2. Setup repository structure (README, .gitignore, sc-config.yaml)
+    // 3. 
Generate GitHub Actions workflows for provision/destroy/plan + // 4. Store repository metadata +} +``` + +Generated workflows use short-lived tokens to access SC Cloud API: + +```yaml +# .github/workflows/provision-infrastructure.yml +name: Provision Infrastructure +on: + repository_dispatch: + types: [provision-infrastructure] + workflow_dispatch: + +jobs: + provision: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: simple-container-com/setup-sc@v1 + + - name: Configure SC Cloud API + run: | + echo "${{ secrets.SC_CLOUD_TOKEN }}" | sc cloud auth login --token-stdin + sc cloud config set api-url "${{ secrets.SC_CLOUD_API_URL }}" + + - name: Download configuration + run: | + sc cloud stack download --stack-id "${{ vars.STACK_ID }}" --environment "${{ inputs.environment }}" + + - name: Provision + run: sc provision --config-source cloud-api --stack-id "${{ vars.STACK_ID }}" +``` + +## Short-Lived Token System + +### Workflow Token Generation + +```go +type WorkflowToken struct { + Token string `json:"token"` + ExpiresAt time.Time `json:"expires_at"` // 2 hours max + Permissions []string `json:"permissions"` + Scope map[string]string `json:"scope"` // Stack/environment limits +} + +func (ts *TokenService) GenerateWorkflowToken(ctx context.Context, request *WorkflowTokenRequest) (*WorkflowToken, error) { + // 1. Validate limited permissions based on purpose (infrastructure/deployment) + // 2. Generate JWT with 2-hour expiration + // 3. Include scope restrictions (stack_id, environment) + // 4. Store for tracking and revocation +} +``` + +### Token-Based API Access + +```go +func (wam *WorkflowAuthMiddleware) AuthenticateWorkflowToken() gin.HandlerFunc { + return func(c *gin.Context) { + // 1. Extract Bearer token + // 2. Validate JWT and check expiration + // 3. Verify token not revoked + // 4. 
Set workflow context with permissions/scope + } +} +``` + +## SC CLI Cloud Integration + +### Configuration Source Resolution + +Modified SC CLI supports cloud API as configuration source: + +```go +type CloudAPIConfigSource struct { + client *CloudAPIClient + token string + stackID string +} + +func (cs *CloudAPIConfigSource) LoadServerConfig(ctx context.Context, stackName, environment string) (*api.ServerDescriptor, error) { + // Download server.yaml from SC Cloud API using workflow token +} + +func (cs *CloudAPIConfigSource) LoadSecrets(ctx context.Context, stackName, environment string) (*api.SecretsDescriptor, error) { + // Download secrets from SC Cloud API with token-based access +} +``` + +CLI commands support cloud API mode: + +```bash +# Traditional file-based +sc provision --stacks-dir ./config --profile production + +# Cloud API mode (used in CI/CD) +sc provision --config-source cloud-api --stack-id "stack-123" --environment production +sc deploy --config-source cloud-api --stack-id "client-456" --environment staging +``` + +## Developer Repository Integration + +### Repository Scanning + +SC Cloud scans developer repositories to generate deployment configurations: + +```go +func (rs *RepositoryScanner) ScanDeveloperRepository(ctx context.Context, repo *GitHubRepository) (*RepositoryScanResult, error) { + // 1. Analyze repository structure via GitHub API + // 2. Detect project type (Node.js, Python, Go, etc.) + // 3. Check for existing Dockerfile/docker-compose.yaml + // 4. Generate deployment recommendations +} +``` + +### Deployment Workflow Generation + +Creates pull requests with SC-integrated workflows: + +```go +func (gis *GitHubIntegrationService) CreateDeploymentWorkflowPR(ctx context.Context, clientStack *ClientStack, scan *RepositoryScanResult) error { + // 1. Create feature branch: sc-deployment-setup-{timestamp} + // 2. Generate deployment workflow (.github/workflows/deploy.yml) + // 3. Create SC-enhanced docker-compose.yaml + // 4. 
Add SC configuration file (sc-config.yaml) + // 5. Create pull request with description +} +``` + +Generated deployment workflow: + +```yaml +# .github/workflows/deploy.yml +name: Deploy Application +on: + repository_dispatch: + types: [deploy-service] + push: + branches: [main] + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: simple-container-com/setup-sc@v1 + + - name: Download SC configuration + run: | + sc cloud stack download --stack-id "${{ vars.CLIENT_STACK_ID }}" --stack-type client + + - name: Merge configurations + run: | + sc compose merge --local docker-compose.yaml --sc-config ./sc-config/client.yaml + + - name: Deploy + run: sc deploy --config-source cloud-api --stack-id "${{ vars.CLIENT_STACK_ID }}" +``` + +## Hybrid Configuration Strategy + +### Configuration Layering + +```go +type HybridConfigManager struct { + cloudAPI *CloudAPIClient + fileSystem *FileSystemConfig +} + +func (hcm *HybridConfigManager) LoadMergedConfiguration(ctx context.Context, request *ConfigLoadRequest) (*MergedConfiguration, error) { + // 1. Load base configuration from SC Cloud API (authoritative) + cloudConfig, err := hcm.cloudAPI.GetStackConfiguration(ctx, request) + + // 2. Load local overrides from repository (if exists) + localOverrides, err := hcm.fileSystem.LoadLocalOverrides(request.LocalConfigPath) + + // 3. 
Merge with precedence rules: + // - Secrets/credentials: Cloud API only (security) + // - Resource definitions: Cloud API only (consistency) + // - Application config: Local overrides allowed + // - Environment variables: Local overrides allowed + + return hcm.merger.MergeConfigurations(cloudConfig, localOverrides, mergeRules) +} +``` + +### Configuration Precedence Rules + +| Configuration Type | Source Priority | Rationale | +|-------------------|----------------|-----------| +| **Secrets & Credentials** | Cloud API Only | Security & centralized management | +| **Resource Definitions** | Cloud API Only | Infrastructure consistency | +| **Parent Stack References** | Cloud API Only | Dependency management | +| **Application Configuration** | Local Override Allowed | Development flexibility | +| **Environment Variables** | Local Override Allowed | Development/testing needs | +| **Docker Compose Services** | Local Override Allowed | Service customization | + +### Local Override Example + +```yaml +# Repository: docker-compose.override.yaml +# Developers can override application-specific settings +version: '3.8' +services: + app: + environment: + - DEBUG=true + - LOG_LEVEL=debug + volumes: + - ./src:/app/src:ro # Development hot-reload + ports: + - "3000:3000" # Local development port + +# SC Cloud API provides: +# - Database connections (via parent stack resources) +# - Redis configuration +# - External service URLs +# - Production environment variables +``` + +## Triggering Workflows + +### Infrastructure Provisioning + +```go +func (scs *StackService) TriggerInfrastructureProvisioning(ctx context.Context, stackID, environment string) error { + // 1. Generate short-lived workflow token + token, err := scs.tokenService.GenerateWorkflowToken(ctx, &WorkflowTokenRequest{ + Purpose: "infrastructure", + StackID: stackID, + Environment: environment, + Permissions: []string{"parent_stacks.read", "stack_secrets.read", "operations.report"}, + }) + + // 2. 
Dispatch repository workflow + return scs.github.DispatchWorkflow(ctx, &WorkflowDispatchRequest{ + Repository: stackRepo, + EventType: "provision-infrastructure", + Payload: map[string]interface{}{ + "environment": environment, + "operation_id": operationID, + "stack_id": stackID, + }, + Token: token.Token, + }) +} +``` + +### Application Deployment + +```go +func (dcs *DeploymentService) TriggerDeployment(ctx context.Context, clientStackID, environment, gitCommit string) error { + // Generate deployment token with limited scope + token, err := dcs.tokenService.GenerateWorkflowToken(ctx, &WorkflowTokenRequest{ + Purpose: "deployment", + StackID: clientStackID, + Environment: environment, + Permissions: []string{"client_stacks.read", "client_stacks.deploy", "deployments.report"}, + }) + + // Dispatch deployment workflow + return dcs.github.DispatchWorkflow(ctx, &WorkflowDispatchRequest{ + Repository: clientRepo, + EventType: "deploy-service", + Payload: map[string]interface{}{ + "environment": environment, + "git_commit": gitCommit, + "stack_id": clientStackID, + }, + Token: token.Token, + }) +} +``` + +This CI/CD integration design maintains Simple Container's ease-of-use while providing enterprise-grade configuration management, security, and workflow orchestration through GitHub Actions. diff --git a/docs/design/cloud-api/README.md b/docs/design/cloud-api/README.md new file mode 100644 index 00000000..24403082 --- /dev/null +++ b/docs/design/cloud-api/README.md @@ -0,0 +1,84 @@ +# Simple Container Cloud API + +This directory contains the comprehensive design documentation for the Simple Container Cloud API - a REST API service that provides web-based management capabilities for Simple Container infrastructure and application deployments. 
+ +## Overview + +The Simple Container Cloud API transforms the CLI-based Simple Container experience into a web-accessible, multi-tenant service that enables: + +- **Infrastructure Managers** to create and manage parent stacks (shared infrastructure) +- **Developers** to deploy and manage client stacks (applications that consume infrastructure) +- **Organizations** to manage multiple users with role-based access control +- **Cloud Integration** with automated service account provisioning and management + +## Design Documents + +### Core Architecture +- [**System Architecture**](./01-system-architecture.md) - Overall service design, technology stack, and component interactions +- [**Database Design**](./02-database-design.md) - MongoDB schema for multi-tenant data storage and RBAC +- [**Authentication & RBAC**](./03-authentication-rbac.md) - User authentication, authorization, and role-based access control + +### API Specifications +- [**REST API Specification**](./04-rest-api-specification.md) - Complete API endpoints, request/response schemas, and operation flows +- [**Stack Management APIs**](./05-stack-management-apis.md) - Detailed stack lifecycle management operations + +### Integration & Deployment +- [**Cloud Integrations**](./06-cloud-integrations.md) - AWS/GCP service account automation and resource provisioning +- [**Security & Compliance**](./07-security-compliance.md) - Security model, data protection, and compliance considerations +- [**Deployment Architecture**](./08-deployment-architecture.md) - Service deployment patterns and infrastructure requirements + +## Key Features + +### Multi-Tenant Architecture +- **Organizations** - Companies with multiple users and projects +- **Users** - Individual team members with specific roles and permissions +- **Projects** - Logical groupings of parent and client stacks +- **RBAC** - Fine-grained permissions for infrastructure vs application management + +### Simple Container Integration +- **Parent Stack 
Management** - Web interface for DevOps teams to define infrastructure templates and resources +- **Client Stack Management** - Developer-friendly interface for application deployment and configuration +- **Real-time Status** - Live monitoring of provisioning and deployment operations +- **Resource Discovery** - Automatic detection and cataloging of existing cloud resources + +### Cloud Provider Integration +- **Automated Provisioning** - Service account creation and IAM configuration upon user authentication +- **Multi-Cloud Support** - AWS and GCP integration with extensible architecture for additional providers +- **Resource Adoption** - Discovery and management of existing cloud infrastructure + +## Development Phases + +### Phase 1: Core Service (MVP) +- Basic authentication with Google OAuth +- MongoDB database setup with core schemas +- Parent stack CRUD operations +- Client stack CRUD operations +- Basic RBAC (infrastructure managers vs developers) + +### Phase 2: Cloud Integration +- Automated GCP service account provisioning +- AWS IAM integration +- Resource discovery and adoption +- Real-time provisioning status + +### Phase 3: Advanced Features +- Advanced RBAC with custom roles +- Multi-organization support +- Audit logging and compliance +- Advanced monitoring and alerting + +## Getting Started + +1. Review the [System Architecture](./01-system-architecture.md) for overall design understanding +2. Examine the [Database Design](./02-database-design.md) for data modeling +3. Study the [REST API Specification](./04-rest-api-specification.md) for implementation details +4. 
Follow the implementation guidelines in each design document + +## Technology Stack + +- **Backend**: Go (Gin framework) +- **Database**: MongoDB with transaction support +- **Authentication**: Google OAuth 2.0, JWT tokens +- **Cloud SDKs**: AWS SDK, Google Cloud SDK +- **Simple Container**: Integration with existing CLI and provisioning engine +- **Deployment**: Docker containers, Kubernetes-ready diff --git a/docs/implementation/2026-04-07/branch-preview-workflow/notes.md b/docs/implementation/2026-04-07/branch-preview-workflow/notes.md new file mode 100644 index 00000000..dbeef674 --- /dev/null +++ b/docs/implementation/2026-04-07/branch-preview-workflow/notes.md @@ -0,0 +1,62 @@ +# Branch Preview Workflow — Implementation Notes + +**Date**: 2026-04-07 +**Feature**: `feature/branch-builds-for-preview-versions` +**Design doc**: `docs/design/2026-04-07/branch-preview-workflow/architecture.md` + +--- + +## Deliverable + +**File**: `.github/workflows/branch-preview.yaml` + +--- + +## Implementation decisions + +### Version generation +Using `reecetech/version-increment@2023.10.2` with `use_api: "false"` — identical to `push.yaml` for the CalVer increment logic, but without creating an actual git tag. A subsequent shell step appends `-preview.{short_sha7}` to the computed base version. The real git tag is only created in `publish-git-tag`. + +### `${{ secrets.SC_CONFIG }}` in job summary +GitHub Actions processes all `${{ expr }}` in `run:` blocks before passing to the shell. To write the literal string `${{ secrets.SC_CONFIG }}` into the step summary markdown, the env var `SC_CONFIG_EXPR` is set via `"${{ '${{' }} secrets.SC_CONFIG }}"` — GHA evaluates `${{ '${{' }}` to the string `${{`, yielding `${{ secrets.SC_CONFIG }}` as the env var value. The heredoc then expands `${SC_CONFIG_EXPR}` safely. + +### Backtick escaping in heredoc +Unquoted bash heredocs treat `` ` `` as command substitution. 
Markdown code fences (` ``` `) are written as `` \`\`\` `` in the heredoc to produce literal backticks in the output. + +### `publish-sc-preview` bundle safety +Only versioned tarballs (`sc-{os}-{arch}-v{version}.tar.gz`) are placed in `.sc/stacks/dist/bundle/`. `sc.sh` and `version` are deliberately omitted. **Risk**: if `welder deploy -e prod` performs a full sync/delete of the CDN bucket, files absent from the bundle (including the live `sc.sh`) could be deleted. This must be validated against the `dist` stack Pulumi code before merging to a shared environment. + +### `publish-git-tag` — separate release branch +A new `release/{version}` branch is created from the current HEAD. All four `action.yml` files are patched to reference `simplecontainer/github-actions:{version}` (replacing `:staging`). A commit is made on this branch, then tagged `v{version}`. Both the branch and the tag are pushed. The working branch is never modified. + +### `docker-build` does not need `build-platforms` +The `github-actions.Dockerfile` only copies `dist/github-actions` (the server binary), not any SC platform tarballs. This allows `docker-build` to start as soon as `build-binaries` + `test` finish, in parallel with the `build-platforms` matrix. + +### `publish-sc-preview` and `publish-git-tag` run in parallel +These two jobs have no dependency on each other: +- `publish-sc-preview` only needs SC platform tarballs + passing tests +- `publish-git-tag` only needs the Docker image to exist (gated by `docker-build`) +Both feed into `finalize`. 
+ +--- + +## Known issues / follow-ups + +- [ ] Validate `welder deploy -e prod` bundle sync behavior (see `publish-sc-preview` risk above) +- [ ] Confirm `reecetech/version-increment` with `use_api: "false"` does not create git tags (needs runtime verification) +- [ ] `release/{version}` branches accumulate over time — consider a cleanup strategy (e.g., auto-delete after 30 days) + +--- + +## Status + +- [x] Implementation docs created +- [x] `prepare` job +- [x] `build-setup` job +- [x] `build-platforms` job (matrix) +- [x] `build-binaries` job +- [x] `test` job +- [x] `docker-build` job +- [x] `publish-sc-preview` job +- [x] `publish-git-tag` job +- [x] `finalize` job with build summary diff --git a/pkg/assistant/mcp/.gitignore b/pkg/assistant/mcp/.gitignore index 78978dfb..cceaea8b 100644 --- a/pkg/assistant/mcp/.gitignore +++ b/pkg/assistant/mcp/.gitignore @@ -1,2 +1,3 @@ -.sc/analysis-cache.json \ No newline at end of file +.sc/analysis-cache.json +/.sc/ diff --git a/pkg/assistant/mcp/.sc/analysis-report.md b/pkg/assistant/mcp/.sc/analysis-report.md deleted file mode 100644 index 943ef0f5..00000000 --- a/pkg/assistant/mcp/.sc/analysis-report.md +++ /dev/null @@ -1,72 +0,0 @@ -# Simple Container Project Analysis Report - -**Generated:** 2026-02-27 17:41:10 +00 -**Analyzer Version:** 1.0 -**Overall Confidence:** 70.0% - -## Project Overview - -- **Name:** mcp -- **Path:** /home/runner/_work/api/api/pkg/assistant/mcp -- **Architecture:** standard-web-app -- **Primary Technology:** go (70.0% confidence) - -## Technology Stacks - -### 1. 
go - -- **Confidence:** 70.0% -- **Runtime:** go -- **Version:** -- **Evidence:** - - .go files found (legacy GOPATH mode) -- **Additional Information:** - - mode: gopath - -## Detected Resources - -## Recommendations - -### High Priority - -**Go Multi-stage Dockerfile** -- Generate optimized multi-stage Dockerfile for Go application with minimal final image -- Action: generate_dockerfile - -**Initialize Simple Container** -- Set up Simple Container configuration for streamlined deployment and infrastructure management -- Action: init_simple_container - -**Add Dockerfile** -- Generate optimized Dockerfile for containerized deployment -- Action: generate_dockerfile - -**Infrastructure as Code Setup** -- No infrastructure management detected. Simple Container provides easy infrastructure-as-code with built-in best practices -- Action: setup_infrastructure_as_code - -### Medium Priority - -**Go Build Optimization** -- Configure Go build with proper flags for smaller binaries and faster startup -- Action: optimize_go_build - -## Simple Container Setup Guide - -Based on this analysis, here's how to get started with Simple Container: - -1. **Initialize Simple Container** - ```bash - sc init - ``` - -2. **Configure for go ** - - Simple Container will automatically detect your technology stack - - Review the generated configuration files - -3. **Deploy** - ```bash - sc deploy - ``` - -For more information, visit: https://simple-container.com/docs