diff --git a/.github/workflows/ci-admin.yml b/.github/workflows/ci-admin.yml new file mode 100644 index 0000000..8442fe8 --- /dev/null +++ b/.github/workflows/ci-admin.yml @@ -0,0 +1,193 @@ +# Copyright 2026 CloudBlue LLC +# SPDX-License-Identifier: Apache-2.0 + +name: CI (Admin Portal) + +on: + push: + branches: + - master + - 'release/**' + paths: + - 'admin/**' + - 'Makefile' + - '.github/workflows/ci-admin.yml' + pull_request: + paths: + - 'admin/**' + - 'test/mock-chaperone/**' + - 'Makefile' + - '.github/workflows/ci-admin.yml' + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +env: + GOLANGCI_LINT_VERSION: 'v2.8.0' + PNPM_VERSION: '10.28.2' + +jobs: + lint-go: + name: Lint (Go) + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version-file: admin/go.mod + cache-dependency-path: admin/go.sum + + - name: Run golangci-lint (admin module) + # The admin module embeds ui/dist in !dev builds; use dev tags for + # backend lint/test so checks do not depend on prebuilt UI artifacts. 
+ uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0 + with: + version: ${{ env.GOLANGCI_LINT_VERSION }} + install-mode: goinstall + working-directory: admin + args: --build-tags=dev + + lint-ui: + name: Lint (UI) + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: 24 + + - name: Install pnpm + run: corepack enable && corepack prepare pnpm@${{ env.PNPM_VERSION }} --activate + + - name: Install dependencies + working-directory: admin/ui + run: pnpm install --frozen-lockfile + + - name: Check formatting + working-directory: admin/ui + run: pnpm prettier --check "src/**/*.{js,vue,css}" + + - name: Lint + working-directory: admin/ui + run: pnpm lint + + test-go: + name: Test (Go) + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version-file: admin/go.mod + cache-dependency-path: admin/go.sum + + - name: Run tests + # Keep Go test path independent from ui/dist embed requirements. + run: cd admin && go test -race -tags dev ./... 
+ + test-ui: + name: Test (UI) + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Node.js + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: 24 + + - name: Install pnpm + run: corepack enable && corepack prepare pnpm@${{ env.PNPM_VERSION }} --activate + + - name: Install dependencies + working-directory: admin/ui + run: pnpm install --frozen-lockfile + + - name: Run tests + working-directory: admin/ui + run: pnpm test + + e2e: + name: E2E + runs-on: ubuntu-latest + needs: [lint-go, lint-ui, test-go, test-ui] + steps: + - name: Checkout code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version-file: admin/go.mod + cache-dependency-path: admin/go.sum + + - name: Set up Node.js + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: 24 + + - name: Install pnpm + run: corepack enable && corepack prepare pnpm@${{ env.PNPM_VERSION }} --activate + + - name: Install dependencies + working-directory: admin/ui + run: pnpm install --frozen-lockfile + + - name: Install Playwright browsers + working-directory: admin/ui + run: pnpm exec playwright install chromium firefox webkit --with-deps + + - name: Run E2E tests + run: make e2e-admin + + - name: Upload Playwright report + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + if: ${{ !cancelled() }} + with: + name: playwright-report + path: admin/ui/e2e/playwright-report/ + retention-days: 14 + + build: + name: Build + runs-on: ubuntu-latest + needs: [lint-go, lint-ui, test-go, test-ui] + steps: + - name: Checkout code + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + + - name: Set up Go + uses: 
actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version-file: admin/go.mod + cache-dependency-path: admin/go.sum + + - name: Set up Node.js + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: 24 + + - name: Install pnpm + run: corepack enable && corepack prepare pnpm@${{ env.PNPM_VERSION }} --activate + + - name: Build admin portal + run: make build-admin + + - name: Verify binary exists + run: | + test -f bin/chaperone-admin + ./bin/chaperone-admin --version diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml index 0c7aa37..0b0afa5 100644 --- a/.github/workflows/security.yml +++ b/.github/workflows/security.yml @@ -101,7 +101,7 @@ jobs: # into message strings. Log injection is not possible. # Safety net: the "no-raw-logging" step below enforces that all # production code uses slog, not fmt.Print*/log.Print*. - run: gosec -exclude=G706 -exclude-dir=sdk -exclude-dir=plugins ./... + run: gosec -exclude=G706 -exclude-dir=sdk -exclude-dir=plugins -exclude-dir=admin ./... - name: Run gosec (SDK module) run: cd sdk && gosec ./... @@ -109,6 +109,10 @@ jobs: - name: Run gosec (Contrib module) run: cd plugins/contrib && gosec ./... + - name: Run gosec (admin module) + # Use -tags=dev so the embed directive for ui/dist is not required. + run: cd admin && gosec -tags=dev -exclude=G706 ./... 
+ # TODO: Enable when repo is public (requires GitHub Advanced Security) # dependency-review: # name: Dependency Review diff --git a/.gitignore b/.gitignore index 5c93ecc..5f0e496 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,8 @@ /dist/ /chaperone /chaperone-onboard +/chaperone-admin +admin/chaperone-admin # Test binary, built with `go test -c` *.test @@ -65,3 +67,27 @@ test/load/results/ # PID files for background processes .target-server.pid + +# SQLite database artifacts +*.db +*.db-shm +*.db-wal + +# Admin portal frontend +admin/ui/node_modules/ +admin/ui/dist/ +admin/ui/.vite/ + +# Admin build artifacts +admin/seed-user + +# Admin E2E tests +admin/ui/e2e/.auth/ +admin/ui/e2e/results/ +admin/ui/e2e/playwright-report/ + +# Playwright MCP output +.playwright-mcp/ + +# Admin QA test logs +.admin-qa-logs/ diff --git a/.golangci.yml b/.golangci.yml index 4ef5482..3af5231 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -145,6 +145,11 @@ linters: path: internal/context/ text: "var-naming: avoid package names" + - linters: + - revive + path: admin/api/ + text: "var-naming: avoid meaningless package names" + # Allow pkg/crypto name even though it conflicts with stdlib. # This package handles certificate generation, not general crypto. # TODO: Consider renaming to pkg/certs in future refactor. 
diff --git a/Makefile b/Makefile index ddfbb31..1eced7d 100644 --- a/Makefile +++ b/Makefile @@ -51,7 +51,14 @@ LDFLAGS_DEV := -ldflags "\ all: lint test build .PHONY: ci -ci: fmt license-check lint test-race gosec govulncheck build ## Run all CI checks locally +ci: fmt license-check lint ci-admin-ui test-race gosec govulncheck build build-onboard build-admin-dev ## Run all CI checks locally + +.PHONY: ci-admin-ui +ci-admin-ui: ## Run admin UI checks (format, lint, test) + cd $(ADMIN_UI_DIR) && pnpm install --frozen-lockfile + cd $(ADMIN_UI_DIR) && pnpm prettier --check "src/**/*.{js,vue,css}" + cd $(ADMIN_UI_DIR) && pnpm lint + cd $(ADMIN_UI_DIR) && pnpm test # ============================================================================ # Build @@ -87,6 +94,65 @@ clean: ## Remove build artifacts @rm -rf $(BUILD_DIR) @rm -f coverage.out coverage.html +# ============================================================================ +# Admin Portal +# ============================================================================ + +ADMIN_BINARY_NAME := chaperone-admin +ADMIN_MODULE_DIR := admin +ADMIN_CMD_PATH := ./cmd/chaperone-admin +ADMIN_UI_DIR := admin/ui + +ADMIN_LDFLAGS := -ldflags "-s -w \ + -X main.Version=$(VERSION) \ + -X main.GitCommit=$(GIT_COMMIT) \ + -X main.BuildDate=$(BUILD_DATE)" + +ADMIN_LDFLAGS_DEV := -ldflags "\ + -X main.Version=$(VERSION)-dev \ + -X main.GitCommit=$(GIT_COMMIT) \ + -X main.BuildDate=$(BUILD_DATE)" + +.PHONY: build-admin +build-admin: build-admin-ui ## Build the admin portal binary (production) + @echo "Building $(ADMIN_BINARY_NAME)..." + @mkdir -p $(BUILD_DIR) + cd $(ADMIN_MODULE_DIR) && CGO_ENABLED=0 go build $(ADMIN_LDFLAGS) -o ../$(BUILD_DIR)/$(ADMIN_BINARY_NAME) $(ADMIN_CMD_PATH) + +.PHONY: build-admin-dev +build-admin-dev: ## Build admin portal for development (no UI build needed) + @echo "Building $(ADMIN_BINARY_NAME) (development)..." 
+ @mkdir -p $(BUILD_DIR) + cd $(ADMIN_MODULE_DIR) && go build -tags dev $(ADMIN_LDFLAGS_DEV) -o ../$(BUILD_DIR)/$(ADMIN_BINARY_NAME) $(ADMIN_CMD_PATH) + +.PHONY: build-admin-ui +build-admin-ui: ## Build the admin portal SPA + @echo "Building admin UI..." + cd $(ADMIN_UI_DIR) && pnpm install && pnpm build + +.PHONY: build-seed-user +build-seed-user: ## Build the seed-user test helper + @echo "Building seed-user..." + @mkdir -p $(BUILD_DIR) + cd $(ADMIN_MODULE_DIR) && go build -o ../$(BUILD_DIR)/seed-user ./cmd/seed-user + +.PHONY: e2e-admin-setup +e2e-admin-setup: ## Install Playwright browsers for E2E tests (one-time setup) + cd $(ADMIN_UI_DIR) && pnpm install --frozen-lockfile + cd $(ADMIN_UI_DIR) && pnpm exec playwright install chromium firefox webkit + +.PHONY: e2e-admin +e2e-admin: ## Run admin portal E2E tests — all browsers (run e2e-admin-setup first) + cd $(ADMIN_UI_DIR) && pnpm e2e + +.PHONY: e2e-admin-chromium +e2e-admin-chromium: ## Run E2E tests on Chromium only + cd $(ADMIN_UI_DIR) && pnpm e2e --project=setup --project=chromium --project=auth + +.PHONY: run-admin +run-admin: build-admin-dev ## Build and run admin portal + @$(BUILD_DIR)/$(ADMIN_BINARY_NAME) + # ============================================================================ # Development Certificates # ============================================================================ @@ -104,24 +170,28 @@ test: ## Run tests (all modules) go test -v ./... cd sdk && go test -v ./... cd plugins/contrib && go test -v ./... + cd admin && go test -tags dev -v ./... .PHONY: test-race test-race: ## Run tests with race detector go test -race -v ./... cd sdk && go test -race -v ./... cd plugins/contrib && go test -race -v ./... + cd admin && go test -race -tags dev -v ./... .PHONY: test-cover test-cover: ## Run tests with coverage go test -coverprofile=coverage.out ./... cd sdk && go test -coverprofile=coverage-sdk.out ./... cd plugins/contrib && go test -coverprofile=coverage-contrib.out ./... 
+ cd admin && go test -tags dev -coverprofile=coverage-admin.out ./... go tool cover -html=coverage.out -o coverage.html @echo "Coverage report: coverage.html" .PHONY: test-short test-short: ## Run short tests only go test -short -v ./... + cd admin && go test -short -tags dev -v ./... .PHONY: test-integration test-integration: ## Run integration tests @@ -254,7 +324,8 @@ lint: ## Run linters (all modules) @if [ -x "$(GOLANGCI_LINT)" ]; then \ $(GOLANGCI_LINT) run && \ (cd sdk && $(GOLANGCI_LINT) run) && \ - (cd plugins/contrib && $(GOLANGCI_LINT) run); \ + (cd plugins/contrib && $(GOLANGCI_LINT) run) && \ + (cd admin && $(GOLANGCI_LINT) run --build-tags=dev); \ else \ echo "golangci-lint not installed. Run: make tools"; \ exit 1; \ @@ -265,6 +336,7 @@ lint-fix: ## Run linters and fix issues $(GOLANGCI_LINT) run --fix cd sdk && $(GOLANGCI_LINT) run --fix cd plugins/contrib && $(GOLANGCI_LINT) run --fix + cd admin && $(GOLANGCI_LINT) run --fix --build-tags=dev .PHONY: fmt fmt: ## Format code (all modules) @@ -274,12 +346,15 @@ fmt: ## Format code (all modules) cd sdk && gofmt -s -w . cd plugins/contrib && go fmt ./... cd plugins/contrib && gofmt -s -w . + cd admin && go fmt ./... + cd admin && gofmt -s -w . .PHONY: vet -vet: ## Run go vet +vet: ## Run go vet (all modules) go vet ./... cd sdk && go vet ./... cd plugins/contrib && go vet ./... + cd admin && go vet -tags dev ./... .PHONY: tidy tidy: ## Tidy and verify go.mod (all modules) @@ -287,12 +362,14 @@ tidy: ## Tidy and verify go.mod (all modules) go mod verify cd sdk && go mod tidy cd plugins/contrib && go mod tidy && go mod verify + cd admin && go mod tidy .PHONY: gosec gosec: ## Run gosec security scanner (all modules) @if [ -x "$(GOSEC)" ]; then \ - $(GOSEC) -exclude=G706 -exclude-dir=sdk -exclude-dir=plugins ./... && \ - (cd sdk && $(GOSEC) ./...); \ + $(GOSEC) -exclude=G706 -exclude-dir=sdk -exclude-dir=plugins -exclude-dir=admin ./... && \ + (cd sdk && $(GOSEC) ./...) 
&& \ + (cd admin && $(GOSEC) -exclude=G706 -tags=dev ./...); \ else \ echo "gosec not installed. Run: make tools"; \ exit 1; \ @@ -302,7 +379,8 @@ gosec: ## Run gosec security scanner (all modules) govulncheck: ## Run govulncheck vulnerability scanner (all modules) @if [ -x "$(GOVULNCHECK)" ]; then \ $(GOVULNCHECK) ./... && \ - (cd sdk && $(GOVULNCHECK) ./...); \ + (cd sdk && $(GOVULNCHECK) ./...) && \ + (cd admin && $(GOVULNCHECK) ./...); \ else \ echo "govulncheck not installed. Run: make tools"; \ exit 1; \ @@ -324,7 +402,8 @@ ADDLICENSE_FLAGS := -f .copyright-header.tmpl \ -ignore 'bin/**' \ -ignore 'certs/**' \ -ignore '.ai/**' \ - -ignore '.claude/**' + -ignore '.claude/**' \ + -ignore 'admin/ui/**' .PHONY: license-check license-check: ## Check that all source files have copyright headers diff --git a/admin/api/audit.go b/admin/api/audit.go new file mode 100644 index 0000000..6ae4612 --- /dev/null +++ b/admin/api/audit.go @@ -0,0 +1,117 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/cloudblue/chaperone/admin/store" +) + +// AuditHandler serves the audit log REST endpoint. +type AuditHandler struct { + store *store.Store +} + +// NewAuditHandler creates a handler for the audit log endpoint. +func NewAuditHandler(st *store.Store) *AuditHandler { + return &AuditHandler{store: st} +} + +// Register mounts audit routes on the given mux. 
+func (h *AuditHandler) Register(mux *http.ServeMux) { + mux.HandleFunc("GET /api/audit", h.list) +} + +func (h *AuditHandler) list(w http.ResponseWriter, r *http.Request) { + filter, err := parseAuditFilter(r.URL.Query()) + if err != nil { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", err.Error()) + return + } + + page, err := h.store.ListAuditEntries(r.Context(), filter) + if err != nil { + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to list audit entries") + return + } + + respondJSON(w, http.StatusOK, page) +} + +func parseAuditFilter(q url.Values) (store.AuditFilter, error) { + filter := store.AuditFilter{ + Action: strings.TrimSpace(q.Get("action")), + Query: strings.TrimSpace(q.Get("q")), + Page: 1, + PerPage: 20, + } + + if err := parseIDParam(q, "user", &filter.UserID); err != nil { + return filter, err + } + if err := parseIDParam(q, "instance_id", &filter.InstanceID); err != nil { + return filter, err + } + if err := parseTimeParam(q, "from", &filter.From); err != nil { + return filter, err + } + if err := parseTimeParam(q, "to", &filter.To); err != nil { + return filter, err + } + if err := parsePageParams(q, &filter.Page, &filter.PerPage); err != nil { + return filter, err + } + + return filter, nil +} + +func parseIDParam(q url.Values, key string, dst **int64) error { + v := q.Get(key) + if v == "" { + return nil + } + id, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("invalid %s: %q", key, v) + } + *dst = &id + return nil +} + +func parseTimeParam(q url.Values, key string, dst **time.Time) error { + v := q.Get(key) + if v == "" { + return nil + } + t, err := time.Parse(time.RFC3339, v) + if err != nil { + return fmt.Errorf("invalid %s: %q (expected RFC 3339)", key, v) + } + *dst = &t + return nil +} + +func parsePageParams(q url.Values, page, perPage *int) error { + if v := q.Get("page"); v != "" { + p, err := strconv.Atoi(v) + if err != nil || p < 1 { + return 
fmt.Errorf("invalid page: %q", v) + } + *page = p + } + if v := q.Get("per_page"); v != "" { + pp, err := strconv.Atoi(v) + if err != nil || pp < 1 || pp > 100 { + return fmt.Errorf("invalid per_page: %q (must be 1-100)", v) + } + *perPage = pp + } + return nil +} diff --git a/admin/api/audit_actions.go b/admin/api/audit_actions.go new file mode 100644 index 0000000..0275ecd --- /dev/null +++ b/admin/api/audit_actions.go @@ -0,0 +1,15 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +// Audit action constants logged for each portal operation. +// Keep in sync with the frontend labels in admin/ui/src/utils/audit.js. +const ( + AuditActionInstanceCreate = "instance.create" + AuditActionInstanceUpdate = "instance.update" + AuditActionInstanceDelete = "instance.delete" + AuditActionUserLogin = "user.login" + AuditActionUserLogout = "user.logout" + AuditActionPasswordChange = "user.password_change" +) diff --git a/admin/api/audit_test.go b/admin/api/audit_test.go new file mode 100644 index 0000000..010562c --- /dev/null +++ b/admin/api/audit_test.go @@ -0,0 +1,241 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/cloudblue/chaperone/admin/store" +) + +func newAuditTestMux(t *testing.T) (*http.ServeMux, *store.Store) { + t.Helper() + st := openTestStore(t) + h := NewAuditHandler(st) + mux := http.NewServeMux() + h.Register(mux) + return mux, st +} + +func seedAuditData(t *testing.T, st *store.Store) int64 { + t.Helper() + ctx := context.Background() + user, err := st.CreateUser(ctx, "admin", "$2a$10$abcdefghijklmnopqrstuuABCDEFGHIJKLMNOPQRSTUVWXYZ01234") + if err != nil { + t.Fatalf("CreateUser() error = %v", err) + } + inst, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + entries := 
[]struct { + action string + instanceID *int64 + detail string + }{ + {"instance.create", &inst.ID, "Created instance proxy-1 at 10.0.0.1:9090"}, + {"instance.update", &inst.ID, "Updated instance proxy-1"}, + {"user.login", nil, "User admin logged in"}, + } + for _, e := range entries { + if err := st.InsertAuditEntry(ctx, user.ID, e.action, e.instanceID, e.detail); err != nil { + t.Fatalf("InsertAuditEntry() error = %v", err) + } + } + return user.ID +} + +func TestAuditList_Empty_ReturnsEmptyPage(t *testing.T) { + t.Parallel() + mux, _ := newAuditTestMux(t) + + req := httptest.NewRequest(http.MethodGet, "/api/audit", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d; body = %s", rec.Code, http.StatusOK, rec.Body.String()) + } + + var page store.AuditPage + if err := json.NewDecoder(rec.Body).Decode(&page); err != nil { + t.Fatalf("decoding response: %v", err) + } + if page.Total != 0 { + t.Errorf("Total = %d, want 0", page.Total) + } + if len(page.Items) != 0 { + t.Errorf("len(Items) = %d, want 0", len(page.Items)) + } +} + +func TestAuditList_ReturnsEntries(t *testing.T) { + t.Parallel() + mux, st := newAuditTestMux(t) + seedAuditData(t, st) + + req := httptest.NewRequest(http.MethodGet, "/api/audit", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var page store.AuditPage + if err := json.NewDecoder(rec.Body).Decode(&page); err != nil { + t.Fatalf("decoding: %v", err) + } + if page.Total != 3 { + t.Errorf("Total = %d, want 3", page.Total) + } + if len(page.Items) != 3 { + t.Errorf("len(Items) = %d, want 3", len(page.Items)) + } +} + +func TestAuditList_FilterByAction(t *testing.T) { + t.Parallel() + mux, st := newAuditTestMux(t) + seedAuditData(t, st) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?action=user.login", nil) + rec := httptest.NewRecorder() 
+ mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d", rec.Code) + } + + var page store.AuditPage + if err := json.NewDecoder(rec.Body).Decode(&page); err != nil { + t.Fatalf("decoding: %v", err) + } + if page.Total != 1 { + t.Errorf("Total = %d, want 1", page.Total) + } +} + +func TestAuditList_FilterByUser(t *testing.T) { + t.Parallel() + mux, st := newAuditTestMux(t) + userID := seedAuditData(t, st) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?user="+itoa(userID), nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + var page store.AuditPage + if err := json.NewDecoder(rec.Body).Decode(&page); err != nil { + t.Fatalf("decoding: %v", err) + } + if page.Total != 3 { + t.Errorf("Total = %d, want 3", page.Total) + } +} + +func TestAuditList_FullTextSearch(t *testing.T) { + t.Parallel() + mux, st := newAuditTestMux(t) + seedAuditData(t, st) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?q=proxy-1", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + var page store.AuditPage + if err := json.NewDecoder(rec.Body).Decode(&page); err != nil { + t.Fatalf("decoding: %v", err) + } + // "proxy-1" appears in instance.create and instance.update details. 
+ if page.Total != 2 { + t.Errorf("Total = %d, want 2", page.Total) + } +} + +func TestAuditList_Pagination(t *testing.T) { + t.Parallel() + mux, st := newAuditTestMux(t) + seedAuditData(t, st) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?page=1&per_page=2", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + var page store.AuditPage + if err := json.NewDecoder(rec.Body).Decode(&page); err != nil { + t.Fatalf("decoding: %v", err) + } + if page.Total != 3 { + t.Errorf("Total = %d, want 3", page.Total) + } + if len(page.Items) != 2 { + t.Errorf("len(Items) = %d, want 2", len(page.Items)) + } + if page.Page != 1 { + t.Errorf("Page = %d, want 1", page.Page) + } +} + +func TestAuditList_InvalidPage_Returns400(t *testing.T) { + t.Parallel() + mux, _ := newAuditTestMux(t) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?page=abc", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestAuditList_InvalidPerPage_Returns400(t *testing.T) { + t.Parallel() + mux, _ := newAuditTestMux(t) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?per_page=999", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestAuditList_InvalidUserID_Returns400(t *testing.T) { + t.Parallel() + mux, _ := newAuditTestMux(t) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?user=notanumber", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestAuditList_InvalidFromDate_Returns400(t *testing.T) { + t.Parallel() + mux, _ := newAuditTestMux(t) + + req := httptest.NewRequest(http.MethodGet, "/api/audit?from=not-a-date", nil) + rec := 
httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func itoa(n int64) string { + return strconv.FormatInt(n, 10) +} diff --git a/admin/api/auth.go b/admin/api/auth.go new file mode 100644 index 0000000..e716741 --- /dev/null +++ b/admin/api/auth.go @@ -0,0 +1,250 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "log/slog" + "net" + "net/http" + "time" + + "github.com/cloudblue/chaperone/admin/auth" + "github.com/cloudblue/chaperone/admin/store" +) + +// AuthHandler handles login, logout, and password change endpoints. +type AuthHandler struct { + auth *auth.Service + store *store.Store + secureCookies bool + sessionMaxAge time.Duration +} + +// NewAuthHandler creates a handler for auth endpoints. +func NewAuthHandler(authService *auth.Service, st *store.Store, secureCookies bool, sessionMaxAge time.Duration) *AuthHandler { + return &AuthHandler{ + auth: authService, + store: st, + secureCookies: secureCookies, + sessionMaxAge: sessionMaxAge, + } +} + +// Register mounts auth routes on the given mux. 
+func (h *AuthHandler) Register(mux *http.ServeMux) { + mux.HandleFunc("POST /api/login", h.login) + mux.HandleFunc("POST /api/logout", h.logout) + mux.HandleFunc("GET /api/me", h.me) + mux.HandleFunc("PUT /api/user/password", h.changePassword) +} + +type loginRequest struct { + Username string `json:"username"` + Password string `json:"password"` // #nosec G117 -- request field, not a hardcoded secret +} + +type loginResponse struct { + User loginUser `json:"user"` +} + +type loginUser struct { + ID int64 `json:"id"` + Username string `json:"username"` +} + +func (h *AuthHandler) login(w http.ResponseWriter, r *http.Request) { + var req loginRequest + if !decodeJSON(w, r, &req) { + return + } + + if req.Username == "" || req.Password == "" { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", "username and password are required") + return + } + + ip := clientIP(r) + result, err := h.auth.Login(r.Context(), ip, req.Username, req.Password) + if errors.Is(err, auth.ErrRateLimited) { + w.Header().Set("Retry-After", "60") + respondError(w, http.StatusTooManyRequests, "RATE_LIMITED", "Too many failed login attempts. 
Try again later.") + return + } + if errors.Is(err, auth.ErrInvalidCredentials) { + respondError(w, http.StatusUnauthorized, "UNAUTHORIZED", "Invalid username or password") + return + } + if err != nil { + slog.Error("login failed", "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Login failed") + return + } + + h.setSessionCookie(w, result.SessionToken) + h.setCSRFCookie(w) + + h.auditLog(r.Context(), result.User.ID, AuditActionUserLogin, + fmt.Sprintf("User %q logged in from %s", result.User.Username, ip)) + + respondJSON(w, http.StatusOK, loginResponse{ + User: loginUser{ + ID: result.User.ID, + Username: result.User.Username, + }, + }) +} + +func (h *AuthHandler) me(w http.ResponseWriter, r *http.Request) { + user := auth.ContextUser(r.Context()) + if user == nil { + respondError(w, http.StatusUnauthorized, "UNAUTHORIZED", "Authentication required") + return + } + respondJSON(w, http.StatusOK, loginResponse{ + User: loginUser{ID: user.ID, Username: user.Username}, + }) +} + +func (h *AuthHandler) logout(w http.ResponseWriter, r *http.Request) { + user := auth.ContextUser(r.Context()) + + cookie, err := r.Cookie(auth.SessionCookieName) + if err == nil { + if logoutErr := h.auth.Logout(r.Context(), cookie.Value); logoutErr != nil { + slog.Error("logout session deletion", "error", logoutErr) + } + } + + if user != nil { + h.auditLog(r.Context(), user.ID, AuditActionUserLogout, + fmt.Sprintf("User %q logged out", user.Username)) + } + + h.clearCookies(w) + w.WriteHeader(http.StatusNoContent) +} + +type changePasswordRequest struct { + CurrentPassword string `json:"current_password"` + NewPassword string `json:"new_password"` +} + +func (h *AuthHandler) changePassword(w http.ResponseWriter, r *http.Request) { + user := auth.ContextUser(r.Context()) + if user == nil { + respondError(w, http.StatusUnauthorized, "UNAUTHORIZED", "Authentication required") + return + } + + cookie, err := r.Cookie(auth.SessionCookieName) + if err != nil { 
+ respondError(w, http.StatusUnauthorized, "UNAUTHORIZED", "Authentication required") + return + } + + var req changePasswordRequest + if !decodeJSON(w, r, &req) { + return + } + + if req.CurrentPassword == "" || req.NewPassword == "" { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", "current_password and new_password are required") + return + } + + err = h.auth.ChangePassword(r.Context(), user.ID, cookie.Value, req.CurrentPassword, req.NewPassword) + if errors.Is(err, auth.ErrInvalidCredentials) { + respondError(w, http.StatusForbidden, "INVALID_PASSWORD", "Current password is incorrect") + return + } + if errors.Is(err, auth.ErrPasswordTooShort) { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", + fmt.Sprintf("Password must be at least %d characters", auth.MinPasswordLength)) + return + } + if errors.Is(err, auth.ErrPasswordTooLong) { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", + fmt.Sprintf("Password must be at most %d characters", auth.MaxPasswordLength)) + return + } + if err != nil { + slog.Error("password change failed", "user_id", user.ID, "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to change password") + return + } + + h.auditLog(r.Context(), user.ID, AuditActionPasswordChange, + fmt.Sprintf("User %q changed password", user.Username)) + w.WriteHeader(http.StatusNoContent) +} + +func (h *AuthHandler) setSessionCookie(w http.ResponseWriter, token string) { + http.SetCookie(w, &http.Cookie{ + Name: auth.SessionCookieName, + Value: token, + Path: "/", + MaxAge: int(h.sessionMaxAge.Seconds()), + HttpOnly: true, + Secure: h.secureCookies, + SameSite: http.SameSiteLaxMode, + }) +} + +func (h *AuthHandler) setCSRFCookie(w http.ResponseWriter) { + token, err := auth.GenerateToken(16) + if err != nil { + slog.Error("generating CSRF token", "error", err) + return + } + http.SetCookie(w, &http.Cookie{ + Name: auth.CSRFCookieName, + Value: token, + Path: "/", + MaxAge: 
int(h.sessionMaxAge.Seconds()), + HttpOnly: false, + Secure: h.secureCookies, + SameSite: http.SameSiteStrictMode, + }) +} + +func (h *AuthHandler) clearCookies(w http.ResponseWriter) { + http.SetCookie(w, &http.Cookie{ + Name: auth.SessionCookieName, + Value: "", + Path: "/", + MaxAge: -1, + HttpOnly: true, + Secure: h.secureCookies, + SameSite: http.SameSiteLaxMode, + }) + http.SetCookie(w, &http.Cookie{ + Name: auth.CSRFCookieName, + Value: "", + Path: "/", + MaxAge: -1, + HttpOnly: false, + Secure: h.secureCookies, + SameSite: http.SameSiteStrictMode, + }) +} + +func (h *AuthHandler) auditLog(ctx context.Context, userID int64, action, detail string) { + if err := h.store.InsertAuditEntry(ctx, userID, action, nil, detail); err != nil { + slog.Error("writing audit entry", "action", action, "error", err) + } +} + +// clientIP extracts the client IP from the request's TCP peer address. +// The admin portal is deployed direct-to-network within Distributor infrastructure; +// X-Forwarded-For is not trusted and must be ignored for rate-limiting. 
+func clientIP(r *http.Request) string { + host, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return r.RemoteAddr + } + return host +} diff --git a/admin/api/auth_test.go b/admin/api/auth_test.go new file mode 100644 index 0000000..fbaa1b6 --- /dev/null +++ b/admin/api/auth_test.go @@ -0,0 +1,293 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/cloudblue/chaperone/admin/auth" +) + +const testPassword = "securepassword12" + +func newTestAuthMux(t *testing.T) (*http.ServeMux, *auth.Service) { + t.Helper() + st := openTestStore(t) + svc := auth.NewService(st, 24*time.Hour, 2*time.Hour) + h := NewAuthHandler(svc, st, false, 24*time.Hour) + mux := http.NewServeMux() + h.Register(mux) + return mux, svc +} + +func createTestUser(t *testing.T, svc *auth.Service) { + t.Helper() + if err := svc.CreateUser(context.Background(), "admin", testPassword); err != nil { + t.Fatalf("CreateUser() error = %v", err) + } +} + +// --- Login --- + +func TestLogin_Success_Returns200WithCookies(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + + body := `{"username":"admin","password":"` + testPassword + `"}` + req := httptest.NewRequest(http.MethodPost, "/api/login", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d; body = %s", rec.Code, http.StatusOK, rec.Body.String()) + } + + var resp loginResponse + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("decode error: %v", err) + } + if resp.User.Username != "admin" { + t.Errorf("username = %q, want %q", resp.User.Username, "admin") + } + + cookies := rec.Result().Cookies() + var sessionCookie, csrfCookie *http.Cookie + for _, c := range cookies { + switch c.Name { + case auth.SessionCookieName: 
+ sessionCookie = c + case auth.CSRFCookieName: + csrfCookie = c + } + } + + if sessionCookie == nil { + t.Fatal("missing session cookie") + } + if !sessionCookie.HttpOnly { + t.Error("session cookie should be HttpOnly") + } + if sessionCookie.Secure { + t.Error("session cookie should not be Secure in test (secureCookies=false)") + } + + if csrfCookie == nil { + t.Fatal("missing CSRF cookie") + } + if csrfCookie.HttpOnly { + t.Error("CSRF cookie should NOT be HttpOnly") + } +} + +func TestLogin_WrongPassword_Returns401(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + + body := `{"username":"admin","password":"wrongpassword1"}` + req := httptest.NewRequest(http.MethodPost, "/api/login", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("status = %d, want %d; body = %s", rec.Code, http.StatusUnauthorized, rec.Body.String()) + } +} + +func TestLogin_MissingFields_Returns400(t *testing.T) { + t.Parallel() + mux, _ := newTestAuthMux(t) + + body := `{"username":"admin"}` + req := httptest.NewRequest(http.MethodPost, "/api/login", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestLogin_RateLimited_Returns429(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + + for range 5 { + body := `{"username":"admin","password":"badpassword00"}` + req := httptest.NewRequest(http.MethodPost, "/api/login", strings.NewReader(body)) + req.RemoteAddr = "10.0.0.1:12345" + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + } + + body := `{"username":"admin","password":"` + testPassword + `"}` + req := httptest.NewRequest(http.MethodPost, "/api/login", strings.NewReader(body)) + req.RemoteAddr = "10.0.0.1:12345" + rec := httptest.NewRecorder() + 
mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusTooManyRequests { + t.Errorf("status = %d, want %d", rec.Code, http.StatusTooManyRequests) + } + if got := rec.Header().Get("Retry-After"); got != "60" { + t.Errorf("Retry-After = %q, want %q", got, "60") + } +} + +// --- Logout --- + +func TestLogout_Returns204_ClearsCookies(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + req := httptest.NewRequest(http.MethodPost, "/api/logout", nil) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: result.SessionToken}) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNoContent { + t.Errorf("status = %d, want %d", rec.Code, http.StatusNoContent) + } + + for _, c := range rec.Result().Cookies() { + if c.Name == auth.SessionCookieName && c.MaxAge != -1 { + t.Error("session cookie should be cleared (MaxAge=-1)") + } + if c.Name == auth.CSRFCookieName && c.MaxAge != -1 { + t.Error("CSRF cookie should be cleared (MaxAge=-1)") + } + } +} + +// --- ChangePassword --- + +func TestChangePassword_Success_Returns204(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + body := `{"current_password":"` + testPassword + `","new_password":"newpassword1234"}` + req := httptest.NewRequest(http.MethodPut, "/api/user/password", strings.NewReader(body)) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: result.SessionToken}) + req = req.WithContext(auth.WithUser(req.Context(), &auth.User{ + ID: result.User.ID, + Username: result.User.Username, + })) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNoContent { + t.Errorf("status = %d, want %d; body = %s", rec.Code, http.StatusNoContent, rec.Body.String()) + } +} + +func 
TestChangePassword_WrongCurrent_Returns403(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + body := `{"current_password":"wrongcurrent1","new_password":"newpassword1234"}` + req := httptest.NewRequest(http.MethodPut, "/api/user/password", strings.NewReader(body)) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: result.SessionToken}) + req = req.WithContext(auth.WithUser(req.Context(), &auth.User{ + ID: result.User.ID, + Username: result.User.Username, + })) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusForbidden { + t.Errorf("status = %d, want %d", rec.Code, http.StatusForbidden) + } +} + +func TestChangePassword_TooShort_Returns400(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + body := `{"current_password":"` + testPassword + `","new_password":"short"}` + req := httptest.NewRequest(http.MethodPut, "/api/user/password", strings.NewReader(body)) + req.AddCookie(&http.Cookie{Name: auth.SessionCookieName, Value: result.SessionToken}) + req = req.WithContext(auth.WithUser(req.Context(), &auth.User{ + ID: result.User.ID, + Username: result.User.Username, + })) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestChangePassword_NoUser_Returns401(t *testing.T) { + t.Parallel() + mux, _ := newTestAuthMux(t) + + body := `{"current_password":"old","new_password":"newpassword1234"}` + req := httptest.NewRequest(http.MethodPut, "/api/user/password", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("status = %d, want %d", rec.Code, 
http.StatusUnauthorized) + } +} + +// --- Me --- + +func TestMe_Authenticated_Returns200(t *testing.T) { + t.Parallel() + mux, svc := newTestAuthMux(t) + createTestUser(t, svc) + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + req := httptest.NewRequest(http.MethodGet, "/api/me", nil) + req = req.WithContext(auth.WithUser(req.Context(), &auth.User{ + ID: result.User.ID, + Username: result.User.Username, + })) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d; body = %s", rec.Code, http.StatusOK, rec.Body.String()) + } + + var resp loginResponse + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("decode error: %v", err) + } + if resp.User.Username != "admin" { + t.Errorf("username = %q, want %q", resp.User.Username, "admin") + } +} + +func TestMe_Unauthenticated_Returns401(t *testing.T) { + t.Parallel() + mux, _ := newTestAuthMux(t) + + req := httptest.NewRequest(http.MethodGet, "/api/me", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized) + } +} diff --git a/admin/api/instance.go b/admin/api/instance.go new file mode 100644 index 0000000..de00b9b --- /dev/null +++ b/admin/api/instance.go @@ -0,0 +1,259 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "encoding/json" + "errors" + "fmt" + "log/slog" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/cloudblue/chaperone/admin/auth" + "github.com/cloudblue/chaperone/admin/poller" + "github.com/cloudblue/chaperone/admin/store" +) + +// InstanceHandler handles instance CRUD and test-connection endpoints. +type InstanceHandler struct { + store *store.Store + client *http.Client +} + +// NewInstanceHandler creates a handler with the given store and probe timeout. 
+func NewInstanceHandler(st *store.Store, probeTimeout time.Duration) *InstanceHandler { + return &InstanceHandler{ + store: st, + client: &http.Client{Timeout: probeTimeout}, + } +} + +// Register mounts instance routes on the given mux. +func (h *InstanceHandler) Register(mux *http.ServeMux) { + mux.HandleFunc("GET /api/instances", h.list) + mux.HandleFunc("POST /api/instances", h.create) + mux.HandleFunc("POST /api/instances/test", h.testConnection) + mux.HandleFunc("GET /api/instances/{id}", h.get) + mux.HandleFunc("PUT /api/instances/{id}", h.update) + mux.HandleFunc("DELETE /api/instances/{id}", h.delete) +} + +func (h *InstanceHandler) list(w http.ResponseWriter, r *http.Request) { + instances, err := h.store.ListInstances(r.Context()) + if err != nil { + slog.Error("listing instances", "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to list instances") + return + } + if instances == nil { + instances = []store.Instance{} + } + respondJSON(w, http.StatusOK, instances) +} + +func (h *InstanceHandler) get(w http.ResponseWriter, r *http.Request) { + id, ok := parseID(w, r) + if !ok { + return + } + + inst, err := h.store.GetInstance(r.Context(), id) + if errors.Is(err, store.ErrInstanceNotFound) { + respondError(w, http.StatusNotFound, "INSTANCE_NOT_FOUND", fmt.Sprintf("No instance with ID %d", id)) + return + } + if err != nil { + slog.Error("getting instance", "id", id, "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to get instance") + return + } + respondJSON(w, http.StatusOK, inst) +} + +type instanceRequest struct { + Name string `json:"name"` + Address string `json:"address"` +} + +func (h *InstanceHandler) create(w http.ResponseWriter, r *http.Request) { + var req instanceRequest + if !decodeJSON(w, r, &req) { + return + } + if !validateInstanceRequest(w, &req) { + return + } + + inst, err := h.store.CreateInstance(r.Context(), req.Name, req.Address) + if errors.Is(err, 
store.ErrDuplicateAddress) { + respondError(w, http.StatusConflict, "DUPLICATE_ADDRESS", + fmt.Sprintf("An instance with address %q is already registered", req.Address)) + return + } + if err != nil { + slog.Error("creating instance", "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to create instance") + return + } + + h.audit(r, AuditActionInstanceCreate, &inst.ID, + fmt.Sprintf("Created instance %q at %s", inst.Name, inst.Address)) + respondJSON(w, http.StatusCreated, inst) +} + +func (h *InstanceHandler) update(w http.ResponseWriter, r *http.Request) { + id, ok := parseID(w, r) + if !ok { + return + } + + var req instanceRequest + if !decodeJSON(w, r, &req) { + return + } + if !validateInstanceRequest(w, &req) { + return + } + + inst, err := h.store.UpdateInstance(r.Context(), id, req.Name, req.Address) + if errors.Is(err, store.ErrInstanceNotFound) { + respondError(w, http.StatusNotFound, "INSTANCE_NOT_FOUND", fmt.Sprintf("No instance with ID %d", id)) + return + } + if errors.Is(err, store.ErrDuplicateAddress) { + respondError(w, http.StatusConflict, "DUPLICATE_ADDRESS", + fmt.Sprintf("An instance with address %q is already registered", req.Address)) + return + } + if err != nil { + slog.Error("updating instance", "id", id, "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to update instance") + return + } + + h.audit(r, AuditActionInstanceUpdate, &inst.ID, + fmt.Sprintf("Updated instance %q (address: %s)", inst.Name, inst.Address)) + respondJSON(w, http.StatusOK, inst) +} + +func (h *InstanceHandler) delete(w http.ResponseWriter, r *http.Request) { + id, ok := parseID(w, r) + if !ok { + return + } + + // Fetch instance name before deletion for the audit detail. 
+ inst, getErr := h.store.GetInstance(r.Context(), id) + + err := h.store.DeleteInstance(r.Context(), id) + if errors.Is(err, store.ErrInstanceNotFound) { + respondError(w, http.StatusNotFound, "INSTANCE_NOT_FOUND", fmt.Sprintf("No instance with ID %d", id)) + return + } + if err != nil { + slog.Error("deleting instance", "id", id, "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to delete instance") + return + } + + detail := fmt.Sprintf("Deleted instance ID %d", id) + if getErr == nil { + detail = fmt.Sprintf("Deleted instance %q (%s)", inst.Name, inst.Address) + } + h.audit(r, AuditActionInstanceDelete, nil, detail) + w.WriteHeader(http.StatusNoContent) +} + +func (h *InstanceHandler) testConnection(w http.ResponseWriter, r *http.Request) { + var req struct { + Address string `json:"address"` + } + if !decodeJSON(w, r, &req) { + return + } + + addr := strings.TrimSpace(req.Address) + if addr == "" { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", "address is required") + return + } + if err := validHostPort(addr); err != nil { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", err.Error()) + return + } + + result := poller.Probe(r.Context(), h.client, addr) + respondJSON(w, http.StatusOK, result) +} + +// parseID extracts and validates the {id} path parameter. +func parseID(w http.ResponseWriter, r *http.Request) (int64, bool) { + raw := r.PathValue("id") + id, err := strconv.ParseInt(raw, 10, 64) + if err != nil || id <= 0 { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", fmt.Sprintf("Invalid instance ID: %q", raw)) + return 0, false + } + return id, true +} + +// decodeJSON reads and decodes a JSON request body (max 1 MB). 
+func decodeJSON(w http.ResponseWriter, r *http.Request, dst any) bool { + r.Body = http.MaxBytesReader(w, r.Body, 1<<20) + if err := json.NewDecoder(r.Body).Decode(dst); err != nil { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", "Invalid JSON request body") + return false + } + return true +} + +func validateInstanceRequest(w http.ResponseWriter, req *instanceRequest) bool { + req.Name = strings.TrimSpace(req.Name) + req.Address = strings.TrimSpace(req.Address) + + if req.Name == "" { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", "name is required") + return false + } + if req.Address == "" { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", "address is required") + return false + } + if err := validHostPort(req.Address); err != nil { + respondError(w, http.StatusBadRequest, "VALIDATION_ERROR", err.Error()) + return false + } + return true +} + +var errInvalidHostPort = errors.New("address must be a valid host:port (e.g. 192.168.1.10:9090)") + +func (h *InstanceHandler) audit(r *http.Request, action string, instanceID *int64, detail string) { + user := auth.ContextUser(r.Context()) + if user == nil { + return + } + if err := h.store.InsertAuditEntry(r.Context(), user.ID, action, instanceID, detail); err != nil { + slog.Error("writing audit entry", "action", action, "error", err) + } +} + +func validHostPort(addr string) error { + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return errInvalidHostPort + } + if host == "" { + return errInvalidHostPort + } + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil || port == 0 { + return errInvalidHostPort + } + return nil +} diff --git a/admin/api/instance_test.go b/admin/api/instance_test.go new file mode 100644 index 0000000..d424975 --- /dev/null +++ b/admin/api/instance_test.go @@ -0,0 +1,469 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "net/http" + 
"net/http/httptest" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/cloudblue/chaperone/admin/store" +) + +func openTestStore(t *testing.T) *store.Store { + t.Helper() + dbPath := filepath.Join(t.TempDir(), "test.db") + st, err := store.Open(context.Background(), dbPath) + if err != nil { + t.Fatalf("Open(%q) failed: %v", dbPath, err) + } + t.Cleanup(func() { st.Close() }) + return st +} + +func newTestHandler(t *testing.T) *http.ServeMux { + t.Helper() + st := openTestStore(t) + h := NewInstanceHandler(st, 2*time.Second) + mux := http.NewServeMux() + h.Register(mux) + return mux +} + +func TestListInstances_Empty_ReturnsEmptyArray(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } + if body := strings.TrimSpace(rec.Body.String()); body != "[]" { + t.Errorf("body = %s, want []", body) + } +} + +func TestCreateInstance_Success_Returns201(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + body := `{"name":"proxy-1","address":"10.0.0.1:9090"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusCreated { + t.Fatalf("status = %d, want %d; body = %s", rec.Code, http.StatusCreated, rec.Body.String()) + } + + var inst store.Instance + if err := json.NewDecoder(rec.Body).Decode(&inst); err != nil { + t.Fatalf("decoding response: %v", err) + } + if inst.Name != "proxy-1" { + t.Errorf("Name = %q, want %q", inst.Name, "proxy-1") + } + if inst.Address != "10.0.0.1:9090" { + t.Errorf("Address = %q, want %q", inst.Address, "10.0.0.1:9090") + } +} + +func TestCreateInstance_DuplicateAddress_Returns409(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + body := 
`{"name":"proxy-1","address":"10.0.0.1:9090"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusCreated { + t.Fatalf("first create: status = %d, want %d", rec.Code, http.StatusCreated) + } + + // Second create with same address. + req = httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusConflict { + t.Errorf("status = %d, want %d", rec.Code, http.StatusConflict) + } + assertErrorCode(t, rec, "DUPLICATE_ADDRESS") +} + +func TestCreateInstance_MissingName_Returns400(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + body := `{"name":"","address":"10.0.0.1:9090"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } + assertErrorCode(t, rec, "VALIDATION_ERROR") +} + +func TestCreateInstance_MissingAddress_Returns400(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + body := `{"name":"proxy-1","address":""}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestCreateInstance_InvalidJSON_Returns400(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader("not json")) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestGetInstance_Exists_Returns200(t *testing.T) { + 
t.Parallel() + mux := newTestHandler(t) + + // Create first. + body := `{"name":"proxy-1","address":"10.0.0.1:9090"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + var created store.Instance + json.NewDecoder(rec.Body).Decode(&created) + + // Get by ID. + req = httptest.NewRequest(http.MethodGet, "/api/instances/1", nil) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } +} + +func TestGetInstance_NotFound_Returns404(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + req := httptest.NewRequest(http.MethodGet, "/api/instances/999", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNotFound { + t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound) + } + assertErrorCode(t, rec, "INSTANCE_NOT_FOUND") +} + +func TestGetInstance_InvalidID_Returns400(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + req := httptest.NewRequest(http.MethodGet, "/api/instances/abc", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestUpdateInstance_Success_Returns200(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + // Create. + create := `{"name":"proxy-1","address":"10.0.0.1:9090"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(create)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + // Update. 
+ update := `{"name":"proxy-1-updated","address":"10.0.0.2:9090"}` + req = httptest.NewRequest(http.MethodPut, "/api/instances/1", strings.NewReader(update)) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d; body = %s", rec.Code, http.StatusOK, rec.Body.String()) + } + + var inst store.Instance + json.NewDecoder(rec.Body).Decode(&inst) + if inst.Name != "proxy-1-updated" { + t.Errorf("Name = %q, want %q", inst.Name, "proxy-1-updated") + } +} + +func TestDeleteInstance_Success_Returns204(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + // Create. + create := `{"name":"proxy-1","address":"10.0.0.1:9090"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(create)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + // Delete. + req = httptest.NewRequest(http.MethodDelete, "/api/instances/1", nil) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNoContent { + t.Errorf("status = %d, want %d", rec.Code, http.StatusNoContent) + } + + // Verify gone. + req = httptest.NewRequest(http.MethodGet, "/api/instances/1", nil) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNotFound { + t.Errorf("after delete: status = %d, want %d", rec.Code, http.StatusNotFound) + } +} + +func TestDeleteInstance_NotFound_Returns404(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + req := httptest.NewRequest(http.MethodDelete, "/api/instances/999", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNotFound { + t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound) + } +} + +func TestTestConnection_Success(t *testing.T) { + t.Parallel() + + // Start a fake proxy admin server. 
+ proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/_ops/health": + w.Write([]byte(`{"status":"alive"}`)) + case "/_ops/version": + w.Write([]byte(`{"version":"1.2.3"}`)) + default: + http.NotFound(w, r) + } + })) + defer proxy.Close() + + mux := newTestHandler(t) + addr := strings.TrimPrefix(proxy.URL, "http://") + body := `{"address":"` + addr + `"}` + + req := httptest.NewRequest(http.MethodPost, "/api/instances/test", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d; body = %s", rec.Code, http.StatusOK, rec.Body.String()) + } + + var result struct { + OK bool `json:"ok"` + Version string `json:"version"` + } + json.NewDecoder(rec.Body).Decode(&result) + + if !result.OK { + t.Error("expected ok=true") + } + if result.Version != "1.2.3" { + t.Errorf("Version = %q, want %q", result.Version, "1.2.3") + } +} + +func TestTestConnection_Unreachable(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + body := `{"address":"127.0.0.1:1"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances/test", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var result struct { + OK bool `json:"ok"` + Error string `json:"error"` + } + json.NewDecoder(rec.Body).Decode(&result) + + if result.OK { + t.Error("expected ok=false for unreachable address") + } + if result.Error == "" { + t.Error("expected non-empty error message") + } +} + +func TestTestConnection_EmptyAddress_Returns400(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + body := `{"address":""}` + req := httptest.NewRequest(http.MethodPost, "/api/instances/test", strings.NewReader(body)) + rec := httptest.NewRecorder() + 
mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestCreateInstance_InvalidAddress_Returns400(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + tests := []struct { + name string + address string + }{ + {"no port", "not-a-host-port"}, + {"empty host", ":9090"}, + {"non-numeric port", "example.com:abc"}, + {"port zero", "example.com:0"}, + {"port too large", "example.com:70000"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + body := `{"name":"proxy-1","address":"` + tt.address + `"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("address %q: status = %d, want %d", tt.address, rec.Code, http.StatusBadRequest) + } + }) + } +} + +func TestTestConnection_InvalidAddress_Returns400(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + tests := []struct { + name string + address string + }{ + {"no port", "no-port-here"}, + {"empty host", ":9090"}, + {"non-numeric port", "example.com:abc"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + body := `{"address":"` + tt.address + `"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances/test", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("address %q: status = %d, want %d", tt.address, rec.Code, http.StatusBadRequest) + } + }) + } +} + +func TestCreateInstance_WhitespaceTrimmed(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + body := `{"name":" proxy-1 ","address":" 10.0.0.1:9090 "}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusCreated { + t.Fatalf("status = %d, want 
%d; body = %s", rec.Code, http.StatusCreated, rec.Body.String()) + } + + var inst store.Instance + if err := json.NewDecoder(rec.Body).Decode(&inst); err != nil { + t.Fatalf("decoding response: %v", err) + } + if inst.Name != "proxy-1" { + t.Errorf("Name = %q, want %q (should be trimmed)", inst.Name, "proxy-1") + } + if inst.Address != "10.0.0.1:9090" { + t.Errorf("Address = %q, want %q (should be trimmed)", inst.Address, "10.0.0.1:9090") + } +} + +func TestListInstances_AfterCreate_ReturnsInstances(t *testing.T) { + t.Parallel() + mux := newTestHandler(t) + + // Create two instances. + for _, name := range []string{"alpha", "bravo"} { + body := `{"name":"` + name + `","address":"` + name + `:9090"}` + req := httptest.NewRequest(http.MethodPost, "/api/instances", strings.NewReader(body)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + if rec.Code != http.StatusCreated { + t.Fatalf("create %s: status = %d", name, rec.Code) + } + } + + // List. + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var instances []store.Instance + json.NewDecoder(rec.Body).Decode(&instances) + if len(instances) != 2 { + t.Errorf("len = %d, want 2", len(instances)) + } +} + +// assertErrorCode checks that the response body contains the expected error code. 
+func assertErrorCode(t *testing.T, rec *httptest.ResponseRecorder, wantCode string) { + t.Helper() + var resp errorResponse + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("decoding error response: %v", err) + } + if resp.Error.Code != wantCode { + t.Errorf("error code = %q, want %q", resp.Error.Code, wantCode) + } +} diff --git a/admin/api/metrics.go b/admin/api/metrics.go new file mode 100644 index 0000000..61e8cb1 --- /dev/null +++ b/admin/api/metrics.go @@ -0,0 +1,61 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "log/slog" + "net/http" + + "github.com/cloudblue/chaperone/admin/metrics" + "github.com/cloudblue/chaperone/admin/store" +) + +// MetricsHandler serves computed metrics via the REST API. +type MetricsHandler struct { + store *store.Store + collector *metrics.Collector +} + +// NewMetricsHandler creates a handler backed by the given store and collector. +func NewMetricsHandler(st *store.Store, c *metrics.Collector) *MetricsHandler { + return &MetricsHandler{store: st, collector: c} +} + +// Register mounts metrics routes on the given mux. 
+func (h *MetricsHandler) Register(mux *http.ServeMux) { + mux.HandleFunc("GET /api/metrics/fleet", h.fleet) + mux.HandleFunc("GET /api/metrics/{id}", h.instance) +} + +func (h *MetricsHandler) fleet(w http.ResponseWriter, r *http.Request) { + instances, err := h.store.ListInstances(r.Context()) + if err != nil { + slog.Error("listing instances for fleet metrics", "error", err) + respondError(w, http.StatusInternalServerError, "INTERNAL_ERROR", "Failed to list instances") + return + } + + ids := make([]int64, len(instances)) + for i := range instances { + ids[i] = instances[i].ID + } + + fm := h.collector.GetFleetMetrics(ids) + respondJSON(w, http.StatusOK, fm) +} + +func (h *MetricsHandler) instance(w http.ResponseWriter, r *http.Request) { + id, ok := parseID(w, r) + if !ok { + return + } + + im := h.collector.GetInstanceMetrics(id) + if im == nil { + respondError(w, http.StatusNotFound, "NO_METRICS", "No metric data available for this instance") + return + } + + respondJSON(w, http.StatusOK, im) +} diff --git a/admin/api/metrics_test.go b/admin/api/metrics_test.go new file mode 100644 index 0000000..43f598d --- /dev/null +++ b/admin/api/metrics_test.go @@ -0,0 +1,177 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package api + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/cloudblue/chaperone/admin/metrics" +) + +func makeSnapshot(t time.Time, totalReq, errReq, active, panics float64) metrics.Snapshot { + return metrics.Snapshot{ + Time: t, + Vendors: map[string]*metrics.VendorSnapshot{ + "acme": { + RequestsTotal: totalReq, + RequestsErrors: errReq, + Duration: metrics.Histogram{ + Count: totalReq, + Buckets: []metrics.Bucket{ + {UpperBound: 0.1, Count: totalReq * 0.5}, + {UpperBound: 0.5, Count: totalReq * 0.9}, + {UpperBound: 1.0, Count: totalReq}, + }, + }, + }, + }, + ActiveConnections: active, + PanicsTotal: panics, + } +} + +func 
TestMetricsHandler_Fleet_ReturnsAggregated(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + c.Record(inst.ID, makeSnapshot(t0, 1000, 50, 10, 1)) + c.Record(inst.ID, makeSnapshot(t0.Add(10*time.Second), 1100, 55, 12, 2)) + + h := NewMetricsHandler(st, c) + mux := http.NewServeMux() + h.Register(mux) + + req := httptest.NewRequest(http.MethodGet, "/api/metrics/fleet", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var fm metrics.FleetMetrics + if err := json.NewDecoder(rec.Body).Decode(&fm); err != nil { + t.Fatalf("decode error: %v", err) + } + if fm.TotalRPS <= 0 { + t.Errorf("TotalRPS = %v, want > 0", fm.TotalRPS) + } + if len(fm.Instances) != 1 { + t.Errorf("Instances = %d, want 1", len(fm.Instances)) + } +} + +func TestMetricsHandler_Instance_ReturnsMetrics(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + c.Record(inst.ID, makeSnapshot(t0, 1000, 50, 10, 1)) + c.Record(inst.ID, makeSnapshot(t0.Add(10*time.Second), 1100, 55, 12, 2)) + + h := NewMetricsHandler(st, c) + mux := http.NewServeMux() + h.Register(mux) + + req := httptest.NewRequest(http.MethodGet, "/api/metrics/1", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var im metrics.InstanceMetrics + if err := json.NewDecoder(rec.Body).Decode(&im); err != nil { 
+ t.Fatalf("decode error: %v", err) + } + if im.RPS <= 0 { + t.Errorf("RPS = %v, want > 0", im.RPS) + } + if im.DataPoints != 2 { + t.Errorf("DataPoints = %d, want 2", im.DataPoints) + } +} + +func TestMetricsHandler_Instance_NoData_Returns404(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + + h := NewMetricsHandler(st, c) + mux := http.NewServeMux() + h.Register(mux) + + req := httptest.NewRequest(http.MethodGet, "/api/metrics/99", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNotFound { + t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound) + } +} + +func TestMetricsHandler_Instance_InvalidID_Returns400(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + + h := NewMetricsHandler(st, c) + mux := http.NewServeMux() + h.Register(mux) + + req := httptest.NewRequest(http.MethodGet, "/api/metrics/abc", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestMetricsHandler_Fleet_EmptyFleet_ReturnsEmptyInstances(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + + h := NewMetricsHandler(st, c) + mux := http.NewServeMux() + h.Register(mux) + + req := httptest.NewRequest(http.MethodGet, "/api/metrics/fleet", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var fm metrics.FleetMetrics + if err := json.NewDecoder(rec.Body).Decode(&fm); err != nil { + t.Fatalf("decode error: %v", err) + } + if len(fm.Instances) != 0 { + t.Errorf("Instances = %d, want 0", len(fm.Instances)) + } +} diff --git a/admin/api/respond.go b/admin/api/respond.go new file mode 100644 index 0000000..6cb4e38 --- /dev/null +++ b/admin/api/respond.go @@ -0,0 +1,35 @@ +// Copyright 2026 
CloudBlue LLC
+// SPDX-License-Identifier: Apache-2.0
+
+package api
+
+import (
+	"encoding/json"
+	"log/slog"
+	"net/http"
+)
+
+// errorResponse is the top-level JSON envelope for error replies.
+type errorResponse struct {
+	Error errorDetail `json:"error"`
+}
+
+// errorDetail carries a machine-readable code and a human-readable message.
+type errorDetail struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+// respondJSON writes a JSON response with the given status code.
+// Content-Type must be set before WriteHeader, since headers are
+// frozen once the status line is written.
+func respondJSON(w http.ResponseWriter, status int, v any) {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+	if err := json.NewEncoder(w).Encode(v); err != nil {
+		// The status line is already sent; all we can do is log.
+		slog.Error("encoding JSON response", "error", err)
+	}
+}
+
+// respondError writes a structured error response matching the DR error format.
+func respondError(w http.ResponseWriter, status int, code, message string) {
+	respondJSON(w, status, errorResponse{
+		Error: errorDetail{Code: code, Message: message},
+	})
+}
diff --git a/admin/auth/auth.go b/admin/auth/auth.go
new file mode 100644
index 0000000..fde53ba
--- /dev/null
+++ b/admin/auth/auth.go
@@ -0,0 +1,302 @@
+// Copyright 2026 CloudBlue LLC
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"log/slog"
+	"net/http"
+	"time"
+
+	"golang.org/x/crypto/bcrypt"
+
+	"github.com/cloudblue/chaperone/admin/store"
+)
+
+// Cookie and header names used by the auth system.
+const (
+	SessionCookieName = "session"
+	CSRFCookieName    = "csrf_token"
+	CSRFHeaderName    = "X-CSRF-Token"
+	MinPasswordLength = 12
+	MaxPasswordLength = 72 // bcrypt silently truncates beyond this
+	MaxUsernameLength = 64
+)
+
+// Sentinel errors for authentication operations.
+var (
+	ErrUnauthenticated    = errors.New("unauthenticated")
+	ErrInvalidCredentials = errors.New("invalid credentials")
+	ErrPasswordTooShort   = errors.New("password too short")
+	ErrPasswordTooLong    = errors.New("password too long")
+	ErrInvalidUsername    = errors.New("invalid username")
+	ErrRateLimited        = errors.New("rate limited")
+	ErrSessionExpired     = errors.New("session expired")
+)
+
+// dummyHash is a pre-computed bcrypt hash used when a user is not found,
+// to prevent timing-based username enumeration.
+//
+//nolint:errcheck // bcrypt.GenerateFromPassword with DefaultCost never fails
+var dummyHash, _ = bcrypt.GenerateFromPassword([]byte("dummy-password-for-timing"), bcrypt.DefaultCost)
+
+// Authenticator validates a request and returns the authenticated user.
+// This interface enables future auth backends (OIDC, etc.) without
+// changing middleware or handlers.
+type Authenticator interface {
+	Authenticate(r *http.Request) (*User, error)
+}
+
+// User represents an authenticated portal user.
+type User struct {
+	ID       int64
+	Username string
+}
+
+// LoginResult holds the outcome of a successful login.
+// NOTE(review): gosec's hardcoded-credential rule is G101; confirm "G117"
+// is a valid rule ID for the installed gosec/golangci-lint version.
+type LoginResult struct {
+	SessionToken string // #nosec G117 -- this is a session token, not a hardcoded secret
+	User         User
+}
+
+// Service implements local authentication using SQLite-backed users
+// with bcrypt password hashing and session cookies.
+type Service struct {
+	store       *store.Store
+	limiter     *RateLimiter
+	maxAge      time.Duration // absolute session lifetime
+	idleTimeout time.Duration // max inactivity before a session is dropped
+}
+
+// NewService creates an auth service with the given session parameters.
+// The login rate limit is fixed at 5 failed attempts per minute per IP.
+func NewService(st *store.Store, maxAge, idleTimeout time.Duration) *Service {
+	return &Service{
+		store:       st,
+		limiter:     NewRateLimiter(5, time.Minute),
+		maxAge:      maxAge,
+		idleTimeout: idleTimeout,
+	}
+}
+
+// SweepRateLimiter removes expired entries from the rate limiter.
+func (s *Service) SweepRateLimiter() {
+	s.limiter.Sweep()
+}
+
+// Authenticate validates the session cookie on an HTTP request.
+// It checks absolute TTL, idle timeout, and touches the session.
+// Returns ErrUnauthenticated for a missing cookie or unknown token,
+// ErrSessionExpired when either the absolute TTL or the idle window
+// has elapsed, and wraps any store failure.
+func (s *Service) Authenticate(r *http.Request) (*User, error) {
+	cookie, err := r.Cookie(SessionCookieName)
+	if err != nil {
+		// No session cookie at all: generic unauthenticated error.
+		return nil, ErrUnauthenticated
+	}
+
+	rawToken := cookie.Value
+	sess, err := s.store.GetSessionByToken(r.Context(), rawToken)
+	if errors.Is(err, store.ErrSessionNotFound) {
+		return nil, ErrUnauthenticated
+	}
+	if err != nil {
+		return nil, fmt.Errorf("validating session: %w", err)
+	}
+
+	now := time.Now()
+	if now.After(sess.ExpiresAt) {
+		// Best-effort cleanup; failure to delete does not change the outcome.
+		if delErr := s.store.DeleteSession(r.Context(), rawToken); delErr != nil {
+			slog.Error("deleting expired session", "error", delErr)
+		}
+		return nil, ErrSessionExpired
+	}
+	if now.Sub(sess.LastActiveAt) > s.idleTimeout {
+		// Best-effort cleanup of the idle session.
+		if delErr := s.store.DeleteSession(r.Context(), rawToken); delErr != nil {
+			slog.Error("deleting idle session", "error", delErr)
+		}
+		return nil, ErrSessionExpired
+	}
+
+	// Refresh the idle timer; a touch failure is logged but non-fatal
+	// so a transient store hiccup does not log users out.
+	if touchErr := s.store.TouchSession(r.Context(), rawToken); touchErr != nil {
+		slog.Error("touching session", "error", touchErr)
+	}
+
+	user, err := s.store.GetUserByID(r.Context(), sess.UserID)
+	if err != nil {
+		return nil, fmt.Errorf("getting user for session: %w", err)
+	}
+
+	return &User{ID: user.ID, Username: user.Username}, nil
+}
+
+// Login authenticates credentials and creates a new session.
+// It enforces rate limiting per IP and uses constant-time comparison
+// to prevent username enumeration.
+// Returns ErrRateLimited when the IP has exceeded its failure budget and
+// ErrInvalidCredentials for both unknown users and wrong passwords.
+func (s *Service) Login(ctx context.Context, ip, username, password string) (*LoginResult, error) {
+	if !s.limiter.Allow(ip) {
+		return nil, ErrRateLimited
+	}
+
+	user, err := s.store.GetUserByUsername(ctx, username)
+	if errors.Is(err, store.ErrUserNotFound) {
+		// Burn a comparable amount of bcrypt time so "no such user"
+		// is indistinguishable from "wrong password" to an attacker.
+		_ = bcrypt.CompareHashAndPassword(dummyHash, []byte(password))
+		s.limiter.Record(ip)
+		return nil, ErrInvalidCredentials
+	}
+	if err != nil {
+		return nil, fmt.Errorf("looking up user: %w", err)
+	}
+
+	err = bcrypt.CompareHashAndPassword([]byte(user.PasswordHash), []byte(password))
+	if err != nil {
+		s.limiter.Record(ip)
+		return nil, ErrInvalidCredentials
+	}
+
+	// Successful login clears the IP's failure counter.
+	s.limiter.Reset(ip)
+
+	token, err := GenerateToken(32)
+	if err != nil {
+		return nil, err
+	}
+
+	expiresAt := time.Now().Add(s.maxAge)
+	if err := s.store.CreateSession(ctx, user.ID, token, expiresAt); err != nil {
+		return nil, fmt.Errorf("creating session: %w", err)
+	}
+
+	return &LoginResult{
+		SessionToken: token,
+		User:         User{ID: user.ID, Username: user.Username},
+	}, nil
+}
+
+// Logout invalidates a session by its token.
+func (s *Service) Logout(ctx context.Context, token string) error {
+	return s.store.DeleteSession(ctx, token)
+}
+
+// ChangePassword verifies the current password, updates to a new one,
+// and invalidates all sessions except the caller's.
+// Returns ErrPasswordTooShort/ErrPasswordTooLong for an invalid new password
+// and ErrInvalidCredentials when the current password does not match.
+func (s *Service) ChangePassword(ctx context.Context, userID int64, currentToken, currentPassword, newPassword string) error {
+	// Validate the new password before doing any store or bcrypt work.
+	if err := validatePassword(newPassword); err != nil {
+		return err
+	}
+
+	user, err := s.store.GetUserByID(ctx, userID)
+	if err != nil {
+		return fmt.Errorf("getting user: %w", err)
+	}
+
+	err = bcrypt.CompareHashAndPassword([]byte(user.PasswordHash), []byte(currentPassword))
+	if err != nil {
+		return ErrInvalidCredentials
+	}
+
+	hash, err := bcrypt.GenerateFromPassword([]byte(newPassword), bcrypt.DefaultCost)
+	if err != nil {
+		return fmt.Errorf("hashing password: %w", err)
+	}
+
+	if err := s.store.UpdateUserPassword(ctx, userID, string(hash)); err != nil {
+		return err
+	}
+
+	// Keep only the caller's session alive; all others are revoked.
+	if err := s.store.DeleteOtherSessions(ctx, userID, currentToken); err != nil {
+		return fmt.Errorf("invalidating other sessions: %w", err)
+	}
+
+	return nil
+}
+
+// CreateUser creates a new portal user (CLI operation).
+// Store errors (e.g. a duplicate username) are returned unwrapped so
+// callers can match them with errors.Is.
+func (s *Service) CreateUser(ctx context.Context, username, password string) error {
+	if err := validateUsername(username); err != nil {
+		return err
+	}
+	if err := validatePassword(password); err != nil {
+		return err
+	}
+
+	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+	if err != nil {
+		return fmt.Errorf("hashing password: %w", err)
+	}
+
+	_, err = s.store.CreateUser(ctx, username, string(hash))
+	return err
+}
+
+// ResetPassword changes a user's password and invalidates all their sessions (CLI operation).
+// Unlike ChangePassword it does not require the current password and
+// revokes every session for the user, including any active ones.
+func (s *Service) ResetPassword(ctx context.Context, username, password string) error {
+	if err := validatePassword(password); err != nil {
+		return err
+	}
+
+	user, err := s.store.GetUserByUsername(ctx, username)
+	if err != nil {
+		return fmt.Errorf("looking up user: %w", err)
+	}
+
+	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+	if err != nil {
+		return fmt.Errorf("hashing password: %w", err)
+	}
+
+	if err := s.store.UpdateUserPassword(ctx, user.ID, string(hash)); err != nil {
+		return fmt.Errorf("updating password: %w", err)
+	}
+
+	if err := s.store.DeleteUserSessions(ctx, user.ID); err != nil {
+		return fmt.Errorf("invalidating sessions: %w", err)
+	}
+
+	return nil
+}
+
+// validatePassword enforces the min/max password length bounds.
+// Lengths are measured in bytes, which matches bcrypt's 72-byte input limit.
+func validatePassword(password string) error {
+	if len(password) < MinPasswordLength {
+		return ErrPasswordTooShort
+	}
+	if len(password) > MaxPasswordLength {
+		return ErrPasswordTooLong
+	}
+	return nil
+}
+
+// validateUsername rejects empty, over-long, or non-printable-ASCII names.
+// Only bytes in the 0x20-0x7E range are accepted, so non-ASCII usernames
+// are rejected.
+func validateUsername(username string) error {
+	if username == "" || len(username) > MaxUsernameLength {
+		return ErrInvalidUsername
+	}
+	for _, r := range username {
+		if r < 0x20 || r > 0x7E {
+			return ErrInvalidUsername
+		}
+	}
+	return nil
+}
+
+// GenerateToken returns a cryptographically random hex-encoded token.
+// The result is 2*byteLen characters long.
+func GenerateToken(byteLen int) (string, error) {
+	b := make([]byte, byteLen)
+	if _, err := rand.Read(b); err != nil {
+		return "", fmt.Errorf("generating random token: %w", err)
+	}
+	return hex.EncodeToString(b), nil
+}
+
+// contextKey is an unexported empty-struct key type, so no other package
+// can collide with this package's context value.
+type contextKey struct{}
+
+// WithUser stores an authenticated user in the request context.
+func WithUser(ctx context.Context, u *User) context.Context {
+	return context.WithValue(ctx, contextKey{}, u)
+}
+
+// ContextUser extracts the authenticated user from a request context.
+// Returns nil if no user is present (unauthenticated request).
+func ContextUser(ctx context.Context) *User { + u, _ := ctx.Value(contextKey{}).(*User) + return u +} diff --git a/admin/auth/auth_test.go b/admin/auth/auth_test.go new file mode 100644 index 0000000..07ac071 --- /dev/null +++ b/admin/auth/auth_test.go @@ -0,0 +1,500 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package auth + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/cloudblue/chaperone/admin/store" +) + +const testPassword = "securepassword12" + +func newTestService(t *testing.T) *Service { + t.Helper() + dbPath := filepath.Join(t.TempDir(), "test.db") + st, err := store.Open(context.Background(), dbPath) + if err != nil { + t.Fatalf("Open(%q) failed: %v", dbPath, err) + } + t.Cleanup(func() { st.Close() }) + return NewService(st, 24*time.Hour, 2*time.Hour) +} + +func createTestUser(t *testing.T, svc *Service) { + t.Helper() + if err := svc.CreateUser(context.Background(), "admin", testPassword); err != nil { + t.Fatalf("CreateUser() error = %v", err) + } +} + +func loginTestUser(t *testing.T, svc *Service) string { + t.Helper() + result, err := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + if err != nil { + t.Fatalf("Login() error = %v", err) + } + return result.SessionToken +} + +// --- CreateUser --- + +func TestCreateUser_Success(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + err := svc.CreateUser(context.Background(), "admin", testPassword) + if err != nil { + t.Fatalf("CreateUser() error = %v", err) + } +} + +func TestCreateUser_TooShort_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + err := svc.CreateUser(context.Background(), "admin", "short") + if !errors.Is(err, ErrPasswordTooShort) { + t.Errorf("error = %v, want %v", err, ErrPasswordTooShort) + } +} + +func TestCreateUser_TooLong_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + longPass 
:= strings.Repeat("a", MaxPasswordLength+1) + err := svc.CreateUser(context.Background(), "admin", longPass) + if !errors.Is(err, ErrPasswordTooLong) { + t.Errorf("error = %v, want %v", err, ErrPasswordTooLong) + } +} + +func TestCreateUser_EmptyUsername_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + err := svc.CreateUser(context.Background(), "", testPassword) + if !errors.Is(err, ErrInvalidUsername) { + t.Errorf("error = %v, want %v", err, ErrInvalidUsername) + } +} + +func TestCreateUser_UsernameTooLong_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + longName := strings.Repeat("a", MaxUsernameLength+1) + err := svc.CreateUser(context.Background(), longName, testPassword) + if !errors.Is(err, ErrInvalidUsername) { + t.Errorf("error = %v, want %v", err, ErrInvalidUsername) + } +} + +func TestCreateUser_ControlCharsInUsername_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + err := svc.CreateUser(context.Background(), "admin\x00", testPassword) + if !errors.Is(err, ErrInvalidUsername) { + t.Errorf("error = %v, want %v", err, ErrInvalidUsername) + } +} + +func TestCreateUser_Duplicate_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + ctx := context.Background() + + if err := svc.CreateUser(ctx, "admin", testPassword); err != nil { + t.Fatalf("first CreateUser() error = %v", err) + } + + err := svc.CreateUser(ctx, "admin", testPassword) + if !errors.Is(err, store.ErrDuplicateUsername) { + t.Errorf("error = %v, want %v", err, store.ErrDuplicateUsername) + } +} + +// --- Login --- + +func TestLogin_Success(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + + result, err := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + if err != nil { + t.Fatalf("Login() error = %v", err) + } + if result.SessionToken == "" { + t.Error("expected non-empty session token") + } + if result.User.Username != "admin" { + 
t.Errorf("Username = %q, want %q", result.User.Username, "admin") + } +} + +func TestLogin_WrongPassword_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + + _, err := svc.Login(context.Background(), "127.0.0.1", "admin", "wrongpassword1") + if !errors.Is(err, ErrInvalidCredentials) { + t.Errorf("error = %v, want %v", err, ErrInvalidCredentials) + } +} + +func TestLogin_UserNotFound_ReturnsInvalidCredentials(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + _, err := svc.Login(context.Background(), "127.0.0.1", "nobody", testPassword) + if !errors.Is(err, ErrInvalidCredentials) { + t.Errorf("error = %v, want %v", err, ErrInvalidCredentials) + } +} + +func TestLogin_RateLimited_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + ctx := context.Background() + + for range 5 { + svc.Login(ctx, "10.0.0.1", "admin", "badpassword00") + } + + _, err := svc.Login(ctx, "10.0.0.1", "admin", testPassword) + if !errors.Is(err, ErrRateLimited) { + t.Errorf("error = %v, want %v", err, ErrRateLimited) + } +} + +func TestLogin_RateLimit_ResetsOnSuccess(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + ctx := context.Background() + + // 4 failures (under limit of 5). + for range 4 { + svc.Login(ctx, "10.0.0.2", "admin", "badpassword00") + } + + // Successful login resets counter. + if _, err := svc.Login(ctx, "10.0.0.2", "admin", testPassword); err != nil { + t.Fatalf("Login() error = %v", err) + } + + // 4 more failures should be allowed (counter was reset). + for range 4 { + svc.Login(ctx, "10.0.0.2", "admin", "badpassword00") + } + + // 5th failure should still be under limit. 
+ _, err := svc.Login(ctx, "10.0.0.2", "admin", "badpassword00") + if !errors.Is(err, ErrInvalidCredentials) { + t.Errorf("error = %v, want %v (should still be under limit)", err, ErrInvalidCredentials) + } +} + +// --- Authenticate --- + +func TestAuthenticate_ValidSession_ReturnsUser(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + token := loginTestUser(t, svc) + + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: token}) + + user, err := svc.Authenticate(req) + if err != nil { + t.Fatalf("Authenticate() error = %v", err) + } + if user.Username != "admin" { + t.Errorf("Username = %q, want %q", user.Username, "admin") + } +} + +func TestAuthenticate_NoCookie_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + + _, err := svc.Authenticate(req) + if !errors.Is(err, ErrUnauthenticated) { + t.Errorf("error = %v, want %v", err, ErrUnauthenticated) + } +} + +func TestAuthenticate_InvalidToken_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: "bad-token"}) + + _, err := svc.Authenticate(req) + if !errors.Is(err, ErrUnauthenticated) { + t.Errorf("error = %v, want %v", err, ErrUnauthenticated) + } +} + +func TestAuthenticate_ExpiredSession_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + // Use very short maxAge so session expires immediately. 
+ svc.maxAge = time.Millisecond + createTestUser(t, svc) + token := loginTestUser(t, svc) + + time.Sleep(5 * time.Millisecond) + + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: token}) + + _, err := svc.Authenticate(req) + if !errors.Is(err, ErrSessionExpired) { + t.Errorf("error = %v, want %v", err, ErrSessionExpired) + } +} + +func TestAuthenticate_IdleSession_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + // Use very short idle timeout. + svc.idleTimeout = time.Millisecond + createTestUser(t, svc) + token := loginTestUser(t, svc) + + time.Sleep(5 * time.Millisecond) + + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: token}) + + _, err := svc.Authenticate(req) + if !errors.Is(err, ErrSessionExpired) { + t.Errorf("error = %v, want %v", err, ErrSessionExpired) + } +} + +// --- Logout --- + +func TestLogout_DeletesSession(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + token := loginTestUser(t, svc) + + if err := svc.Logout(context.Background(), token); err != nil { + t.Fatalf("Logout() error = %v", err) + } + + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: token}) + + _, err := svc.Authenticate(req) + if !errors.Is(err, ErrUnauthenticated) { + t.Errorf("after logout: error = %v, want %v", err, ErrUnauthenticated) + } +} + +// --- ChangePassword --- + +func TestChangePassword_Success(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + ctx := context.Background() + + result, _ := svc.Login(ctx, "127.0.0.1", "admin", testPassword) + + newPass := "newpassword1234" + if err := svc.ChangePassword(ctx, result.User.ID, result.SessionToken, testPassword, newPass); err != nil { + t.Fatalf("ChangePassword() error = %v", err) + } + + // Old 
password should fail. + _, err := svc.Login(ctx, "127.0.0.1", "admin", testPassword) + if !errors.Is(err, ErrInvalidCredentials) { + t.Errorf("old password: error = %v, want %v", err, ErrInvalidCredentials) + } + + // New password should work. + if _, err := svc.Login(ctx, "127.0.0.1", "admin", newPass); err != nil { + t.Errorf("new password: unexpected error = %v", err) + } +} + +func TestChangePassword_InvalidatesOtherSessions(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + ctx := context.Background() + + // Login twice to create two sessions. + result1, _ := svc.Login(ctx, "127.0.0.1", "admin", testPassword) + result2, _ := svc.Login(ctx, "127.0.0.2", "admin", testPassword) + + // Change password using session 1. + newPass := "newpassword1234" + if err := svc.ChangePassword(ctx, result1.User.ID, result1.SessionToken, testPassword, newPass); err != nil { + t.Fatalf("ChangePassword() error = %v", err) + } + + // Session 1 (caller) should still work. + req1 := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req1.AddCookie(&http.Cookie{Name: SessionCookieName, Value: result1.SessionToken}) + if _, err := svc.Authenticate(req1); err != nil { + t.Errorf("caller session should remain valid: %v", err) + } + + // Session 2 (other) should be invalidated. 
+ req2 := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req2.AddCookie(&http.Cookie{Name: SessionCookieName, Value: result2.SessionToken}) + _, err := svc.Authenticate(req2) + if !errors.Is(err, ErrUnauthenticated) { + t.Errorf("other session: error = %v, want %v", err, ErrUnauthenticated) + } +} + +func TestChangePassword_WrongCurrent_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + err := svc.ChangePassword(context.Background(), result.User.ID, result.SessionToken, "wrongcurrent1", "newpassword1234") + if !errors.Is(err, ErrInvalidCredentials) { + t.Errorf("error = %v, want %v", err, ErrInvalidCredentials) + } +} + +func TestChangePassword_TooShort_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + err := svc.ChangePassword(context.Background(), result.User.ID, result.SessionToken, testPassword, "short") + if !errors.Is(err, ErrPasswordTooShort) { + t.Errorf("error = %v, want %v", err, ErrPasswordTooShort) + } +} + +func TestChangePassword_TooLong_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + + result, _ := svc.Login(context.Background(), "127.0.0.1", "admin", testPassword) + + longPass := strings.Repeat("a", MaxPasswordLength+1) + err := svc.ChangePassword(context.Background(), result.User.ID, result.SessionToken, testPassword, longPass) + if !errors.Is(err, ErrPasswordTooLong) { + t.Errorf("error = %v, want %v", err, ErrPasswordTooLong) + } +} + +// --- ResetPassword --- + +func TestResetPassword_Success_InvalidatesSessions(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + token := loginTestUser(t, svc) + + newPass := "resetpassword12" + if err := svc.ResetPassword(context.Background(), "admin", 
newPass); err != nil { + t.Fatalf("ResetPassword() error = %v", err) + } + + // Old session should be invalid. + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: token}) + _, err := svc.Authenticate(req) + if !errors.Is(err, ErrUnauthenticated) { + t.Errorf("old session: error = %v, want %v", err, ErrUnauthenticated) + } + + // New password should work. + if _, err := svc.Login(context.Background(), "127.0.0.1", "admin", newPass); err != nil { + t.Errorf("new password: unexpected error = %v", err) + } +} + +func TestResetPassword_UserNotFound_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + + err := svc.ResetPassword(context.Background(), "nobody", "newpassword1234") + if err == nil { + t.Error("expected error, got nil") + } +} + +func TestResetPassword_TooShort_ReturnsError(t *testing.T) { + t.Parallel() + svc := newTestService(t) + createTestUser(t, svc) + + err := svc.ResetPassword(context.Background(), "admin", "short") + if !errors.Is(err, ErrPasswordTooShort) { + t.Errorf("error = %v, want %v", err, ErrPasswordTooShort) + } +} + +// --- GenerateToken --- + +func TestGenerateToken_ReturnsUniqueTokens(t *testing.T) { + t.Parallel() + + t1, err := GenerateToken(32) + if err != nil { + t.Fatalf("GenerateToken() error = %v", err) + } + t2, err := GenerateToken(32) + if err != nil { + t.Fatalf("GenerateToken() error = %v", err) + } + + if len(t1) != 64 { + t.Errorf("token length = %d, want 64", len(t1)) + } + if t1 == t2 { + t.Error("consecutive tokens should be unique") + } +} + +// --- Context helpers --- + +func TestContextUser_RoundTrip(t *testing.T) { + t.Parallel() + + ctx := context.Background() + if got := ContextUser(ctx); got != nil { + t.Error("expected nil user from empty context") + } + + user := &User{ID: 42, Username: "admin"} + ctx = WithUser(ctx, user) + got := ContextUser(ctx) + if got == nil || got.ID != 42 || got.Username != "admin" { + 
t.Errorf("ContextUser() = %v, want %v", got, user)
+	}
+}
diff --git a/admin/auth/middleware.go b/admin/auth/middleware.go
new file mode 100644
index 0000000..4759215
--- /dev/null
+++ b/admin/auth/middleware.go
@@ -0,0 +1,103 @@
+// Copyright 2026 CloudBlue LLC
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+	"crypto/subtle"
+	"encoding/json"
+	"log/slog"
+	"net/http"
+	"strings"
+)
+
+// RequireAuth wraps an http.Handler and enforces session authentication
+// on all /api/* routes except POST /api/login and GET /api/health.
+// On success the user is stored in the request context for handlers
+// to retrieve via ContextUser.
+func RequireAuth(auth Authenticator, next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Route exemptions live in requiresAuth, keeping policy in one place.
+		if !requiresAuth(r) {
+			next.ServeHTTP(w, r)
+			return
+		}
+
+		user, err := auth.Authenticate(r)
+		if err != nil {
+			// Debug level: failed auth on API routes is routine noise.
+			slog.Debug("authentication failed", "path", r.URL.Path, "error", err)
+			writeError(w, http.StatusUnauthorized, "UNAUTHORIZED", "Authentication required")
+			return
+		}
+
+		next.ServeHTTP(w, r.WithContext(WithUser(r.Context(), user)))
+	})
+}
+
+// CSRFProtection validates the double-submit cookie pattern on all
+// write requests to /api/* (except POST /api/login which has no session yet).
+// A request passes only when the csrf_token cookie and the X-CSRF-Token
+// header are both present and equal.
+func CSRFProtection(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if !requiresCSRF(r) {
+			next.ServeHTTP(w, r)
+			return
+		}
+
+		cookie, err := r.Cookie(CSRFCookieName)
+		if err != nil {
+			writeError(w, http.StatusForbidden, "CSRF_ERROR", "Missing CSRF token")
+			return
+		}
+
+		header := r.Header.Get(CSRFHeaderName)
+		// Constant-time compare avoids leaking token bytes via timing.
+		if header == "" || subtle.ConstantTimeCompare([]byte(header), []byte(cookie.Value)) != 1 {
+			writeError(w, http.StatusForbidden, "CSRF_ERROR", "Invalid CSRF token")
+			return
+		}
+
+		next.ServeHTTP(w, r)
+	})
+}
+
+// requiresAuth reports whether a request must carry a valid session:
+// all /api/* routes except POST /api/login and GET /api/health.
+func requiresAuth(r *http.Request) bool {
+	if !strings.HasPrefix(r.URL.Path, "/api/") {
+		return false
+	}
+	if r.Method == http.MethodPost && r.URL.Path == "/api/login" {
+		return false
+	}
+	if r.Method == http.MethodGet && r.URL.Path == "/api/health" {
+		return false
+	}
+	return true
+}
+
+// requiresCSRF reports whether a request needs a CSRF token: write
+// methods on /api/* routes, excluding /api/login (no session exists yet).
+func requiresCSRF(r *http.Request) bool {
+	// Safe methods never need CSRF protection.
+	switch r.Method {
+	case http.MethodGet, http.MethodHead, http.MethodOptions:
+		return false
+	}
+	if !strings.HasPrefix(r.URL.Path, "/api/") {
+		return false
+	}
+	if r.URL.Path == "/api/login" {
+		return false
+	}
+	return true
+}
+
+// middlewareError mirrors the error envelope written by the api package,
+// duplicated here so this package does not depend on it.
+type middlewareError struct {
+	Error middlewareErrorDetail `json:"error"`
+}
+
+type middlewareErrorDetail struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+// writeError writes the structured JSON error envelope with the given status.
+func writeError(w http.ResponseWriter, status int, code, message string) {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+	if err := json.NewEncoder(w).Encode(middlewareError{
+		Error: middlewareErrorDetail{Code: code, Message: message},
+	}); err != nil {
+		// Status already sent; nothing left to do but log.
+		slog.Error("writing middleware error response", "error", err)
+	}
+}
diff --git a/admin/auth/middleware_test.go b/admin/auth/middleware_test.go
new file mode 100644
index 0000000..ffabab9
--- /dev/null
+++ b/admin/auth/middleware_test.go
@@ -0,0 +1,235 @@
+// Copyright 2026 CloudBlue LLC
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" +) + +type mockAuthenticator struct { + user *User + err error +} + +func (m *mockAuthenticator) Authenticate(_ *http.Request) (*User, error) { + return m.user, m.err +} + +func echoUserHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + user := ContextUser(r.Context()) + if user != nil { + w.Header().Set("X-User", user.Username) + } + w.WriteHeader(http.StatusOK) + }) +} + +// --- RequireAuth --- + +func TestRequireAuth_ProtectedRoute_Unauthenticated_Returns401(t *testing.T) { + t.Parallel() + + handler := RequireAuth(&mockAuthenticator{err: ErrUnauthenticated}, echoUserHandler()) + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized) + } + + var resp middlewareError + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("decode error: %v", err) + } + if resp.Error.Code != "UNAUTHORIZED" { + t.Errorf("code = %q, want %q", resp.Error.Code, "UNAUTHORIZED") + } +} + +func TestRequireAuth_ProtectedRoute_Authenticated_PassesThrough(t *testing.T) { + t.Parallel() + + user := &User{ID: 1, Username: "admin"} + handler := RequireAuth(&mockAuthenticator{user: user}, echoUserHandler()) + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } + if got := rec.Header().Get("X-User"); got != "admin" { + t.Errorf("X-User = %q, want %q", got, "admin") + } +} + +func TestRequireAuth_LoginRoute_SkipsAuth(t *testing.T) { + t.Parallel() + + handler := RequireAuth(&mockAuthenticator{err: ErrUnauthenticated}, echoUserHandler()) + req := httptest.NewRequest(http.MethodPost, "/api/login", 
nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d (login should skip auth)", rec.Code, http.StatusOK) + } +} + +func TestRequireAuth_HealthRoute_SkipsAuth(t *testing.T) { + t.Parallel() + + handler := RequireAuth(&mockAuthenticator{err: ErrUnauthenticated}, echoUserHandler()) + req := httptest.NewRequest(http.MethodGet, "/api/health", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d (health should skip auth)", rec.Code, http.StatusOK) + } +} + +func TestRequireAuth_SPARoute_SkipsAuth(t *testing.T) { + t.Parallel() + + handler := RequireAuth(&mockAuthenticator{err: ErrUnauthenticated}, echoUserHandler()) + req := httptest.NewRequest(http.MethodGet, "/dashboard", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d (non-API should skip auth)", rec.Code, http.StatusOK) + } +} + +// --- CSRFProtection --- + +func TestCSRF_SafeMethod_SkipsCheck(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodGet, "/api/instances", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d (GET should skip CSRF)", rec.Code, http.StatusOK) + } +} + +func TestCSRF_LoginRoute_SkipsCheck(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodPost, "/api/login", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d (login should skip CSRF)", rec.Code, http.StatusOK) + } +} + +func TestCSRF_WriteRequest_MissingCookie_Returns403(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodPost, 
"/api/instances", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusForbidden { + t.Errorf("status = %d, want %d", rec.Code, http.StatusForbidden) + } +} + +func TestCSRF_WriteRequest_MissingHeader_Returns403(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodPost, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: CSRFCookieName, Value: "token123"}) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusForbidden { + t.Errorf("status = %d, want %d", rec.Code, http.StatusForbidden) + } +} + +func TestCSRF_WriteRequest_MismatchedToken_Returns403(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodPost, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: CSRFCookieName, Value: "token123"}) + req.Header.Set(CSRFHeaderName, "different-token") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusForbidden { + t.Errorf("status = %d, want %d", rec.Code, http.StatusForbidden) + } +} + +func TestCSRF_WriteRequest_ValidToken_PassesThrough(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodPost, "/api/instances", nil) + req.AddCookie(&http.Cookie{Name: CSRFCookieName, Value: "token123"}) + req.Header.Set(CSRFHeaderName, "token123") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } +} + +func TestCSRF_DeleteRequest_RequiresToken(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodDelete, "/api/instances/1", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusForbidden { + t.Errorf("status = %d, want %d (DELETE should 
require CSRF)", rec.Code, http.StatusForbidden) + } +} + +func TestCSRF_PutRequest_ValidToken_PassesThrough(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodPut, "/api/user/password", nil) + req.AddCookie(&http.Cookie{Name: CSRFCookieName, Value: "csrf-val"}) + req.Header.Set(CSRFHeaderName, "csrf-val") + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } +} + +func TestCSRF_NonAPIRoute_SkipsCheck(t *testing.T) { + t.Parallel() + + handler := CSRFProtection(echoUserHandler()) + req := httptest.NewRequest(http.MethodPost, "/some/form", nil) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d (non-API should skip CSRF)", rec.Code, http.StatusOK) + } +} diff --git a/admin/auth/ratelimit.go b/admin/auth/ratelimit.go new file mode 100644 index 0000000..3da339f --- /dev/null +++ b/admin/auth/ratelimit.go @@ -0,0 +1,95 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package auth + +import ( + "sync" + "time" +) + +// RateLimiter tracks failed login attempts per IP using a sliding window. +type RateLimiter struct { + mu sync.Mutex + attempts map[string][]time.Time + maxAttempts int + window time.Duration + now func() time.Time // injectable clock for testing +} + +// NewRateLimiter creates a rate limiter that allows maxAttempts failures +// within the given window duration per IP. +func NewRateLimiter(maxAttempts int, window time.Duration) *RateLimiter { + return &RateLimiter{ + attempts: make(map[string][]time.Time), + maxAttempts: maxAttempts, + window: window, + now: time.Now, + } +} + +// Allow returns true if the IP has not exceeded the failure limit. 
+func (rl *RateLimiter) Allow(ip string) bool {
+	rl.mu.Lock()
+	defer rl.mu.Unlock()
+
+	// Drop attempts that have aged out of the window before counting,
+	// so the check reflects only failures inside the sliding window.
+	rl.prune(ip)
+	return len(rl.attempts[ip]) < rl.maxAttempts
+}
+
+// Record logs a failed login attempt for the given IP.
+func (rl *RateLimiter) Record(ip string) {
+	rl.mu.Lock()
+	defer rl.mu.Unlock()
+
+	// Timestamps come from the injectable clock so tests can control time.
+	rl.attempts[ip] = append(rl.attempts[ip], rl.now())
+}
+
+// Reset clears the failure counter for an IP (called on successful login).
+func (rl *RateLimiter) Reset(ip string) {
+	rl.mu.Lock()
+	defer rl.mu.Unlock()
+
+	delete(rl.attempts, ip)
+}
+
+// prune removes attempts older than the sliding window. Must be called under lock.
+func (rl *RateLimiter) prune(ip string) {
+	attempts := rl.attempts[ip]
+	if len(attempts) == 0 {
+		return
+	}
+
+	cutoff := rl.now().Add(-rl.window)
+	// Expired entries form a prefix of the slice — assumes Record appends
+	// timestamps in non-decreasing order (true for a monotonic clock; TODO
+	// confirm for the injected test clock).
+	i := 0
+	for i < len(attempts) && attempts[i].Before(cutoff) {
+		i++
+	}
+	if i > 0 {
+		rl.attempts[ip] = attempts[i:]
+	}
+	// Drop the map entry entirely once no attempts remain, so the map does
+	// not accumulate empty slices.
+	if len(rl.attempts[ip]) == 0 {
+		delete(rl.attempts, ip)
+	}
+}
+
+// Sweep removes all expired entries across all IPs.
+// Call periodically from a background goroutine to prevent unbounded growth
+// from IPs that record failures but never return.
+func (rl *RateLimiter) Sweep() {
+	rl.mu.Lock()
+	defer rl.mu.Unlock()
+
+	// Delegate to the same per-IP pruning used by Allow, instead of
+	// duplicating the cutoff scan inline. Deleting or reassigning map
+	// entries while ranging over the map is safe in Go.
+	for ip := range rl.attempts {
+		rl.prune(ip)
+	}
+}
diff --git a/admin/auth/ratelimit_test.go b/admin/auth/ratelimit_test.go
new file mode 100644
index 0000000..b552063
--- /dev/null
+++ b/admin/auth/ratelimit_test.go
@@ -0,0 +1,159 @@
+// Copyright 2026 CloudBlue LLC
+// SPDX-License-Identifier: Apache-2.0
+
+package auth
+
+import (
+	"testing"
+	"time"
+)
+
+func TestRateLimiter_AllowsUnderLimit(t *testing.T) {
+	t.Parallel()
+	rl := NewRateLimiter(3, time.Minute)
+
+	for i := range 3 {
+		if !rl.Allow("1.2.3.4") {
+			t.Fatalf("attempt %d should be allowed", i+1)
+		}
+		rl.Record("1.2.3.4")
+	}
+}
+
+func TestRateLimiter_BlocksAtLimit(t *testing.T) {
+	t.Parallel()
+	rl := NewRateLimiter(3, time.Minute)
+
+	for range 3 {
+		rl.Record("1.2.3.4")
+	}
+
+	if rl.Allow("1.2.3.4") {
+		t.Error("should be blocked after 3 failures")
+	}
+}
+
+func TestRateLimiter_DifferentIPs_Independent(t *testing.T) {
+	t.Parallel()
+	rl := NewRateLimiter(2, time.Minute)
+
+	rl.Record("1.1.1.1")
+	rl.Record("1.1.1.1")
+
+	if rl.Allow("1.1.1.1") {
+		t.Error("1.1.1.1 should be blocked")
+	}
+	if !rl.Allow("2.2.2.2") {
+		t.Error("2.2.2.2 should be allowed (separate IP)")
+	}
+}
+
+func TestRateLimiter_ResetClearsCounter(t *testing.T) {
+	t.Parallel()
+	rl := NewRateLimiter(2, time.Minute)
+
+	rl.Record("1.2.3.4")
+	rl.Record("1.2.3.4")
+	rl.Reset("1.2.3.4")
+
+	if !rl.Allow("1.2.3.4") {
+		t.Error("should be allowed after reset")
+	}
+}
+
+func TestRateLimiter_SlidingWindow_PrunesOldAttempts(t *testing.T) {
+	t.Parallel()
+	rl := NewRateLimiter(2, time.Minute)
+
+	now := time.Now()
+	rl.now = func() time.Time { return now }
+
+	rl.Record("1.2.3.4")
+	rl.Record("1.2.3.4")
+
+	// Advance past the window.
+ rl.now = func() time.Time { return now.Add(61 * time.Second) } + + if !rl.Allow("1.2.3.4") { + t.Error("old attempts should be pruned; IP should be allowed") + } +} + +func TestRateLimiter_PartialPrune_KeepsRecentAttempts(t *testing.T) { + t.Parallel() + rl := NewRateLimiter(2, time.Minute) + + now := time.Now() + rl.now = func() time.Time { return now } + rl.Record("1.2.3.4") // t=0 + + rl.now = func() time.Time { return now.Add(50 * time.Second) } + rl.Record("1.2.3.4") // t=50s + + // At t=61s, the first attempt is pruned but the second is still within window. + rl.now = func() time.Time { return now.Add(61 * time.Second) } + + if !rl.Allow("1.2.3.4") { + t.Error("should be allowed (only 1 recent attempt after prune)") + } + + rl.Record("1.2.3.4") // second recent attempt + + if rl.Allow("1.2.3.4") { + t.Error("should be blocked (2 recent attempts)") + } +} + +func TestRateLimiter_Sweep_RemovesExpiredEntries(t *testing.T) { + t.Parallel() + rl := NewRateLimiter(2, time.Minute) + + now := time.Now() + rl.now = func() time.Time { return now } + + rl.Record("1.1.1.1") + rl.Record("2.2.2.2") + rl.Record("3.3.3.3") + + // Advance past the window. + rl.now = func() time.Time { return now.Add(61 * time.Second) } + + rl.Sweep() + + rl.mu.Lock() + remaining := len(rl.attempts) + rl.mu.Unlock() + + if remaining != 0 { + t.Errorf("expected 0 entries after sweep, got %d", remaining) + } +} + +func TestRateLimiter_Sweep_KeepsRecentEntries(t *testing.T) { + t.Parallel() + rl := NewRateLimiter(2, time.Minute) + + now := time.Now() + rl.now = func() time.Time { return now } + rl.Record("1.1.1.1") // old + + rl.now = func() time.Time { return now.Add(50 * time.Second) } + rl.Record("2.2.2.2") // recent + + // At t=61s, 1.1.1.1 is expired but 2.2.2.2 is still within window. 
+ rl.now = func() time.Time { return now.Add(61 * time.Second) } + + rl.Sweep() + + rl.mu.Lock() + remaining := len(rl.attempts) + rl.mu.Unlock() + + if remaining != 1 { + t.Errorf("expected 1 entry after sweep, got %d", remaining) + } + + if !rl.Allow("1.1.1.1") { + t.Error("1.1.1.1 should be allowed after sweep removed its expired entry") + } +} diff --git a/admin/cmd/chaperone-admin/main.go b/admin/cmd/chaperone-admin/main.go new file mode 100644 index 0000000..40d2691 --- /dev/null +++ b/admin/cmd/chaperone-admin/main.go @@ -0,0 +1,339 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "log/slog" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "golang.org/x/term" + + "github.com/cloudblue/chaperone/admin" + "github.com/cloudblue/chaperone/admin/auth" + "github.com/cloudblue/chaperone/admin/config" + "github.com/cloudblue/chaperone/admin/metrics" + "github.com/cloudblue/chaperone/admin/poller" + "github.com/cloudblue/chaperone/admin/store" +) + +var ( + Version = "dev" + GitCommit = "unknown" + BuildDate = "unknown" +) + +func main() { + if err := run(); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } +} + +func run() error { + if len(os.Args) > 1 && !strings.HasPrefix(os.Args[1], "-") { + switch os.Args[1] { + case "create-user": + return runCreateUser(os.Args[2:]) + case "reset-password": + return runResetPassword(os.Args[2:]) + case "serve": + return runServer(os.Args[2:]) + default: + return fmt.Errorf("unknown command %q (available: serve, create-user, reset-password)", os.Args[1]) + } + } + return runServer(os.Args[1:]) +} + +func runServer(args []string) error { + fs := flag.NewFlagSet("serve", flag.ExitOnError) + fs.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage: chaperone-admin [command] [flags]\n\n") + fmt.Fprintf(os.Stderr, "Commands:\n") + fmt.Fprintf(os.Stderr, " serve Start the admin portal server 
(default)\n") + fmt.Fprintf(os.Stderr, " create-user Create a new admin user\n") + fmt.Fprintf(os.Stderr, " reset-password Reset a user's password\n") + fmt.Fprintf(os.Stderr, "\nServer flags:\n") + fs.PrintDefaults() + } + + configPath := fs.String("config", "", "Path to config file (default: chaperone-admin.yaml)") + showVersion := fs.Bool("version", false, "Print version and exit") + if err := fs.Parse(args); err != nil { + return err + } + + if *showVersion { + fmt.Printf("chaperone-admin %s (commit: %s, built: %s)\n", Version, GitCommit, BuildDate) + return nil + } + + cfg, err := config.Load(*configPath) + if err != nil { + return fmt.Errorf("loading configuration: %w", err) + } + + configureLogging(cfg) + + slog.Info("starting chaperone-admin", "version", Version, "commit", GitCommit, "built", BuildDate) + + st, err := store.Open(context.Background(), cfg.Database.Path) + if err != nil { + return fmt.Errorf("opening database: %w", err) + } + defer st.Close() + + collector := metrics.NewCollector(metrics.DefaultCapacity) + + srv, err := admin.NewServer(cfg, st, collector) + if err != nil { + return fmt.Errorf("creating server: %w", err) + } + + bgCtx, bgCancel := context.WithCancel(context.Background()) + defer bgCancel() + startBackground(bgCtx, cfg, st, collector, srv) + + return serve(cfg.Server.Addr, srv) +} + +func startBackground(ctx context.Context, cfg *config.Config, st *store.Store, collector *metrics.Collector, srv *admin.Server) { + p := poller.New(st, collector, cfg.Scraper.Interval.Unwrap(), cfg.Scraper.Timeout.Unwrap()) + go p.Run(ctx) + go cleanupExpiredSessions(ctx, st) + go sweepRateLimiter(ctx, srv) + + if cfg.Audit.RetentionDays == nil || *cfg.Audit.RetentionDays > 0 { + retentionDays := 90 + if cfg.Audit.RetentionDays != nil { + retentionDays = *cfg.Audit.RetentionDays + } + go cleanupOldAuditEntries(ctx, st, retentionDays) + } +} + +func runCreateUser(args []string) error { + fs := flag.NewFlagSet("create-user", flag.ExitOnError) + 
configPath := fs.String("config", "", "Path to config file") + username := fs.String("username", "", "Username for the new user") + if err := fs.Parse(args); err != nil { + return err + } + + if *username == "" { + return fmt.Errorf("--username is required") + } + + password, err := readPasswordConfirm("Password: ", "Confirm password: ") + if err != nil { + return err + } + + svc, cleanup, err := openAuthService(*configPath) + if err != nil { + return err + } + defer cleanup() + + if err := svc.CreateUser(context.Background(), *username, password); err != nil { + return fmt.Errorf("creating user: %w", err) + } + + fmt.Fprintf(os.Stderr, "User %q created successfully.\n", *username) + return nil +} + +func runResetPassword(args []string) error { + fs := flag.NewFlagSet("reset-password", flag.ExitOnError) + configPath := fs.String("config", "", "Path to config file") + username := fs.String("username", "", "Username to reset") + if err := fs.Parse(args); err != nil { + return err + } + + if *username == "" { + return fmt.Errorf("--username is required") + } + + password, err := readPasswordConfirm("New password: ", "Confirm password: ") + if err != nil { + return err + } + + svc, cleanup, err := openAuthService(*configPath) + if err != nil { + return err + } + defer cleanup() + + if err := svc.ResetPassword(context.Background(), *username, password); err != nil { + return fmt.Errorf("resetting password: %w", err) + } + + fmt.Fprintf(os.Stderr, "Password for %q has been reset. 
All existing sessions invalidated.\n", *username) + return nil +} + +func openAuthService(configPath string) (*auth.Service, func(), error) { + cfg, err := config.Load(configPath) + if err != nil { + return nil, nil, fmt.Errorf("loading configuration: %w", err) + } + + st, err := store.Open(context.Background(), cfg.Database.Path) + if err != nil { + return nil, nil, fmt.Errorf("opening database: %w", err) + } + + svc := auth.NewService(st, cfg.Session.MaxAge.Unwrap(), cfg.Session.IdleTimeout.Unwrap()) + cleanup := func() { + if err := st.Close(); err != nil { + slog.Error("closing database", "error", err) + } + } + return svc, cleanup, nil +} + +func readPasswordConfirm(prompt, confirmPrompt string) (string, error) { + password, err := readPassword(prompt) + if err != nil { + return "", err + } + confirm, err := readPassword(confirmPrompt) + if err != nil { + return "", err + } + if password != confirm { + return "", fmt.Errorf("passwords do not match") + } + return password, nil +} + +func readPassword(prompt string) (string, error) { + fmt.Fprint(os.Stderr, prompt) + password, err := term.ReadPassword(int(os.Stdin.Fd())) // #nosec G115 -- stdin fd is always 0 + fmt.Fprintln(os.Stderr) // newline after hidden input + if err != nil { + return "", fmt.Errorf("reading password: %w", err) + } + return string(password), nil +} + +func cleanupExpiredSessions(ctx context.Context, st *store.Store) { + ticker := time.NewTicker(time.Hour) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + n, err := st.DeleteExpiredSessions(ctx) + if err != nil { + slog.Error("cleaning up expired sessions", "error", err) + } else if n > 0 { + slog.Info("cleaned up expired sessions", "count", n) + } + } + } +} + +func sweepRateLimiter(ctx context.Context, srv *admin.Server) { + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + srv.SweepRateLimiter() + } + } +} + 
+// cleanupOldAuditEntries deletes audit entries older than retentionDays,
+// once at startup and then every 24h, until ctx is cancelled.
+func cleanupOldAuditEntries(ctx context.Context, st *store.Store, retentionDays int) {
+	ticker := time.NewTicker(24 * time.Hour)
+	defer ticker.Stop()
+
+	runCleanup := func() {
+		cutoff := time.Now().AddDate(0, 0, -retentionDays)
+		n, err := st.DeleteAuditEntriesBefore(ctx, cutoff)
+		if err != nil {
+			slog.Error("cleaning up old audit entries", "error", err)
+		} else if n > 0 {
+			slog.Info("cleaned up old audit entries", "count", n, "retention_days", retentionDays)
+		}
+	}
+
+	// Run immediately so a long-stopped instance is cleaned on startup
+	// instead of waiting a full tick.
+	runCleanup()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			runCleanup()
+		}
+	}
+}
+
+// serve runs the HTTP server until it fails or a SIGINT/SIGTERM arrives,
+// then performs a graceful shutdown with a 30-second deadline.
+func serve(addr string, srv *admin.Server) error {
+	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+	defer stop()
+
+	errCh := make(chan error, 1)
+	go func() {
+		slog.Info("listening", "addr", addr)
+		errCh <- srv.ListenAndServe()
+	}()
+
+	select {
+	case err := <-errCh:
+		if errors.Is(err, http.ErrServerClosed) {
+			return nil
+		}
+		return fmt.Errorf("HTTP server error: %w", err)
+	case <-ctx.Done():
+		slog.Info("shutting down")
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancel()
+		if err := srv.Shutdown(shutdownCtx); err != nil {
+			return fmt.Errorf("shutting down server: %w", err)
+		}
+		return nil
+	}
+}
+
+// configureLogging installs a process-wide slog default built from cfg.Log
+// (level: debug/warn/error, defaulting to info; format: text or JSON).
+func configureLogging(cfg *config.Config) {
+	var level slog.Level
+	switch cfg.Log.Level {
+	case "debug":
+		level = slog.LevelDebug
+	case "warn":
+		level = slog.LevelWarn
+	case "error":
+		level = slog.LevelError
+	default:
+		level = slog.LevelInfo
+	}
+
+	opts := &slog.HandlerOptions{Level: level}
+	var handler slog.Handler
+	// Compare against the config package's shared constant rather than a
+	// bare "text" literal, so the accepted value stays in sync.
+	if cfg.Log.Format == config.LogFormatText {
+		handler = slog.NewTextHandler(os.Stdout, opts)
+	} else {
+		handler = slog.NewJSONHandler(os.Stdout, opts)
+	}
+	slog.SetDefault(slog.New(handler))
+}
diff --git a/admin/cmd/seed-user/main.go b/admin/cmd/seed-user/main.go
new file mode 100644
index 0000000..755ac87
--- /dev/null
+++ b/admin/cmd/seed-user/main.go
@@ -0,0 +1,52 @@
+// Copyright 2026 CloudBlue LLC
+// SPDX-License-Identifier: Apache-2.0
+
+// seed-user is a test-only tool that creates a user in the admin portal
+// database without interactive terminal input. Used by E2E tests.
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/cloudblue/chaperone/admin/auth"
+	"github.com/cloudblue/chaperone/admin/store"
+)
+
+func main() {
+	if err := run(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
+// run parses flags, opens the store, and creates the requested user.
+func run() error {
+	dbPath := flag.String("db", "", "Path to SQLite database")
+	username := flag.String("username", "", "Username to create")
+	password := flag.String("password", "", "Password for the user")
+	flag.Parse()
+
+	if *dbPath == "" || *username == "" || *password == "" {
+		return fmt.Errorf("usage: seed-user --db PATH --username NAME --password PASSWORD")
+	}
+
+	ctx := context.Background()
+
+	st, err := store.Open(ctx, *dbPath)
+	if err != nil {
+		return fmt.Errorf("opening database: %w", err)
+	}
+	defer st.Close()
+
+	// Session lifetimes are irrelevant for a one-shot seeding tool; these
+	// match the config package's defaults (24h max age, 2h idle timeout).
+	svc := auth.NewService(st, 24*time.Hour, 2*time.Hour)
+
+	if err := svc.CreateUser(ctx, *username, *password); err != nil {
+		return fmt.Errorf("creating user: %w", err)
+	}
+
+	fmt.Printf("User %q created successfully\n", *username)
+	return nil
+}
diff --git a/admin/config/config.go b/admin/config/config.go
new file mode 100644
index 0000000..82ef646
--- /dev/null
+++ b/admin/config/config.go
@@ -0,0 +1,93 @@
+// Copyright 2026 CloudBlue LLC
+// SPDX-License-Identifier: Apache-2.0
+
+package config
+
+import (
+	"fmt"
+	"time"
+
+	"gopkg.in/yaml.v3"
+)
+
+// EnvPrefix is the environment variable prefix for admin portal configuration.
+const EnvPrefix = "CHAPERONE_ADMIN"
+
+// Default configuration values.
+const (
+	DefaultAddr      = "127.0.0.1:8080"
+	DefaultDBPath    = "./chaperone-admin.db"
+	DefaultLogLevel  = "info"
+	DefaultLogFormat = "json"
+	LogFormatText    = "text"
+)
+
+// Config holds the admin portal configuration.
+type Config struct { + Server ServerConfig `yaml:"server"` + Database DatabaseConfig `yaml:"database"` + Scraper ScraperConfig `yaml:"scraper"` + Session SessionConfig `yaml:"session"` + Audit AuditConfig `yaml:"audit"` + Log LogConfig `yaml:"log"` +} + +// ServerConfig configures the HTTP server. +type ServerConfig struct { + Addr string `yaml:"addr"` + SecureCookies bool `yaml:"secure_cookies"` +} + +// DatabaseConfig configures the SQLite database. +type DatabaseConfig struct { + Path string `yaml:"path"` +} + +// ScraperConfig configures the proxy metrics scraper. +type ScraperConfig struct { + Interval Duration `yaml:"interval"` + Timeout Duration `yaml:"timeout"` +} + +// SessionConfig configures session management. +type SessionConfig struct { + MaxAge Duration `yaml:"max_age"` + IdleTimeout Duration `yaml:"idle_timeout"` +} + +// AuditConfig configures the audit log. +type AuditConfig struct { + RetentionDays *int `yaml:"retention_days"` +} + +// LogConfig configures structured logging. +type LogConfig struct { + Level string `yaml:"level"` + Format string `yaml:"format"` +} + +// Duration is a time.Duration that unmarshals from YAML duration strings +// like "10s", "5m", "24h". +type Duration time.Duration + +// Unwrap returns the underlying time.Duration. 
+func (d Duration) Unwrap() time.Duration { + return time.Duration(d) +} + +func (d Duration) String() string { + return time.Duration(d).String() +} + +func (d *Duration) UnmarshalYAML(node *yaml.Node) error { + dur, err := time.ParseDuration(node.Value) + if err != nil { + return fmt.Errorf("invalid duration %q: %w", node.Value, err) + } + *d = Duration(dur) + return nil +} + +func (d Duration) MarshalYAML() (interface{}, error) { + return time.Duration(d).String(), nil +} diff --git a/admin/config/config_test.go b/admin/config/config_test.go new file mode 100644 index 0000000..065a143 --- /dev/null +++ b/admin/config/config_test.go @@ -0,0 +1,126 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "testing" + "time" + + "gopkg.in/yaml.v3" +) + +func TestDuration_UnmarshalYAML_ValidDuration_ParsesCorrectly(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + want time.Duration + }{ + {"seconds", `"10s"`, 10 * time.Second}, + {"minutes", `"5m"`, 5 * time.Minute}, + {"hours", `"24h"`, 24 * time.Hour}, + {"milliseconds", `"500ms"`, 500 * time.Millisecond}, + {"compound", `"1h30m"`, 90 * time.Minute}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Arrange + var d Duration + + // Act + err := yaml.Unmarshal([]byte(tt.input), &d) + + // Assert + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if d.Unwrap() != tt.want { + t.Errorf("duration = %v, want %v", d.Unwrap(), tt.want) + } + }) + } +} + +func TestDuration_UnmarshalYAML_InvalidDuration_ReturnsError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + }{ + {"no unit", `"10"`}, + {"invalid unit", `"10x"`}, + {"empty", `""`}, + {"text", `"forever"`}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Arrange + var d Duration + + // Act + err := yaml.Unmarshal([]byte(tt.input), &d) + + // 
Assert + if err == nil { + t.Error("expected error, got nil") + } + }) + } +} + +func TestDuration_Unwrap_ReturnsDuration(t *testing.T) { + t.Parallel() + + // Arrange + d := Duration(42 * time.Second) + + // Act + got := d.Unwrap() + + // Assert + if got != 42*time.Second { + t.Errorf("Unwrap() = %v, want %v", got, 42*time.Second) + } +} + +func TestDuration_String_FormatsCorrectly(t *testing.T) { + t.Parallel() + + // Arrange + d := Duration(90 * time.Second) + + // Act + got := d.String() + + // Assert + if got != "1m30s" { + t.Errorf("String() = %q, want %q", got, "1m30s") + } +} + +func TestDuration_MarshalYAML_FormatsCorrectly(t *testing.T) { + t.Parallel() + + // Arrange + d := Duration(10 * time.Second) + + // Act + got, err := d.MarshalYAML() + + // Assert + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != "10s" { + t.Errorf("MarshalYAML() = %q, want %q", got, "10s") + } +} diff --git a/admin/config/loader.go b/admin/config/loader.go new file mode 100644 index 0000000..341c132 --- /dev/null +++ b/admin/config/loader.go @@ -0,0 +1,148 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" + + "gopkg.in/yaml.v3" +) + +// Load reads configuration from the given path, applies defaults and +// environment variable overrides, and validates the result. +func Load(path string) (*Config, error) { + path = resolveConfigPath(path) + + cfg := &Config{} + if err := loadYAML(path, cfg); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("reading config %s: %w", path, err) + } + // Config file not found — proceed with defaults + env overrides. 
+ } + + applyDefaults(cfg) + + if err := applyEnvOverrides(cfg); err != nil { + return nil, fmt.Errorf("applying env overrides: %w", err) + } + + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("validating config: %w", err) + } + + return cfg, nil +} + +func resolveConfigPath(path string) string { + if path != "" { + return path + } + if v := os.Getenv(EnvPrefix + "_CONFIG"); v != "" { + return v + } + return "chaperone-admin.yaml" +} + +func loadYAML(path string, cfg *Config) error { + // #nosec G304 -- path comes from trusted sources: CLI flag, env var, or hardcoded default. + data, err := os.ReadFile(path) + if err != nil { + return err + } + if err := yaml.Unmarshal(data, cfg); err != nil { + return fmt.Errorf("parsing YAML: %w", err) + } + return nil +} + +func applyDefaults(cfg *Config) { + if cfg.Server.Addr == "" { + cfg.Server.Addr = DefaultAddr + } + if cfg.Database.Path == "" { + cfg.Database.Path = DefaultDBPath + } + if cfg.Scraper.Interval == 0 { + cfg.Scraper.Interval = Duration(10 * time.Second) + } + if cfg.Scraper.Timeout == 0 { + cfg.Scraper.Timeout = Duration(5 * time.Second) + } + if cfg.Session.MaxAge == 0 { + cfg.Session.MaxAge = Duration(24 * time.Hour) + } + if cfg.Session.IdleTimeout == 0 { + cfg.Session.IdleTimeout = Duration(2 * time.Hour) + } + if cfg.Audit.RetentionDays == nil { + cfg.Audit.RetentionDays = intPtr(90) + } + if cfg.Log.Level == "" { + cfg.Log.Level = DefaultLogLevel + } + if cfg.Log.Format == "" { + cfg.Log.Format = DefaultLogFormat + } +} + +func applyEnvOverrides(cfg *Config) error { + var errs []error + + if v := getEnv("SERVER_ADDR"); v != "" { + cfg.Server.Addr = v + } + if v := getEnv("SERVER_SECURE_COOKIES"); v != "" { + cfg.Server.SecureCookies = v == "true" || v == "1" + } + if v := getEnv("DATABASE_PATH"); v != "" { + cfg.Database.Path = v + } + + parseDuration(&cfg.Scraper.Interval, "SCRAPER_INTERVAL", &errs) + parseDuration(&cfg.Scraper.Timeout, "SCRAPER_TIMEOUT", &errs) + 
parseDuration(&cfg.Session.MaxAge, "SESSION_MAX_AGE", &errs) + parseDuration(&cfg.Session.IdleTimeout, "SESSION_IDLE_TIMEOUT", &errs) + + if v := getEnv("AUDIT_RETENTION_DAYS"); v != "" { + n, err := strconv.Atoi(v) + if err != nil { + errs = append(errs, fmt.Errorf("AUDIT_RETENTION_DAYS: %w", err)) + } else { + cfg.Audit.RetentionDays = &n + } + } + if v := getEnv("LOG_LEVEL"); v != "" { + cfg.Log.Level = strings.ToLower(v) + } + if v := getEnv("LOG_FORMAT"); v != "" { + cfg.Log.Format = strings.ToLower(v) + } + + return errors.Join(errs...) +} + +func parseDuration(dst *Duration, envKey string, errs *[]error) { + v := getEnv(envKey) + if v == "" { + return + } + d, err := time.ParseDuration(v) + if err != nil { + *errs = append(*errs, fmt.Errorf("%s: %w", envKey, err)) + return + } + *dst = Duration(d) +} + +func getEnv(key string) string { + return os.Getenv(EnvPrefix + "_" + key) +} + +func intPtr(v int) *int { return &v } diff --git a/admin/config/loader_test.go b/admin/config/loader_test.go new file mode 100644 index 0000000..320597e --- /dev/null +++ b/admin/config/loader_test.go @@ -0,0 +1,324 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "os" + "path/filepath" + "testing" + "time" +) + +func writeTestConfig(t *testing.T, content string) string { + t.Helper() + dir := t.TempDir() + path := filepath.Join(dir, "config.yaml") + if err := os.WriteFile(path, []byte(content), 0o600); err != nil { + t.Fatalf("failed to write test config: %v", err) + } + return path +} + +func TestLoad_NoFile_AppliesDefaults(t *testing.T) { + t.Parallel() + + // Arrange — point to a non-existent file + path := filepath.Join(t.TempDir(), "nonexistent.yaml") + + // Act + cfg, err := Load(path) + + // Assert + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Server.Addr != "127.0.0.1:8080" { + t.Errorf("Server.Addr = %q, want %q", cfg.Server.Addr, "127.0.0.1:8080") + } + if cfg.Database.Path != 
"./chaperone-admin.db" { + t.Errorf("Database.Path = %q, want %q", cfg.Database.Path, "./chaperone-admin.db") + } + if cfg.Scraper.Interval.Unwrap() != 10*time.Second { + t.Errorf("Scraper.Interval = %v, want %v", cfg.Scraper.Interval.Unwrap(), 10*time.Second) + } + if cfg.Scraper.Timeout.Unwrap() != 5*time.Second { + t.Errorf("Scraper.Timeout = %v, want %v", cfg.Scraper.Timeout.Unwrap(), 5*time.Second) + } + if cfg.Session.MaxAge.Unwrap() != 24*time.Hour { + t.Errorf("Session.MaxAge = %v, want %v", cfg.Session.MaxAge.Unwrap(), 24*time.Hour) + } + if cfg.Session.IdleTimeout.Unwrap() != 2*time.Hour { + t.Errorf("Session.IdleTimeout = %v, want %v", cfg.Session.IdleTimeout.Unwrap(), 2*time.Hour) + } + if *cfg.Audit.RetentionDays != 90 { + t.Errorf("Audit.RetentionDays = %d, want %d", *cfg.Audit.RetentionDays, 90) + } + if cfg.Log.Level != "info" { + t.Errorf("Log.Level = %q, want %q", cfg.Log.Level, "info") + } + if cfg.Log.Format != "json" { + t.Errorf("Log.Format = %q, want %q", cfg.Log.Format, "json") + } +} + +func TestLoad_ValidYAML_ParsesAllFields(t *testing.T) { + t.Parallel() + + // Arrange + path := writeTestConfig(t, ` +server: + addr: "0.0.0.0:9090" + secure_cookies: true +database: + path: "/var/lib/admin.db" +scraper: + interval: "30s" + timeout: "10s" +session: + max_age: "12h" + idle_timeout: "1h" +audit: + retention_days: 30 +log: + level: "debug" + format: "text" +`) + + // Act + cfg, err := Load(path) + + // Assert + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Server.Addr != "0.0.0.0:9090" { + t.Errorf("Server.Addr = %q, want %q", cfg.Server.Addr, "0.0.0.0:9090") + } + if !cfg.Server.SecureCookies { + t.Error("Server.SecureCookies = false, want true") + } + if cfg.Database.Path != "/var/lib/admin.db" { + t.Errorf("Database.Path = %q, want %q", cfg.Database.Path, "/var/lib/admin.db") + } + if cfg.Scraper.Interval.Unwrap() != 30*time.Second { + t.Errorf("Scraper.Interval = %v, want %v", cfg.Scraper.Interval.Unwrap(), 
30*time.Second) + } + if cfg.Scraper.Timeout.Unwrap() != 10*time.Second { + t.Errorf("Scraper.Timeout = %v, want %v", cfg.Scraper.Timeout.Unwrap(), 10*time.Second) + } + if cfg.Session.MaxAge.Unwrap() != 12*time.Hour { + t.Errorf("Session.MaxAge = %v, want %v", cfg.Session.MaxAge.Unwrap(), 12*time.Hour) + } + if cfg.Session.IdleTimeout.Unwrap() != 1*time.Hour { + t.Errorf("Session.IdleTimeout = %v, want %v", cfg.Session.IdleTimeout.Unwrap(), 1*time.Hour) + } + if *cfg.Audit.RetentionDays != 30 { + t.Errorf("Audit.RetentionDays = %d, want %d", *cfg.Audit.RetentionDays, 30) + } + if cfg.Log.Level != "debug" { + t.Errorf("Log.Level = %q, want %q", cfg.Log.Level, "debug") + } + if cfg.Log.Format != "text" { + t.Errorf("Log.Format = %q, want %q", cfg.Log.Format, "text") + } +} + +func TestLoad_ZeroRetention_PreservedAsKeepForever(t *testing.T) { + t.Parallel() + + // Arrange — explicit retention_days: 0 means "keep forever" + path := writeTestConfig(t, ` +server: + addr: "127.0.0.1:8080" +database: + path: "./test.db" +scraper: + interval: "10s" + timeout: "5s" +session: + max_age: "24h" + idle_timeout: "2h" +audit: + retention_days: 0 +log: + level: "info" + format: "json" +`) + + // Act + cfg, err := Load(path) + + // Assert + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Audit.RetentionDays == nil { + t.Fatal("Audit.RetentionDays is nil, want 0") + } + if *cfg.Audit.RetentionDays != 0 { + t.Errorf("Audit.RetentionDays = %d, want 0 (keep forever)", *cfg.Audit.RetentionDays) + } +} + +func TestLoad_EnvOverrides_AllFields(t *testing.T) { + // Not parallel — modifies environment via t.Setenv. 
+ + // Arrange + path := filepath.Join(t.TempDir(), "nonexistent.yaml") + t.Setenv("CHAPERONE_ADMIN_SERVER_ADDR", "0.0.0.0:3000") + t.Setenv("CHAPERONE_ADMIN_SERVER_SECURE_COOKIES", "true") + t.Setenv("CHAPERONE_ADMIN_DATABASE_PATH", "/tmp/test.db") + t.Setenv("CHAPERONE_ADMIN_SCRAPER_INTERVAL", "20s") + t.Setenv("CHAPERONE_ADMIN_SCRAPER_TIMEOUT", "8s") + t.Setenv("CHAPERONE_ADMIN_SESSION_MAX_AGE", "48h") + t.Setenv("CHAPERONE_ADMIN_SESSION_IDLE_TIMEOUT", "4h") + t.Setenv("CHAPERONE_ADMIN_AUDIT_RETENTION_DAYS", "60") + t.Setenv("CHAPERONE_ADMIN_LOG_LEVEL", "WARN") + t.Setenv("CHAPERONE_ADMIN_LOG_FORMAT", "TEXT") + + // Act + cfg, err := Load(path) + + // Assert + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Server.Addr != "0.0.0.0:3000" { + t.Errorf("Server.Addr = %q, want %q", cfg.Server.Addr, "0.0.0.0:3000") + } + if !cfg.Server.SecureCookies { + t.Error("Server.SecureCookies = false, want true") + } + if cfg.Database.Path != "/tmp/test.db" { + t.Errorf("Database.Path = %q, want %q", cfg.Database.Path, "/tmp/test.db") + } + if cfg.Scraper.Interval.Unwrap() != 20*time.Second { + t.Errorf("Scraper.Interval = %v, want %v", cfg.Scraper.Interval.Unwrap(), 20*time.Second) + } + if cfg.Scraper.Timeout.Unwrap() != 8*time.Second { + t.Errorf("Scraper.Timeout = %v, want %v", cfg.Scraper.Timeout.Unwrap(), 8*time.Second) + } + if cfg.Session.MaxAge.Unwrap() != 48*time.Hour { + t.Errorf("Session.MaxAge = %v, want %v", cfg.Session.MaxAge.Unwrap(), 48*time.Hour) + } + if cfg.Session.IdleTimeout.Unwrap() != 4*time.Hour { + t.Errorf("Session.IdleTimeout = %v, want %v", cfg.Session.IdleTimeout.Unwrap(), 4*time.Hour) + } + if *cfg.Audit.RetentionDays != 60 { + t.Errorf("Audit.RetentionDays = %d, want %d", *cfg.Audit.RetentionDays, 60) + } + if cfg.Log.Level != "warn" { + t.Errorf("Log.Level = %q, want %q", cfg.Log.Level, "warn") + } + if cfg.Log.Format != "text" { + t.Errorf("Log.Format = %q, want %q", cfg.Log.Format, "text") + } +} + +func 
TestLoad_InvalidYAML_ReturnsError(t *testing.T) { + t.Parallel() + + // Arrange + path := writeTestConfig(t, `{{{invalid yaml`) + + // Act + _, err := Load(path) + + // Assert + if err == nil { + t.Error("expected error, got nil") + } +} + +func TestLoad_EnvOverride_InvalidDuration_ReturnsError(t *testing.T) { + // Not parallel — modifies environment. + + // Arrange + path := filepath.Join(t.TempDir(), "nonexistent.yaml") + t.Setenv("CHAPERONE_ADMIN_SCRAPER_INTERVAL", "not-a-duration") + + // Act + _, err := Load(path) + + // Assert + if err == nil { + t.Error("expected error, got nil") + } +} + +func TestApplyDefaults_ZeroConfig_SetsAllDefaults(t *testing.T) { + t.Parallel() + + // Arrange + cfg := &Config{} + + // Act + applyDefaults(cfg) + + // Assert + if cfg.Server.Addr != "127.0.0.1:8080" { + t.Errorf("Server.Addr = %q, want %q", cfg.Server.Addr, "127.0.0.1:8080") + } + if cfg.Database.Path != "./chaperone-admin.db" { + t.Errorf("Database.Path = %q, want %q", cfg.Database.Path, "./chaperone-admin.db") + } + if cfg.Scraper.Interval.Unwrap() != 10*time.Second { + t.Errorf("Scraper.Interval = %v, want 10s", cfg.Scraper.Interval.Unwrap()) + } + if cfg.Scraper.Timeout.Unwrap() != 5*time.Second { + t.Errorf("Scraper.Timeout = %v, want 5s", cfg.Scraper.Timeout.Unwrap()) + } + if cfg.Session.MaxAge.Unwrap() != 24*time.Hour { + t.Errorf("Session.MaxAge = %v, want 24h", cfg.Session.MaxAge.Unwrap()) + } + if cfg.Session.IdleTimeout.Unwrap() != 2*time.Hour { + t.Errorf("Session.IdleTimeout = %v, want 2h", cfg.Session.IdleTimeout.Unwrap()) + } + if *cfg.Audit.RetentionDays != 90 { + t.Errorf("Audit.RetentionDays = %d, want 90", *cfg.Audit.RetentionDays) + } + if cfg.Log.Level != "info" { + t.Errorf("Log.Level = %q, want %q", cfg.Log.Level, "info") + } + if cfg.Log.Format != "json" { + t.Errorf("Log.Format = %q, want %q", cfg.Log.Format, "json") + } +} + +func TestResolveConfigPath_ExplicitPath_ReturnsSame(t *testing.T) { + t.Parallel() + + // Act + got := 
resolveConfigPath("/custom/config.yaml") + + // Assert + if got != "/custom/config.yaml" { + t.Errorf("resolveConfigPath() = %q, want %q", got, "/custom/config.yaml") + } +} + +func TestResolveConfigPath_EnvVar_ReturnsEnvValue(t *testing.T) { + // Not parallel — modifies environment. + t.Setenv("CHAPERONE_ADMIN_CONFIG", "/env/config.yaml") + + // Act + got := resolveConfigPath("") + + // Assert + if got != "/env/config.yaml" { + t.Errorf("resolveConfigPath() = %q, want %q", got, "/env/config.yaml") + } +} + +func TestResolveConfigPath_Default_ReturnsDefault(t *testing.T) { + t.Parallel() + + // Act + got := resolveConfigPath("") + + // Assert + if got != "chaperone-admin.yaml" { + t.Errorf("resolveConfigPath() = %q, want %q", got, "chaperone-admin.yaml") + } +} diff --git a/admin/config/validate.go b/admin/config/validate.go new file mode 100644 index 0000000..6eadfea --- /dev/null +++ b/admin/config/validate.go @@ -0,0 +1,61 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "errors" + "fmt" + "net" + "time" +) + +// Validate checks the configuration for required fields and valid values. 
+func (c *Config) Validate() error { + var errs []error + + if c.Server.Addr == "" { + errs = append(errs, errors.New("server.addr is required")) + } else if _, _, err := net.SplitHostPort(c.Server.Addr); err != nil { + errs = append(errs, fmt.Errorf("server.addr: %w", err)) + } + + if c.Database.Path == "" { + errs = append(errs, errors.New("database.path is required")) + } + + if c.Scraper.Interval.Unwrap() < 1*time.Second { + errs = append(errs, errors.New("scraper.interval must be at least 1s")) + } + if c.Scraper.Timeout.Unwrap() < 1*time.Second { + errs = append(errs, errors.New("scraper.timeout must be at least 1s")) + } + if c.Scraper.Timeout.Unwrap() >= c.Scraper.Interval.Unwrap() { + errs = append(errs, errors.New("scraper.timeout must be less than scraper.interval")) + } + + if c.Session.MaxAge.Unwrap() < 1*time.Minute { + errs = append(errs, errors.New("session.max_age must be at least 1m")) + } + if c.Session.IdleTimeout.Unwrap() < 1*time.Minute { + errs = append(errs, errors.New("session.idle_timeout must be at least 1m")) + } + + if *c.Audit.RetentionDays < 0 { + errs = append(errs, errors.New("audit.retention_days must be non-negative (0 = keep forever)")) + } + + switch c.Log.Level { + case "debug", DefaultLogLevel, "warn", "error": + default: + errs = append(errs, fmt.Errorf("log.level: unknown level %q (valid: debug, info, warn, error)", c.Log.Level)) + } + + switch c.Log.Format { + case DefaultLogFormat, LogFormatText: + default: + errs = append(errs, fmt.Errorf("log.format: unknown format %q (valid: json, text)", c.Log.Format)) + } + + return errors.Join(errs...) 
+} diff --git a/admin/config/validate_test.go b/admin/config/validate_test.go new file mode 100644 index 0000000..85ead1b --- /dev/null +++ b/admin/config/validate_test.go @@ -0,0 +1,233 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "strings" + "testing" + "time" +) + +// validConfig returns a Config with all fields set to valid values. +// Tests mutate a single field to test specific validation rules. +func validConfig() *Config { + return &Config{ + Server: ServerConfig{Addr: DefaultAddr}, + Database: DatabaseConfig{Path: "./test.db"}, + Scraper: ScraperConfig{ + Interval: Duration(10 * time.Second), + Timeout: Duration(5 * time.Second), + }, + Session: SessionConfig{ + MaxAge: Duration(24 * time.Hour), + IdleTimeout: Duration(2 * time.Hour), + }, + Audit: AuditConfig{RetentionDays: intPtr(90)}, + Log: LogConfig{Level: DefaultLogLevel, Format: DefaultLogFormat}, + } +} + +func TestValidate_ValidConfig_NoError(t *testing.T) { + t.Parallel() + + // Arrange + cfg := validConfig() + + // Act + err := cfg.Validate() + + // Assert + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestValidate_InvalidAddr_ReturnsError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + addr string + }{ + {"empty addr", ""}, + {"missing port", "127.0.0.1"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Arrange + cfg := validConfig() + cfg.Server.Addr = tt.addr + + // Act + err := cfg.Validate() + + // Assert + if err == nil { + t.Error("expected error, got nil") + } + }) + } +} + +func TestValidate_EmptyDatabasePath_ReturnsError(t *testing.T) { + t.Parallel() + + // Arrange + cfg := validConfig() + cfg.Database.Path = "" + + // Act + err := cfg.Validate() + + // Assert + if err == nil { + t.Error("expected error, got nil") + } +} + +func TestValidate_TimeoutGteInterval_ReturnsError(t *testing.T) { + t.Parallel() + + tests := []struct { + 
name string + interval time.Duration + timeout time.Duration + }{ + {"timeout equals interval", 10 * time.Second, 10 * time.Second}, + {"timeout exceeds interval", 10 * time.Second, 15 * time.Second}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + // Arrange + cfg := validConfig() + cfg.Scraper.Interval = Duration(tt.interval) + cfg.Scraper.Timeout = Duration(tt.timeout) + + // Act + err := cfg.Validate() + + // Assert + if err == nil { + t.Error("expected error, got nil") + } + if !strings.Contains(err.Error(), "timeout must be less than") { + t.Errorf("error = %q, want to contain %q", err.Error(), "timeout must be less than") + } + }) + } +} + +func TestValidate_NegativeRetention_ReturnsError(t *testing.T) { + t.Parallel() + + // Arrange + cfg := validConfig() + cfg.Audit.RetentionDays = intPtr(-1) + + // Act + err := cfg.Validate() + + // Assert + if err == nil { + t.Error("expected error, got nil") + } + if !strings.Contains(err.Error(), "retention_days") { + t.Errorf("error = %q, want to contain %q", err.Error(), "retention_days") + } +} + +func TestValidate_ZeroRetention_NoError(t *testing.T) { + t.Parallel() + + // Arrange — 0 means "keep forever" + cfg := validConfig() + cfg.Audit.RetentionDays = intPtr(0) + + // Act + err := cfg.Validate() + + // Assert + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestValidate_UnknownLogLevel_ReturnsError(t *testing.T) { + t.Parallel() + + // Arrange + cfg := validConfig() + cfg.Log.Level = "trace" + + // Act + err := cfg.Validate() + + // Assert + if err == nil { + t.Error("expected error, got nil") + } + if !strings.Contains(err.Error(), "unknown level") { + t.Errorf("error = %q, want to contain %q", err.Error(), "unknown level") + } +} + +func TestValidate_UnknownLogFormat_ReturnsError(t *testing.T) { + t.Parallel() + + // Arrange + cfg := validConfig() + cfg.Log.Format = "xml" + + // Act + err := cfg.Validate() + + // Assert + if err == nil { + 
t.Error("expected error, got nil") + } + if !strings.Contains(err.Error(), "unknown format") { + t.Errorf("error = %q, want to contain %q", err.Error(), "unknown format") + } +} + +func TestValidate_MultipleErrors_ReturnsAllErrors(t *testing.T) { + t.Parallel() + + // Arrange — multiple invalid fields + cfg := &Config{ + Server: ServerConfig{Addr: ""}, + Database: DatabaseConfig{Path: ""}, + Scraper: ScraperConfig{ + Interval: Duration(10 * time.Second), + Timeout: Duration(10 * time.Second), + }, + Session: SessionConfig{ + MaxAge: Duration(24 * time.Hour), + IdleTimeout: Duration(2 * time.Hour), + }, + Audit: AuditConfig{RetentionDays: intPtr(-1)}, + Log: LogConfig{Level: "bad", Format: "bad"}, + } + + // Act + err := cfg.Validate() + + // Assert + if err == nil { + t.Fatal("expected error, got nil") + } + msg := err.Error() + checks := []string{"server.addr", "database.path", "timeout must be less than", "retention_days", "unknown level", "unknown format"} + for _, check := range checks { + if !strings.Contains(msg, check) { + t.Errorf("error = %q, want to contain %q", msg, check) + } + } +} diff --git a/admin/embed.go b/admin/embed.go new file mode 100644 index 0000000..1c29866 --- /dev/null +++ b/admin/embed.go @@ -0,0 +1,28 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +//go:build !dev + +package admin + +import ( + "embed" + "io/fs" +) + +// uiRawAssets holds the compiled Vue SPA build output. +// +// The ui/dist directory must exist at compile time. 
Build with: +// +// cd admin/ui && pnpm install && pnpm build +// +// Or use: make build-admin +// +// For development without building the UI, use: go build -tags dev +// +//go:embed all:ui/dist +var uiRawAssets embed.FS + +func loadUIAssets() (fs.FS, error) { + return fs.Sub(uiRawAssets, "ui/dist") +} diff --git a/admin/embed_dev.go b/admin/embed_dev.go new file mode 100644 index 0000000..fd9ffc8 --- /dev/null +++ b/admin/embed_dev.go @@ -0,0 +1,25 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +//go:build dev + +package admin + +import ( + "fmt" + "io/fs" + "os" +) + +const devUIDir = "admin/ui/dist" + +// loadUIAssets serves the Vue SPA from the filesystem during development. +// This assumes the working directory is the repository root (e.g., via +// make run-admin). For hot reload, run "pnpm dev" separately and use +// the Vite dev server proxy instead. +func loadUIAssets() (fs.FS, error) { + if _, err := os.Stat(devUIDir); err != nil { + return nil, fmt.Errorf("UI dist directory not found at %s: run 'make build-admin-ui' first: %w", devUIDir, err) + } + return os.DirFS(devUIDir), nil +} diff --git a/admin/go.mod b/admin/go.mod new file mode 100644 index 0000000..6de0c0d --- /dev/null +++ b/admin/go.mod @@ -0,0 +1,29 @@ +module github.com/cloudblue/chaperone/admin + +go 1.26.2 + +require ( + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/common v0.67.5 + golang.org/x/crypto v0.48.0 + golang.org/x/term v0.40.0 + gopkg.in/yaml.v3 v3.0.1 + modernc.org/sqlite v1.46.1 +) + +require ( + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/ncruces/go-strftime v1.0.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + 
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/sys v0.41.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + modernc.org/libc v1.67.6 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.11.0 // indirect +) diff --git a/admin/go.sum b/admin/go.sum new file mode 100644 index 0000000..6a60705 --- /dev/null +++ b/admin/go.sum @@ -0,0 +1,89 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod 
h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/cc/v4 v4.27.1 
h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= +modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc= +modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM= +modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= +modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE= +modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY= +modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= +modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= +modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI= +modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.46.1 h1:eFJ2ShBLIEnUWlLy12raN0Z1plqmFX9Qe3rjQTKt6sU= +modernc.org/sqlite v1.46.1/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod 
h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/admin/metrics/collector.go b/admin/metrics/collector.go new file mode 100644 index 0000000..2e042cf --- /dev/null +++ b/admin/metrics/collector.go @@ -0,0 +1,499 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "sort" + "sync" + "time" +) + +// Collector manages per-instance metric ring buffers and computes derived +// metrics (rates, percentiles) on demand. +type Collector struct { + mu sync.RWMutex + buffers map[int64]*Ring + capacity int +} + +// NewCollector creates a Collector with the given ring buffer capacity. +func NewCollector(capacity int) *Collector { + return &Collector{ + buffers: make(map[int64]*Ring), + capacity: capacity, + } +} + +// RecordScrape parses raw Prometheus text data and stores the resulting +// snapshot for the given instance. +func (c *Collector) RecordScrape(instanceID int64, data []byte, t time.Time) error { + snap, err := Parse(data, t) + if err != nil { + return err + } + c.Record(instanceID, *snap) + return nil +} + +// Record stores a pre-parsed snapshot for the given instance. +func (c *Collector) Record(instanceID int64, snap Snapshot) { + c.mu.Lock() + defer c.mu.Unlock() + + buf, ok := c.buffers[instanceID] + if !ok { + buf = NewRing(c.capacity) + c.buffers[instanceID] = buf + } + buf.Push(snap) +} + +// Remove deletes all stored data for the given instance. +func (c *Collector) Remove(instanceID int64) { + c.mu.Lock() + defer c.mu.Unlock() + delete(c.buffers, instanceID) +} + +// Prune removes ring buffers for instances not in the activeIDs set. 
+func (c *Collector) Prune(activeIDs map[int64]bool) { + c.mu.Lock() + defer c.mu.Unlock() + for id := range c.buffers { + if !activeIDs[id] { + delete(c.buffers, id) + } + } +} + +// GetInstanceMetrics computes full metrics for a single instance. +// Returns nil if no data exists for the given instance. +func (c *Collector) GetInstanceMetrics(instanceID int64) *InstanceMetrics { + c.mu.RLock() + defer c.mu.RUnlock() + + buf, ok := c.buffers[instanceID] + if !ok || buf.Len() == 0 { + return nil + } + + last, _ := buf.Last() + im := &InstanceMetrics{ + InstanceID: instanceID, + CollectedAt: last.Time, + DataPoints: buf.Len(), + ActiveConnections: last.ActiveConnections, + PanicsTotal: last.PanicsTotal, + } + + if buf.Len() >= 2 { + prev := buf.At(buf.Len() - 2) + c.fillCurrentKPIs(im, prev, last) + c.fillVendorMetrics(im, prev, last) + } + + c.fillTrends(im, buf) + im.Series = c.buildSeries(buf) + im.VendorSeries = c.buildVendorSeries(buf) + + return im +} + +// GetInstanceSummary returns a compact summary for the fleet view. +// Returns nil if no data exists. +func (c *Collector) GetInstanceSummary(instanceID int64) *InstanceSummary { + c.mu.RLock() + defer c.mu.RUnlock() + + buf, ok := c.buffers[instanceID] + if !ok || buf.Len() == 0 { + return nil + } + + s := instanceSummary(buf, instanceID) + return &s +} + +// instanceSummary is the shared implementation for GetInstanceSummary and +// summarizeForFleet. Caller must hold at least a read lock. 
+func instanceSummary(buf *Ring, id int64) InstanceSummary { + last, _ := buf.Last() + s := InstanceSummary{ + InstanceID: id, + ActiveConnections: last.ActiveConnections, + PanicsTotal: last.PanicsTotal, + } + + if buf.Len() >= 2 { + prev := buf.At(buf.Len() - 2) + dt := last.Time.Sub(prev.Time) + s.RPS = counterRate(prev.totalRequests(), last.totalRequests(), dt) + s.ErrorRate = errorRate(prev.totalRequests(), last.totalRequests(), prev.totalErrors(), last.totalErrors()) + s.P99 = secondsToMs(histogramQuantile(0.99, mergeHistogramDelta(prev, last))) + } + + return s +} + +// fleetAccumulator collects per-instance data for fleet-wide aggregation. +type fleetAccumulator struct { + totalReqDelta float64 + totalErrDelta float64 + trendOldRPS float64 + trendOldReqDelta float64 + trendOldErrDelta float64 + hasTrend bool +} + +// GetFleetMetrics aggregates metrics across the given instance IDs. +func (c *Collector) GetFleetMetrics(instanceIDs []int64) *FleetMetrics { + c.mu.RLock() + defer c.mu.RUnlock() + + fm := &FleetMetrics{ + CollectedAt: time.Now(), + Instances: make([]InstanceSummary, 0, len(instanceIDs)), + } + + var acc fleetAccumulator + for _, id := range instanceIDs { + buf, ok := c.buffers[id] + if !ok || buf.Len() == 0 { + continue + } + s := c.summarizeForFleet(buf, id, &acc) + fm.TotalRPS += s.RPS + fm.TotalActiveConnections += s.ActiveConnections + fm.TotalPanics += s.PanicsTotal + fm.Instances = append(fm.Instances, s) + } + + if acc.totalReqDelta > 0 { + fm.FleetErrorRate = acc.totalErrDelta / acc.totalReqDelta + } + acc.applyTrends(fm) + return fm +} + +// summarizeForFleet computes an InstanceSummary and accumulates fleet-wide deltas. 
+func (c *Collector) summarizeForFleet(buf *Ring, id int64, acc *fleetAccumulator) InstanceSummary { + s := instanceSummary(buf, id) + + if buf.Len() >= 2 { + last, _ := buf.Last() + prev := buf.At(buf.Len() - 2) + reqD := last.totalRequests() - prev.totalRequests() + errD := last.totalErrors() - prev.totalErrors() + if reqD >= 0 && errD >= 0 { + acc.totalReqDelta += reqD + acc.totalErrDelta += errD + } + } + + if td, ok := c.trendSnapshot(buf); ok { + acc.trendOldRPS += td.rps + acc.trendOldReqDelta += td.reqDelta + acc.trendOldErrDelta += td.errDelta + acc.hasTrend = true + } + return s +} + +func (acc *fleetAccumulator) applyTrends(fm *FleetMetrics) { + if !acc.hasTrend { + return + } + rpsTrend := fm.TotalRPS - acc.trendOldRPS + fm.RPSTrend = &rpsTrend + + var oldErrRate float64 + if acc.trendOldReqDelta > 0 { + oldErrRate = acc.trendOldErrDelta / acc.trendOldReqDelta + } + errTrend := fm.FleetErrorRate - oldErrRate + fm.ErrorRateTrend = &errTrend +} + +// fillCurrentKPIs populates global RPS, error rate, and latency percentiles +// from the two most recent snapshots. +func (*Collector) fillCurrentKPIs(im *InstanceMetrics, prev, curr Snapshot) { + dt := curr.Time.Sub(prev.Time) + im.RPS = counterRate(prev.totalRequests(), curr.totalRequests(), dt) + im.ErrorRate = errorRate(prev.totalRequests(), curr.totalRequests(), prev.totalErrors(), curr.totalErrors()) + + dh := mergeHistogramDelta(prev, curr) + im.P50 = secondsToMs(histogramQuantile(0.50, dh)) + im.P95 = secondsToMs(histogramQuantile(0.95, dh)) + im.P99 = secondsToMs(histogramQuantile(0.99, dh)) +} + +// fillVendorMetrics populates per-vendor KPIs from the two most recent snapshots. 
+func (*Collector) fillVendorMetrics(im *InstanceMetrics, prev, curr Snapshot) { + dt := curr.Time.Sub(prev.Time) + vendorIDs := collectVendorIDs(prev, curr) + + for _, vid := range vendorIDs { + pv := vendorOr(prev, vid) + cv := vendorOr(curr, vid) + + vm := VendorMetrics{VendorID: vid} + vm.RPS = counterRate(pv.RequestsTotal, cv.RequestsTotal, dt) + vm.ErrorRate = errorRate(pv.RequestsTotal, cv.RequestsTotal, pv.RequestsErrors, cv.RequestsErrors) + + dh := histogramDelta(pv.Duration, cv.Duration) + vm.P50 = secondsToMs(histogramQuantile(0.50, dh)) + vm.P95 = secondsToMs(histogramQuantile(0.95, dh)) + vm.P99 = secondsToMs(histogramQuantile(0.99, dh)) + + im.Vendors = append(im.Vendors, vm) + } + sort.Slice(im.Vendors, func(i, j int) bool { + return im.Vendors[i].VendorID < im.Vendors[j].VendorID + }) +} + +// historicalPair returns the two snapshots forming a rate pair from ~1 hour +// ago in the ring buffer. If the buffer doesn't span at least 50 minutes, +// ok is false. +func historicalPair(buf *Ring) (prev, curr Snapshot, ok bool) { + if buf.Len() < 4 { + return Snapshot{}, Snapshot{}, false + } + newest := buf.At(buf.Len() - 1) + oldest := buf.At(0) + if newest.Time.Sub(oldest.Time) < 50*time.Minute { + return Snapshot{}, Snapshot{}, false + } + + target := newest.Time.Add(-1 * time.Hour) + idx := findNearest(buf, target) + start := idx + if start > 0 { + start = idx - 1 + } + if start+1 >= buf.Len() { + return Snapshot{}, Snapshot{}, false + } + return buf.At(start), buf.At(start + 1), true +} + +// fillTrends computes trend values by comparing the current rate to the rate +// from approximately 1 hour ago. 
+func (*Collector) fillTrends(im *InstanceMetrics, buf *Ring) { + prev, curr, ok := historicalPair(buf) + if !ok { + return + } + dt := curr.Time.Sub(prev.Time) + + oldRPS := counterRate(prev.totalRequests(), curr.totalRequests(), dt) + rpsTrend := im.RPS - oldRPS + im.RPSTrend = &rpsTrend + + oldErr := errorRate(prev.totalRequests(), curr.totalRequests(), prev.totalErrors(), curr.totalErrors()) + errTrend := im.ErrorRate - oldErr + im.ErrorRateTrend = &errTrend +} + +type historicalTrend struct { + rps float64 + reqDelta float64 + errDelta float64 +} + +// trendSnapshot returns historical RPS and request/error deltas from ~1h ago. +func (*Collector) trendSnapshot(buf *Ring) (historicalTrend, bool) { + prev, curr, ok := historicalPair(buf) + if !ok { + return historicalTrend{}, false + } + dt := curr.Time.Sub(prev.Time) + + reqD := curr.totalRequests() - prev.totalRequests() + errD := curr.totalErrors() - prev.totalErrors() + if reqD < 0 || errD < 0 { + return historicalTrend{}, false + } + + return historicalTrend{ + rps: counterRate(prev.totalRequests(), curr.totalRequests(), dt), + reqDelta: reqD, + errDelta: errD, + }, true +} + +// buildSeries creates the global time series from the ring buffer. 
func (*Collector) buildSeries(buf *Ring) []SeriesPoint {
	// Rates are per-interval, so at least two snapshots are required.
	if buf.Len() < 2 {
		return nil
	}

	// N snapshots yield N-1 interval points; each point is stamped with the
	// time of the later snapshot of its pair.
	points := make([]SeriesPoint, 0, buf.Len()-1)
	for i := 1; i < buf.Len(); i++ {
		prev := buf.At(i - 1)
		curr := buf.At(i)
		dt := curr.Time.Sub(prev.Time)

		// Per-interval histogram with all vendors merged, for global quantiles.
		dh := mergeHistogramDelta(prev, curr)
		points = append(points, SeriesPoint{
			Time:              curr.Time.Unix(),
			RPS:               counterRate(prev.totalRequests(), curr.totalRequests(), dt),
			ErrorRate:         errorRate(prev.totalRequests(), curr.totalRequests(), prev.totalErrors(), curr.totalErrors()),
			P50:               secondsToMs(histogramQuantile(0.50, dh)),
			P95:               secondsToMs(histogramQuantile(0.95, dh)),
			P99:               secondsToMs(histogramQuantile(0.99, dh)),
			ActiveConnections: curr.ActiveConnections,
		})
	}
	return points
}

// buildVendorSeries creates per-vendor time series from the ring buffer.
func (*Collector) buildVendorSeries(buf *Ring) map[string][]VendorSeriesPoint {
	if buf.Len() < 2 {
		return nil
	}

	// Union of vendor IDs seen in any snapshot, so vendors that appear or
	// disappear mid-window still get a full-length (partially zero) series.
	allVendors := make(map[string]bool)
	for i := 0; i < buf.Len(); i++ {
		for vid := range buf.At(i).Vendors {
			allVendors[vid] = true
		}
	}
	if len(allVendors) == 0 {
		return nil
	}

	result := make(map[string][]VendorSeriesPoint, len(allVendors))
	for vid := range allVendors {
		points := make([]VendorSeriesPoint, 0, buf.Len()-1)
		for i := 1; i < buf.Len(); i++ {
			prev := buf.At(i - 1)
			curr := buf.At(i)
			dt := curr.Time.Sub(prev.Time)
			// Snapshots missing this vendor fall back to a zero VendorSnapshot.
			pv := vendorOr(prev, vid)
			cv := vendorOr(curr, vid)

			dh := histogramDelta(pv.Duration, cv.Duration)
			points = append(points, VendorSeriesPoint{
				Time:      curr.Time.Unix(),
				RPS:       counterRate(pv.RequestsTotal, cv.RequestsTotal, dt),
				ErrorRate: errorRate(pv.RequestsTotal, cv.RequestsTotal, pv.RequestsErrors, cv.RequestsErrors),
				P50:       secondsToMs(histogramQuantile(0.50, dh)),
				P95:       secondsToMs(histogramQuantile(0.95, dh)),
				P99:       secondsToMs(histogramQuantile(0.99, dh)),
			})
		}
		result[vid] = points
	}
	return result
}

// --- helpers ---

// findNearest returns the index of the snapshot closest to target time.
// NOTE(review): assumes buf is non-empty — buf.At(0) panics on an empty ring;
// confirm all callers check Len() first.
func findNearest(buf *Ring, target time.Time) int {
	best := 0
	bestDelta := absDuration(buf.At(0).Time.Sub(target))
	for i := 1; i < buf.Len(); i++ {
		d := absDuration(buf.At(i).Time.Sub(target))
		if d < bestDelta {
			best = i
			bestDelta = d
		}
	}
	return best
}

// absDuration returns the absolute value of d.
func absDuration(d time.Duration) time.Duration {
	if d < 0 {
		return -d
	}
	return d
}

// mergeHistogramDelta merges all vendor histograms and computes their delta.
func mergeHistogramDelta(prev, curr Snapshot) Histogram {
	pm := mergeVendorHistograms(prev)
	cm := mergeVendorHistograms(curr)
	return histogramDelta(pm, cm)
}

// mergeVendorHistograms combines histograms across all vendors in a snapshot.
func mergeVendorHistograms(s Snapshot) Histogram {
	var merged Histogram
	for _, vs := range s.Vendors {
		merged = addHistograms(merged, vs.Duration)
	}
	return merged
}

// addHistograms sums two histograms that share the same bucket boundaries.
// All vendors on a single proxy share the same HistogramVec bucket layout,
// so boundaries always match in practice. If they ever diverge (proxy version
// drift), we fall back to the histogram with more observations rather than
// silently producing a corrupt merge.
+func addHistograms(a, b Histogram) Histogram { + if len(a.Buckets) == 0 { + return b + } + if len(b.Buckets) == 0 { + return a + } + if !sameBucketBoundaries(a, b) { + if a.Count >= b.Count { + return a + } + return b + } + result := Histogram{ + Count: a.Count + b.Count, + Sum: a.Sum + b.Sum, + Buckets: make([]Bucket, len(a.Buckets)), + } + for i := range result.Buckets { + result.Buckets[i] = Bucket{ + UpperBound: a.Buckets[i].UpperBound, + Count: a.Buckets[i].Count + b.Buckets[i].Count, + } + } + return result +} + +func sameBucketBoundaries(a, b Histogram) bool { + if len(a.Buckets) != len(b.Buckets) { + return false + } + for i := range a.Buckets { + if a.Buckets[i].UpperBound != b.Buckets[i].UpperBound { + return false + } + } + return true +} + +// vendorOr returns the VendorSnapshot for a vendor or a zero value. +func vendorOr(s Snapshot, id string) VendorSnapshot { + if vs, ok := s.Vendors[id]; ok { + return *vs + } + return VendorSnapshot{} +} + +// collectVendorIDs returns the sorted union of vendor IDs from two snapshots. 
+func collectVendorIDs(a, b Snapshot) []string { + seen := make(map[string]bool) + for k := range a.Vendors { + seen[k] = true + } + for k := range b.Vendors { + seen[k] = true + } + ids := make([]string, 0, len(seen)) + for k := range seen { + ids = append(ids, k) + } + sort.Strings(ids) + return ids +} diff --git a/admin/metrics/collector_test.go b/admin/metrics/collector_test.go new file mode 100644 index 0000000..8110d44 --- /dev/null +++ b/admin/metrics/collector_test.go @@ -0,0 +1,419 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "math" + "testing" + "time" +) + +func makeSnapshot(t time.Time, totalReq, errReq, active, panics float64) Snapshot { + return Snapshot{ + Time: t, + Vendors: map[string]*VendorSnapshot{ + "acme": { + RequestsTotal: totalReq * 0.6, + RequestsErrors: errReq * 0.6, + Duration: Histogram{ + Count: totalReq * 0.6, + Sum: totalReq * 0.6 * 0.15, + Buckets: []Bucket{ + {UpperBound: 0.05, Count: totalReq * 0.6 * 0.2}, + {UpperBound: 0.1, Count: totalReq * 0.6 * 0.5}, + {UpperBound: 0.25, Count: totalReq * 0.6 * 0.8}, + {UpperBound: 0.5, Count: totalReq * 0.6 * 0.95}, + {UpperBound: 1.0, Count: totalReq * 0.6}, + }, + }, + }, + "beta": { + RequestsTotal: totalReq * 0.4, + RequestsErrors: errReq * 0.4, + Duration: Histogram{ + Count: totalReq * 0.4, + Sum: totalReq * 0.4 * 0.1, + Buckets: []Bucket{ + {UpperBound: 0.05, Count: totalReq * 0.4 * 0.3}, + {UpperBound: 0.1, Count: totalReq * 0.4 * 0.6}, + {UpperBound: 0.25, Count: totalReq * 0.4 * 0.9}, + {UpperBound: 0.5, Count: totalReq * 0.4 * 0.98}, + {UpperBound: 1.0, Count: totalReq * 0.4}, + }, + }, + }, + }, + ActiveConnections: active, + PanicsTotal: panics, + } +} + +func TestCollector_RecordScrape_ParsesAndStores(t *testing.T) { + t.Parallel() + c := NewCollector(10) + + err := c.RecordScrape(1, []byte(sampleMetrics), time.Now()) + if err != nil { + t.Fatalf("RecordScrape() error = %v", err) + } + + c.mu.RLock() + buf, ok := 
c.buffers[1] + c.mu.RUnlock() + + if !ok { + t.Fatal("expected buffer for instance 1") + } + if buf.Len() != 1 { + t.Errorf("buffer Len() = %d, want 1", buf.Len()) + } +} + +func TestCollector_RecordScrape_MalformedData_ReturnsError(t *testing.T) { + t.Parallel() + c := NewCollector(10) + + err := c.RecordScrape(1, []byte("# TYPE foo gauge\n# TYPE foo counter\nfoo 1\n"), time.Now()) + if err == nil { + t.Error("expected error for malformed data") + } +} + +func TestCollector_Remove(t *testing.T) { + t.Parallel() + c := NewCollector(10) + c.Record(1, Snapshot{Time: time.Now()}) + c.Remove(1) + + c.mu.RLock() + _, ok := c.buffers[1] + c.mu.RUnlock() + + if ok { + t.Error("expected buffer to be removed") + } +} + +func TestCollector_Prune(t *testing.T) { + t.Parallel() + c := NewCollector(10) + c.Record(1, Snapshot{Time: time.Now()}) + c.Record(2, Snapshot{Time: time.Now()}) + c.Record(3, Snapshot{Time: time.Now()}) + + c.Prune(map[int64]bool{1: true, 3: true}) + + c.mu.RLock() + _, has2 := c.buffers[2] + c.mu.RUnlock() + + if has2 { + t.Error("expected instance 2 to be pruned") + } +} + +func TestCollector_GetInstanceMetrics_NoData_ReturnsNil(t *testing.T) { + t.Parallel() + c := NewCollector(10) + if got := c.GetInstanceMetrics(99); got != nil { + t.Error("expected nil for unknown instance") + } +} + +func TestCollector_GetInstanceMetrics_SingleSnapshot_NoRates(t *testing.T) { + t.Parallel() + c := NewCollector(10) + c.Record(1, makeSnapshot(time.Now(), 1000, 50, 10, 2)) + + im := c.GetInstanceMetrics(1) + if im == nil { + t.Fatal("expected non-nil InstanceMetrics") + } + if im.DataPoints != 1 { + t.Errorf("DataPoints = %d, want 1", im.DataPoints) + } + // With only 1 snapshot, rates should be zero. 
+ if im.RPS != 0 { + t.Errorf("RPS = %v, want 0 (single snapshot)", im.RPS) + } + if im.ActiveConnections != 10 { + t.Errorf("ActiveConnections = %v, want 10", im.ActiveConnections) + } +} + +func TestCollector_GetInstanceMetrics_TwoSnapshots_ComputesRates(t *testing.T) { + t.Parallel() + c := NewCollector(10) + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + t1 := t0.Add(10 * time.Second) + + c.Record(1, makeSnapshot(t0, 1000, 50, 10, 1)) + c.Record(1, makeSnapshot(t1, 1100, 55, 12, 2)) + + im := c.GetInstanceMetrics(1) + if im == nil { + t.Fatal("expected non-nil InstanceMetrics") + } + + // RPS: (1100 - 1000) / 10 = 10 + if math.Abs(im.RPS-10.0) > 0.01 { + t.Errorf("RPS = %v, want ~10", im.RPS) + } + + // Error rate: (55-50) / (1100-1000) = 5/100 = 0.05 + if math.Abs(im.ErrorRate-0.05) > 0.001 { + t.Errorf("ErrorRate = %v, want ~0.05", im.ErrorRate) + } + + if im.ActiveConnections != 12 { + t.Errorf("ActiveConnections = %v, want 12", im.ActiveConnections) + } + if im.PanicsTotal != 2 { + t.Errorf("PanicsTotal = %v, want 2", im.PanicsTotal) + } + + // Should have latency percentiles > 0 + if im.P50 <= 0 { + t.Errorf("P50 = %v, want > 0", im.P50) + } + if im.P99 <= 0 { + t.Errorf("P99 = %v, want > 0", im.P99) + } + + // Should have vendor metrics + if len(im.Vendors) != 2 { + t.Errorf("Vendors = %d, want 2", len(im.Vendors)) + } +} + +func TestCollector_GetInstanceMetrics_SeriesGenerated(t *testing.T) { + t.Parallel() + c := NewCollector(100) + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + + for i := 0; i < 5; i++ { + c.Record(1, makeSnapshot( + t0.Add(time.Duration(i)*10*time.Second), + float64(1000+i*100), + float64(50+i*5), + 10, + float64(i), + )) + } + + im := c.GetInstanceMetrics(1) + if im == nil { + t.Fatal("expected non-nil InstanceMetrics") + } + + // 5 snapshots → 4 series points + if len(im.Series) != 4 { + t.Errorf("Series length = %d, want 4", len(im.Series)) + } + + // Should have vendor series for both vendors + if len(im.VendorSeries) 
!= 2 { + t.Errorf("VendorSeries count = %d, want 2", len(im.VendorSeries)) + } + for vid, series := range im.VendorSeries { + if len(series) != 4 { + t.Errorf("VendorSeries[%s] length = %d, want 4", vid, len(series)) + } + } +} + +func TestCollector_GetInstanceMetrics_TrendWithEnoughData(t *testing.T) { + t.Parallel() + c := NewCollector(DefaultCapacity) + t0 := time.Date(2026, 3, 7, 11, 0, 0, 0, time.UTC) + + // Fill 1h of data at 10s intervals + for i := 0; i <= 360; i++ { + c.Record(1, makeSnapshot( + t0.Add(time.Duration(i)*10*time.Second), + float64(i*100), + float64(i*5), + 10, + 0, + )) + } + + im := c.GetInstanceMetrics(1) + if im == nil { + t.Fatal("expected non-nil InstanceMetrics") + } + if im.RPSTrend == nil { + t.Error("expected RPSTrend to be set with 1h of data") + } + if im.ErrorRateTrend == nil { + t.Error("expected ErrorRateTrend to be set with 1h of data") + } +} + +func TestCollector_GetInstanceMetrics_NoTrendWithInsufficientData(t *testing.T) { + t.Parallel() + c := NewCollector(100) + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + + for i := 0; i < 5; i++ { + c.Record(1, makeSnapshot( + t0.Add(time.Duration(i)*10*time.Second), + float64(1000+i*100), + float64(50+i*5), + 10, + 0, + )) + } + + im := c.GetInstanceMetrics(1) + if im == nil { + t.Fatal("expected non-nil InstanceMetrics") + } + if im.RPSTrend != nil { + t.Error("expected nil RPSTrend with < 50min of data") + } +} + +func TestCollector_GetFleetMetrics_AggregatesAcrossInstances(t *testing.T) { + t.Parallel() + c := NewCollector(10) + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + t1 := t0.Add(10 * time.Second) + + c.Record(1, makeSnapshot(t0, 1000, 50, 10, 1)) + c.Record(1, makeSnapshot(t1, 1100, 55, 12, 2)) + + c.Record(2, makeSnapshot(t0, 2000, 100, 20, 0)) + c.Record(2, makeSnapshot(t1, 2200, 110, 22, 1)) + + fm := c.GetFleetMetrics([]int64{1, 2}) + + // RPS: instance1=10, instance2=20 → 30 + if math.Abs(fm.TotalRPS-30.0) > 0.01 { + t.Errorf("TotalRPS = %v, want ~30", 
fm.TotalRPS) + } + if fm.TotalActiveConnections != 34 { + t.Errorf("TotalActiveConnections = %v, want 34", fm.TotalActiveConnections) + } + if fm.TotalPanics != 3 { + t.Errorf("TotalPanics = %v, want 3", fm.TotalPanics) + } + if len(fm.Instances) != 2 { + t.Errorf("Instances = %d, want 2", len(fm.Instances)) + } +} + +func TestCollector_GetFleetMetrics_SkipsMissingInstances(t *testing.T) { + t.Parallel() + c := NewCollector(10) + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + t1 := t0.Add(10 * time.Second) + + c.Record(1, makeSnapshot(t0, 1000, 50, 10, 0)) + c.Record(1, makeSnapshot(t1, 1100, 55, 12, 0)) + + // Instance 99 has no data + fm := c.GetFleetMetrics([]int64{1, 99}) + + if len(fm.Instances) != 1 { + t.Errorf("Instances = %d, want 1 (skip missing)", len(fm.Instances)) + } +} + +func TestAddHistograms_MismatchedBoundaries_FallsBack(t *testing.T) { + t.Parallel() + a := Histogram{ + Count: 100, + Buckets: []Bucket{{UpperBound: 0.05, Count: 50}, {UpperBound: 0.1, Count: 100}}, + } + b := Histogram{ + Count: 200, + Buckets: []Bucket{{UpperBound: 0.01, Count: 100}, {UpperBound: 0.5, Count: 200}}, + } + result := addHistograms(a, b) + + // Should fall back to b (higher count) instead of merging mismatched buckets. 
+ if result.Count != 200 { + t.Errorf("Count = %v, want 200 (fallback to b)", result.Count) + } + if result.Buckets[0].UpperBound != 0.01 { + t.Errorf("Bucket[0].UpperBound = %v, want 0.01 (b's boundaries)", result.Buckets[0].UpperBound) + } +} + +func TestGetFleetMetrics_CounterReset_DoesNotCorruptErrorRate(t *testing.T) { + t.Parallel() + c := NewCollector(10) + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + t1 := t0.Add(10 * time.Second) + + // Instance 1: normal operation + c.Record(1, makeSnapshot(t0, 1000, 50, 10, 0)) + c.Record(1, makeSnapshot(t1, 1100, 55, 12, 0)) + + // Instance 2: counter reset (counters dropped from 2000 to 100) + c.Record(2, makeSnapshot(t0, 2000, 100, 20, 0)) + c.Record(2, makeSnapshot(t1, 100, 5, 22, 0)) + + fm := c.GetFleetMetrics([]int64{1, 2}) + + // Fleet error rate should only reflect instance 1 (instance 2 skipped due to reset). + // Instance 1: 5 errors / 100 requests = 0.05 + if fm.FleetErrorRate < 0 { + t.Errorf("FleetErrorRate = %v, want >= 0 (counter reset should not corrupt)", fm.FleetErrorRate) + } + if math.Abs(fm.FleetErrorRate-0.05) > 0.01 { + t.Errorf("FleetErrorRate = %v, want ~0.05 (only instance 1)", fm.FleetErrorRate) + } +} + +func TestGetFleetMetrics_ErrorRateTrend_Populated(t *testing.T) { + t.Parallel() + c := NewCollector(DefaultCapacity) + t0 := time.Date(2026, 3, 7, 11, 0, 0, 0, time.UTC) + + for i := 0; i <= 360; i++ { + c.Record(1, makeSnapshot( + t0.Add(time.Duration(i)*10*time.Second), + float64(i*100), + float64(i*5), + 10, + 0, + )) + } + + fm := c.GetFleetMetrics([]int64{1}) + if fm.ErrorRateTrend == nil { + t.Error("expected ErrorRateTrend to be set with 1h of data") + } +} + +func TestCollector_GetInstanceSummary_NoData_ReturnsNil(t *testing.T) { + t.Parallel() + c := NewCollector(10) + if got := c.GetInstanceSummary(1); got != nil { + t.Error("expected nil for unknown instance") + } +} + +func TestCollector_GetInstanceSummary_ComputesKPIs(t *testing.T) { + t.Parallel() + c := 
NewCollector(10) + t0 := time.Date(2026, 3, 7, 12, 0, 0, 0, time.UTC) + t1 := t0.Add(10 * time.Second) + + c.Record(1, makeSnapshot(t0, 1000, 50, 10, 1)) + c.Record(1, makeSnapshot(t1, 1100, 55, 12, 2)) + + s := c.GetInstanceSummary(1) + if s == nil { + t.Fatal("expected non-nil InstanceSummary") + } + if math.Abs(s.RPS-10.0) > 0.01 { + t.Errorf("RPS = %v, want ~10", s.RPS) + } + if s.ActiveConnections != 12 { + t.Errorf("ActiveConnections = %v, want 12", s.ActiveConnections) + } +} diff --git a/admin/metrics/compute.go b/admin/metrics/compute.go new file mode 100644 index 0000000..5f54164 --- /dev/null +++ b/admin/metrics/compute.go @@ -0,0 +1,95 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "math" + "time" +) + +// counterRate computes the per-second rate between two counter values. +// Returns 0 if the counter was reset (curr < prev) or the time delta is zero. +func counterRate(prev, curr float64, dt time.Duration) float64 { + if dt <= 0 || curr < prev { + return 0 + } + return (curr - prev) / dt.Seconds() +} + +// errorRate computes the fraction of errors in the interval. +// Returns 0 if the total delta is zero or a counter reset occurred. +func errorRate(prevTotal, currTotal, prevErrors, currErrors float64) float64 { + totalDelta := currTotal - prevTotal + if totalDelta <= 0 { + return 0 + } + errDelta := currErrors - prevErrors + if errDelta < 0 { + return 0 + } + rate := errDelta / totalDelta + return math.Min(rate, 1) +} + +// histogramQuantile computes the q-th quantile (0 ≤ q ≤ 1) from cumulative +// histogram buckets using linear interpolation — the standard Prometheus method. +// +// Buckets must be sorted by UpperBound with the +Inf bucket excluded. 
+func histogramQuantile(q float64, h Histogram) float64 { + if h.Count == 0 || len(h.Buckets) == 0 { + return 0 + } + + rank := q * h.Count + var prevCount, prevBound float64 + + for _, b := range h.Buckets { + if b.Count >= rank { + if b.Count == prevCount { + return prevBound + } + fraction := (rank - prevCount) / (b.Count - prevCount) + return prevBound + fraction*(b.UpperBound-prevBound) + } + prevCount = b.Count + prevBound = b.UpperBound + } + + // All observations above the highest finite bucket. + return h.Buckets[len(h.Buckets)-1].UpperBound +} + +// histogramDelta computes the per-interval histogram by subtracting prev from +// curr. On counter reset (curr.Count < prev.Count) it returns curr as-is. +func histogramDelta(prev, curr Histogram) Histogram { + if curr.Count < prev.Count { + return curr + } + + delta := Histogram{ + Count: curr.Count - prev.Count, + Sum: curr.Sum - prev.Sum, + } + + for i, b := range curr.Buckets { + bc := b.Count + if i < len(prev.Buckets) { + bc -= prev.Buckets[i].Count + if bc < 0 { + bc = 0 + } + } + delta.Buckets = append(delta.Buckets, Bucket{ + UpperBound: b.UpperBound, + Count: bc, + }) + } + + return delta +} + +// secondsToMs converts seconds to milliseconds, rounding to 2 decimal places. 
+func secondsToMs(s float64) float64 { + return math.Round(s*100000) / 100 +} diff --git a/admin/metrics/compute_test.go b/admin/metrics/compute_test.go new file mode 100644 index 0000000..f35b97e --- /dev/null +++ b/admin/metrics/compute_test.go @@ -0,0 +1,176 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "math" + "testing" + "time" +) + +func TestCounterRate_Normal(t *testing.T) { + t.Parallel() + got := counterRate(100, 200, 10*time.Second) + if got != 10.0 { + t.Errorf("counterRate(100, 200, 10s) = %v, want 10", got) + } +} + +func TestCounterRate_Reset_ReturnsZero(t *testing.T) { + t.Parallel() + got := counterRate(200, 50, 10*time.Second) + if got != 0 { + t.Errorf("counterRate(200, 50, 10s) = %v, want 0 (reset)", got) + } +} + +func TestCounterRate_ZeroDuration_ReturnsZero(t *testing.T) { + t.Parallel() + got := counterRate(100, 200, 0) + if got != 0 { + t.Errorf("counterRate(100, 200, 0) = %v, want 0", got) + } +} + +func TestErrorRate_Normal(t *testing.T) { + t.Parallel() + // 100 total requests, 10 errors → 10% + got := errorRate(900, 1000, 40, 50) + if math.Abs(got-0.1) > 0.001 { + t.Errorf("errorRate(900,1000,40,50) = %v, want ~0.1", got) + } +} + +func TestErrorRate_ZeroTotal_ReturnsZero(t *testing.T) { + t.Parallel() + got := errorRate(100, 100, 5, 5) + if got != 0 { + t.Errorf("errorRate with zero total delta = %v, want 0", got) + } +} + +func TestErrorRate_Reset_ReturnsZero(t *testing.T) { + t.Parallel() + got := errorRate(100, 50, 10, 5) + if got != 0 { + t.Errorf("errorRate with counter reset = %v, want 0", got) + } +} + +func TestErrorRate_CappedAtOne(t *testing.T) { + t.Parallel() + // Pathological case: error delta > total delta + got := errorRate(0, 10, 0, 20) + if got != 1 { + t.Errorf("errorRate capped = %v, want 1", got) + } +} + +func TestHistogramQuantile_P50(t *testing.T) { + t.Parallel() + h := Histogram{ + Count: 1000, + Buckets: []Bucket{ + {UpperBound: 0.05, Count: 200}, 
+ {UpperBound: 0.1, Count: 500}, + {UpperBound: 0.25, Count: 800}, + {UpperBound: 0.5, Count: 950}, + {UpperBound: 1.0, Count: 1000}, + }, + } + // rank = 0.5 * 1000 = 500 + // Falls in bucket [0.05, 0.1] at count 500 exactly → boundary + got := histogramQuantile(0.50, h) + if got != 0.1 { + t.Errorf("p50 = %v, want 0.1", got) + } +} + +func TestHistogramQuantile_P99(t *testing.T) { + t.Parallel() + h := Histogram{ + Count: 1000, + Buckets: []Bucket{ + {UpperBound: 0.05, Count: 200}, + {UpperBound: 0.1, Count: 500}, + {UpperBound: 0.25, Count: 800}, + {UpperBound: 0.5, Count: 950}, + {UpperBound: 1.0, Count: 1000}, + }, + } + // rank = 0.99 * 1000 = 990 + // Falls in bucket [0.5, 1.0]: prevCount=950, count=1000 + // fraction = (990 - 950) / (1000 - 950) = 40/50 = 0.8 + // result = 0.5 + 0.8 * (1.0 - 0.5) = 0.5 + 0.4 = 0.9 + got := histogramQuantile(0.99, h) + if math.Abs(got-0.9) > 0.001 { + t.Errorf("p99 = %v, want ~0.9", got) + } +} + +func TestHistogramQuantile_Empty_ReturnsZero(t *testing.T) { + t.Parallel() + got := histogramQuantile(0.5, Histogram{}) + if got != 0 { + t.Errorf("quantile of empty histogram = %v, want 0", got) + } +} + +func TestHistogramQuantile_AllAboveHighestBucket(t *testing.T) { + t.Parallel() + h := Histogram{ + Count: 100, + Buckets: []Bucket{{UpperBound: 0.1, Count: 0}}, + } + got := histogramQuantile(0.5, h) + // All 100 observations are above the only bucket → return bucket upper bound + if got != 0.1 { + t.Errorf("quantile above all buckets = %v, want 0.1", got) + } +} + +func TestHistogramDelta_Normal(t *testing.T) { + t.Parallel() + prev := Histogram{ + Count: 100, Sum: 10, + Buckets: []Bucket{{UpperBound: 0.1, Count: 50}, {UpperBound: 0.5, Count: 100}}, + } + curr := Histogram{ + Count: 200, Sum: 25, + Buckets: []Bucket{{UpperBound: 0.1, Count: 80}, {UpperBound: 0.5, Count: 200}}, + } + + d := histogramDelta(prev, curr) + if d.Count != 100 { + t.Errorf("delta Count = %v, want 100", d.Count) + } + if d.Sum != 15 { + t.Errorf("delta 
Sum = %v, want 15", d.Sum) + } + if d.Buckets[0].Count != 30 { + t.Errorf("delta bucket[0] Count = %v, want 30", d.Buckets[0].Count) + } + if d.Buckets[1].Count != 100 { + t.Errorf("delta bucket[1] Count = %v, want 100", d.Buckets[1].Count) + } +} + +func TestHistogramDelta_Reset_ReturnsCurr(t *testing.T) { + t.Parallel() + prev := Histogram{Count: 200, Buckets: []Bucket{{UpperBound: 0.1, Count: 200}}} + curr := Histogram{Count: 50, Buckets: []Bucket{{UpperBound: 0.1, Count: 50}}} + + d := histogramDelta(prev, curr) + if d.Count != 50 { + t.Errorf("delta after reset Count = %v, want 50 (curr)", d.Count) + } +} + +func TestSecondsToMs(t *testing.T) { + t.Parallel() + got := secondsToMs(0.123) + if math.Abs(got-123.0) > 0.01 { + t.Errorf("secondsToMs(0.123) = %v, want ~123", got) + } +} diff --git a/admin/metrics/metrics.go b/admin/metrics/metrics.go new file mode 100644 index 0000000..243a68d --- /dev/null +++ b/admin/metrics/metrics.go @@ -0,0 +1,156 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +// Package metrics parses Prometheus metrics from Chaperone proxy instances, +// stores them in ring buffers, and computes rates and percentiles. +package metrics + +import "time" + +// DefaultCapacity is the number of scrape snapshots retained per instance. +// At 10s intervals this gives ~1 hour of history. +const DefaultCapacity = 360 + +// Prometheus metric names emitted by the Chaperone proxy. +const ( + metricRequestsTotal = "chaperone_requests_total" + metricDurationSeconds = "chaperone_request_duration_seconds" + metricActiveConns = "chaperone_active_connections" + metricPanicsTotal = "chaperone_panics_total" + + labelVendorID = "vendor_id" + labelStatusClass = "status_class" +) + +// Snapshot holds parsed metrics from a single scrape of one proxy instance. 
type Snapshot struct {
	Time              time.Time
	Vendors           map[string]*VendorSnapshot
	ActiveConnections float64
	PanicsTotal       float64
}

// VendorSnapshot holds per-vendor counters and histogram for a single scrape.
type VendorSnapshot struct {
	RequestsTotal  float64
	RequestsErrors float64 // 4xx + 5xx
	Duration       Histogram
}

// Histogram holds cumulative histogram bucket data.
type Histogram struct {
	Buckets []Bucket
	Count   float64
	Sum     float64
}

// Bucket is a single cumulative histogram bucket.
type Bucket struct {
	UpperBound float64
	Count      float64 // cumulative count of observations <= UpperBound
}

// vendorOrCreate returns the VendorSnapshot for the given vendor, creating it
// on first use. An empty vendor ID is filed under "unknown".
func (s *Snapshot) vendorOrCreate(id string) *VendorSnapshot {
	key := id
	if key == "" {
		key = "unknown"
	}
	if existing, found := s.Vendors[key]; found {
		return existing
	}
	created := &VendorSnapshot{}
	s.Vendors[key] = created
	return created
}

// totalRequests returns the sum of RequestsTotal across all vendors.
func (s *Snapshot) totalRequests() float64 {
	var sum float64
	for _, vendor := range s.Vendors {
		sum += vendor.RequestsTotal
	}
	return sum
}

// totalErrors returns the sum of RequestsErrors across all vendors.
func (s *Snapshot) totalErrors() float64 {
	var sum float64
	for _, vendor := range s.Vendors {
		sum += vendor.RequestsErrors
	}
	return sum
}

// --- API response types ---

// InstanceMetrics is returned by GET /api/metrics/{id}.
// Latency fields (P50/P95/P99) are milliseconds; ErrorRate is a fraction in
// [0, 1]. Trend pointers are nil when there is not enough history.
type InstanceMetrics struct {
	InstanceID        int64                          `json:"instance_id"`
	CollectedAt       time.Time                      `json:"collected_at"`
	DataPoints        int                            `json:"data_points"` // snapshots currently retained for this instance
	RPS               float64                        `json:"rps"`
	ErrorRate         float64                        `json:"error_rate"` // fraction in [0, 1]
	ActiveConnections float64                        `json:"active_connections"`
	PanicsTotal       float64                        `json:"panics_total"`
	P50               float64                        `json:"p50_ms"`
	P95               float64                        `json:"p95_ms"`
	P99               float64                        `json:"p99_ms"`
	RPSTrend          *float64                       `json:"rps_trend"`        // nil when insufficient history
	ErrorRateTrend    *float64                       `json:"error_rate_trend"` // nil when insufficient history
	Vendors           []VendorMetrics                `json:"vendors"`
	Series            []SeriesPoint                  `json:"series"`
	VendorSeries      map[string][]VendorSeriesPoint `json:"vendor_series"`
}

// FleetMetrics is returned by GET /api/metrics/fleet.
// Totals are summed across the requested instances; trend pointers are nil
// when there is not enough history.
type FleetMetrics struct {
	CollectedAt            time.Time         `json:"collected_at"`
	TotalRPS               float64           `json:"total_rps"`
	FleetErrorRate         float64           `json:"fleet_error_rate"` // fraction in [0, 1]
	TotalActiveConnections float64           `json:"total_active_connections"`
	TotalPanics            float64           `json:"total_panics"`
	RPSTrend               *float64          `json:"rps_trend"`
	ErrorRateTrend         *float64          `json:"error_rate_trend"`
	Instances              []InstanceSummary `json:"instances"`
}

// InstanceSummary is a compact per-instance overview for the fleet endpoint.
type InstanceSummary struct {
	InstanceID        int64   `json:"instance_id"`
	RPS               float64 `json:"rps"`
	ErrorRate         float64 `json:"error_rate"` // fraction in [0, 1]
	ActiveConnections float64 `json:"active_connections"`
	PanicsTotal       float64 `json:"panics_total"`
	P99               float64 `json:"p99_ms"` // milliseconds
}

// VendorMetrics holds current per-vendor KPIs.
// Latency fields are milliseconds; ErrorRate is a fraction in [0, 1].
type VendorMetrics struct {
	VendorID  string  `json:"vendor_id"`
	RPS       float64 `json:"rps"`
	ErrorRate float64 `json:"error_rate"`
	P50       float64 `json:"p50_ms"`
	P95       float64 `json:"p95_ms"`
	P99       float64 `json:"p99_ms"`
}

// SeriesPoint is one data point in a global time series.
+type SeriesPoint struct { + Time int64 `json:"t"` + RPS float64 `json:"rps"` + ErrorRate float64 `json:"err"` + P50 float64 `json:"p50"` + P95 float64 `json:"p95"` + P99 float64 `json:"p99"` + ActiveConnections float64 `json:"conn"` +} + +// VendorSeriesPoint is one data point in a per-vendor time series. +type VendorSeriesPoint struct { + Time int64 `json:"t"` + RPS float64 `json:"rps"` + ErrorRate float64 `json:"err"` + P50 float64 `json:"p50"` + P95 float64 `json:"p95"` + P99 float64 `json:"p99"` +} diff --git a/admin/metrics/parse.go b/admin/metrics/parse.go new file mode 100644 index 0000000..7ed4160 --- /dev/null +++ b/admin/metrics/parse.go @@ -0,0 +1,117 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "bytes" + "fmt" + "math" + "sort" + "time" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" +) + +// Parse parses Prometheus text exposition format data into a Snapshot. +// Only Chaperone-specific metrics are extracted; unknown metrics are ignored. +func Parse(data []byte, t time.Time) (*Snapshot, error) { + parser := expfmt.NewTextParser(model.LegacyValidation) + families, err := parser.TextToMetricFamilies(bytes.NewReader(data)) + if err != nil { + return nil, fmt.Errorf("parsing prometheus text: %w", err) + } + + snap := &Snapshot{ + Time: t, + Vendors: make(map[string]*VendorSnapshot), + } + + for name, family := range families { + switch name { + case metricRequestsTotal: + parseRequests(family, snap) + case metricDurationSeconds: + parseDuration(family, snap) + case metricActiveConns: + parseGauge(family, &snap.ActiveConnections) + case metricPanicsTotal: + parseCounter(family, &snap.PanicsTotal) + } + } + + return snap, nil +} + +// parseRequests extracts chaperone_requests_total counters, summing across +// methods but preserving vendor_id and status_class dimensions. 
func parseRequests(family *dto.MetricFamily, snap *Snapshot) {
	for _, m := range family.GetMetric() {
		vendorID := labelValue(m.GetLabel(), labelVendorID)
		statusClass := labelValue(m.GetLabel(), labelStatusClass)
		value := m.GetCounter().GetValue()

		vs := snap.vendorOrCreate(vendorID)
		vs.RequestsTotal += value
		// Errors are defined as 4xx + 5xx status classes.
		if statusClass == "4xx" || statusClass == "5xx" {
			vs.RequestsErrors += value
		}
	}
}

// parseDuration extracts chaperone_request_duration_seconds histograms per vendor.
// NOTE(review): if the family ever carries multiple series for the same
// vendor_id (extra label dimensions), the last series wins here instead of
// being merged — confirm the proxy emits exactly one histogram per vendor.
func parseDuration(family *dto.MetricFamily, snap *Snapshot) {
	for _, m := range family.GetMetric() {
		vendorID := labelValue(m.GetLabel(), labelVendorID)
		h := m.GetHistogram()
		// Skip series that are not histograms (defensive; type mismatch).
		if h == nil {
			continue
		}

		hist := Histogram{
			Count: float64(h.GetSampleCount()),
			Sum:   h.GetSampleSum(),
		}
		for _, b := range h.GetBucket() {
			hist.Buckets = append(hist.Buckets, Bucket{
				UpperBound: b.GetUpperBound(),
				Count:      float64(b.GetCumulativeCount()),
			})
		}
		// Downstream quantile math assumes ascending bucket bounds.
		sort.Slice(hist.Buckets, func(i, j int) bool {
			return hist.Buckets[i].UpperBound < hist.Buckets[j].UpperBound
		})
		// Strip the +Inf bucket — we use Count for that.
		if n := len(hist.Buckets); n > 0 && math.IsInf(hist.Buckets[n-1].UpperBound, 1) {
			hist.Buckets = hist.Buckets[:n-1]
		}

		snap.vendorOrCreate(vendorID).Duration = hist
	}
}

// parseGauge extracts a single gauge value (first metric in the family).
func parseGauge(family *dto.MetricFamily, dst *float64) {
	if ms := family.GetMetric(); len(ms) > 0 {
		*dst = ms[0].GetGauge().GetValue()
	}
}

// parseCounter extracts a single counter value (first metric in the family).
func parseCounter(family *dto.MetricFamily, dst *float64) {
	if ms := family.GetMetric(); len(ms) > 0 {
		*dst = ms[0].GetCounter().GetValue()
	}
}

// labelValue returns the value for the named label, or "" if not found.
+func labelValue(labels []*dto.LabelPair, name string) string { + for _, l := range labels { + if l.GetName() == name { + return l.GetValue() + } + } + return "" +} diff --git a/admin/metrics/parse_test.go b/admin/metrics/parse_test.go new file mode 100644 index 0000000..8487d52 --- /dev/null +++ b/admin/metrics/parse_test.go @@ -0,0 +1,174 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "testing" + "time" +) + +const sampleMetrics = `# HELP chaperone_requests_total Total number of requests processed +# TYPE chaperone_requests_total counter +chaperone_requests_total{vendor_id="acme",status_class="2xx",method="GET"} 1500 +chaperone_requests_total{vendor_id="acme",status_class="2xx",method="POST"} 300 +chaperone_requests_total{vendor_id="acme",status_class="4xx",method="GET"} 50 +chaperone_requests_total{vendor_id="acme",status_class="5xx",method="POST"} 5 +chaperone_requests_total{vendor_id="beta",status_class="2xx",method="GET"} 800 +chaperone_requests_total{vendor_id="beta",status_class="4xx",method="GET"} 20 +# HELP chaperone_panics_total Total number of recovered panics +# TYPE chaperone_panics_total counter +chaperone_panics_total 2 +# HELP chaperone_request_duration_seconds Total request duration +# TYPE chaperone_request_duration_seconds histogram +chaperone_request_duration_seconds_bucket{vendor_id="acme",le="0.05"} 600 +chaperone_request_duration_seconds_bucket{vendor_id="acme",le="0.1"} 1000 +chaperone_request_duration_seconds_bucket{vendor_id="acme",le="0.25"} 1500 +chaperone_request_duration_seconds_bucket{vendor_id="acme",le="0.5"} 1700 +chaperone_request_duration_seconds_bucket{vendor_id="acme",le="1"} 1800 +chaperone_request_duration_seconds_bucket{vendor_id="acme",le="+Inf"} 1855 +chaperone_request_duration_seconds_sum{vendor_id="acme"} 250.5 +chaperone_request_duration_seconds_count{vendor_id="acme"} 1855 +chaperone_request_duration_seconds_bucket{vendor_id="beta",le="0.05"} 400 
+chaperone_request_duration_seconds_bucket{vendor_id="beta",le="0.1"} 700 +chaperone_request_duration_seconds_bucket{vendor_id="beta",le="0.25"} 780 +chaperone_request_duration_seconds_bucket{vendor_id="beta",le="0.5"} 800 +chaperone_request_duration_seconds_bucket{vendor_id="beta",le="1"} 810 +chaperone_request_duration_seconds_bucket{vendor_id="beta",le="+Inf"} 820 +chaperone_request_duration_seconds_sum{vendor_id="beta"} 80.0 +chaperone_request_duration_seconds_count{vendor_id="beta"} 820 +# HELP chaperone_active_connections Number of active connections +# TYPE chaperone_active_connections gauge +chaperone_active_connections 15 +` + +func TestParse_FullSample_ExtractsAllMetrics(t *testing.T) { + t.Parallel() + now := time.Now() + + snap, err := Parse([]byte(sampleMetrics), now) + if err != nil { + t.Fatalf("Parse() error = %v", err) + } + + if snap.Time != now { + t.Errorf("Time = %v, want %v", snap.Time, now) + } + if snap.ActiveConnections != 15 { + t.Errorf("ActiveConnections = %v, want 15", snap.ActiveConnections) + } + if snap.PanicsTotal != 2 { + t.Errorf("PanicsTotal = %v, want 2", snap.PanicsTotal) + } + if len(snap.Vendors) != 2 { + t.Fatalf("Vendors count = %d, want 2", len(snap.Vendors)) + } +} + +func TestParse_Requests_SumsAcrossMethods(t *testing.T) { + t.Parallel() + + snap, err := Parse([]byte(sampleMetrics), time.Now()) + if err != nil { + t.Fatalf("Parse() error = %v", err) + } + + acme := snap.Vendors["acme"] + if acme == nil { + t.Fatal("missing vendor 'acme'") + } + // GET 1500 + POST 300 (2xx) + GET 50 (4xx) + POST 5 (5xx) = 1855 + if acme.RequestsTotal != 1855 { + t.Errorf("acme.RequestsTotal = %v, want 1855", acme.RequestsTotal) + } + // 4xx: 50 + 5xx: 5 = 55 + if acme.RequestsErrors != 55 { + t.Errorf("acme.RequestsErrors = %v, want 55", acme.RequestsErrors) + } + + beta := snap.Vendors["beta"] + if beta == nil { + t.Fatal("missing vendor 'beta'") + } + // GET 800 (2xx) + GET 20 (4xx) = 820 + if beta.RequestsTotal != 820 { + 
t.Errorf("beta.RequestsTotal = %v, want 820", beta.RequestsTotal) + } + if beta.RequestsErrors != 20 { + t.Errorf("beta.RequestsErrors = %v, want 20", beta.RequestsErrors) + } +} + +func TestParse_Histogram_ParsesBucketsCorrectly(t *testing.T) { + t.Parallel() + + snap, err := Parse([]byte(sampleMetrics), time.Now()) + if err != nil { + t.Fatalf("Parse() error = %v", err) + } + + acme := snap.Vendors["acme"] + h := acme.Duration + if h.Count != 1855 { + t.Errorf("acme histogram Count = %v, want 1855", h.Count) + } + if h.Sum != 250.5 { + t.Errorf("acme histogram Sum = %v, want 250.5", h.Sum) + } + // +Inf bucket should be stripped + if len(h.Buckets) != 5 { + t.Fatalf("acme histogram Buckets = %d, want 5", len(h.Buckets)) + } + if h.Buckets[0].UpperBound != 0.05 { + t.Errorf("first bucket UpperBound = %v, want 0.05", h.Buckets[0].UpperBound) + } + if h.Buckets[0].Count != 600 { + t.Errorf("first bucket Count = %v, want 600", h.Buckets[0].Count) + } +} + +func TestParse_EmptyInput_ReturnsEmptySnapshot(t *testing.T) { + t.Parallel() + + snap, err := Parse([]byte(""), time.Now()) + if err != nil { + t.Fatalf("Parse() error = %v", err) + } + if len(snap.Vendors) != 0 { + t.Errorf("Vendors count = %d, want 0", len(snap.Vendors)) + } + if snap.ActiveConnections != 0 { + t.Errorf("ActiveConnections = %v, want 0", snap.ActiveConnections) + } +} + +func TestParse_UnknownMetrics_Ignored(t *testing.T) { + t.Parallel() + input := `# HELP custom_metric A custom metric +# TYPE custom_metric gauge +custom_metric 42 +# HELP chaperone_active_connections Number of active connections +# TYPE chaperone_active_connections gauge +chaperone_active_connections 7 +` + snap, err := Parse([]byte(input), time.Now()) + if err != nil { + t.Fatalf("Parse() error = %v", err) + } + if snap.ActiveConnections != 7 { + t.Errorf("ActiveConnections = %v, want 7", snap.ActiveConnections) + } +} + +func TestParse_MalformedInput_ReturnsError(t *testing.T) { + t.Parallel() + // Invalid Prometheus format: 
duplicate TYPE declaration + input := `# TYPE chaperone_active_connections gauge +# TYPE chaperone_active_connections counter +chaperone_active_connections 7 +` + _, err := Parse([]byte(input), time.Now()) + if err == nil { + t.Error("expected error for malformed input, got nil") + } +} diff --git a/admin/metrics/ring.go b/admin/metrics/ring.go new file mode 100644 index 0000000..7e1e468 --- /dev/null +++ b/admin/metrics/ring.go @@ -0,0 +1,48 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +// Ring is a fixed-capacity circular buffer of Snapshots. +// It is not safe for concurrent use; the Collector handles synchronization. +type Ring struct { + data []Snapshot + write int // next write position + count int // number of stored snapshots +} + +// NewRing creates a Ring with the given capacity. +func NewRing(capacity int) *Ring { + return &Ring{data: make([]Snapshot, capacity)} +} + +// Push adds a snapshot, overwriting the oldest if at capacity. +func (r *Ring) Push(s Snapshot) { + r.data[r.write] = s + r.write = (r.write + 1) % len(r.data) + if r.count < len(r.data) { + r.count++ + } +} + +// Len returns the number of stored snapshots. +func (r *Ring) Len() int { return r.count } + +// At returns the i-th snapshot where 0 is the oldest. +// Panics if i is out of range. +func (r *Ring) At(i int) Snapshot { + if i < 0 || i >= r.count { + panic("ring: index out of range") + } + start := (r.write - r.count + len(r.data)) % len(r.data) + return r.data[(start+i)%len(r.data)] +} + +// Last returns the most recent snapshot and true, or a zero Snapshot and false +// if the ring is empty. 
+func (r *Ring) Last() (Snapshot, bool) { + if r.count == 0 { + return Snapshot{}, false + } + return r.At(r.count - 1), true +} diff --git a/admin/metrics/ring_test.go b/admin/metrics/ring_test.go new file mode 100644 index 0000000..a24848f --- /dev/null +++ b/admin/metrics/ring_test.go @@ -0,0 +1,95 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + "testing" + "time" +) + +func TestRing_PushAndLen(t *testing.T) { + t.Parallel() + r := NewRing(5) + + if r.Len() != 0 { + t.Fatalf("Len() = %d, want 0", r.Len()) + } + + for i := 0; i < 3; i++ { + r.Push(Snapshot{Time: time.Unix(int64(i), 0)}) + } + if r.Len() != 3 { + t.Fatalf("Len() = %d, want 3", r.Len()) + } +} + +func TestRing_At_ReturnsOldestFirst(t *testing.T) { + t.Parallel() + r := NewRing(5) + + for i := 0; i < 3; i++ { + r.Push(Snapshot{Time: time.Unix(int64(i+10), 0)}) + } + + if got := r.At(0).Time.Unix(); got != 10 { + t.Errorf("At(0).Time = %d, want 10", got) + } + if got := r.At(2).Time.Unix(); got != 12 { + t.Errorf("At(2).Time = %d, want 12", got) + } +} + +func TestRing_Wraparound_OverwritesOldest(t *testing.T) { + t.Parallel() + r := NewRing(3) + + for i := 0; i < 5; i++ { + r.Push(Snapshot{Time: time.Unix(int64(i), 0)}) + } + + if r.Len() != 3 { + t.Fatalf("Len() = %d, want 3", r.Len()) + } + // After pushing 0,1,2,3,4 into capacity 3, oldest should be 2. 
+ if got := r.At(0).Time.Unix(); got != 2 { + t.Errorf("At(0).Time = %d, want 2 (oldest after wrap)", got) + } + if got := r.At(2).Time.Unix(); got != 4 { + t.Errorf("At(2).Time = %d, want 4 (newest)", got) + } +} + +func TestRing_Last_ReturnsNewest(t *testing.T) { + t.Parallel() + r := NewRing(5) + + _, ok := r.Last() + if ok { + t.Error("Last() on empty ring should return false") + } + + r.Push(Snapshot{Time: time.Unix(100, 0)}) + r.Push(Snapshot{Time: time.Unix(200, 0)}) + + last, ok := r.Last() + if !ok { + t.Fatal("Last() returned false on non-empty ring") + } + if last.Time.Unix() != 200 { + t.Errorf("Last().Time = %d, want 200", last.Time.Unix()) + } +} + +func TestRing_At_PanicsOnOutOfRange(t *testing.T) { + t.Parallel() + r := NewRing(3) + r.Push(Snapshot{}) + + defer func() { + if recover() == nil { + t.Error("expected panic for out of range index") + } + }() + r.At(1) // only 1 element, index 1 is out of range +} diff --git a/admin/poller/poller.go b/admin/poller/poller.go new file mode 100644 index 0000000..68d6e64 --- /dev/null +++ b/admin/poller/poller.go @@ -0,0 +1,312 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package poller + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "math/rand/v2" + "net" + "net/http" + "sync" + "time" + + "github.com/cloudblue/chaperone/admin/metrics" + "github.com/cloudblue/chaperone/admin/store" +) + +const ( + failuresUntilUnreachable = 3 + maxJitter = time.Second +) + +// ProbeResult holds the outcome of a single proxy probe. +type ProbeResult struct { + OK bool `json:"ok"` + Health string `json:"health,omitempty"` + Version string `json:"version,omitempty"` + Error string `json:"error,omitempty"` +} + +// Probe performs a one-off health and version check against a proxy admin port. 
+func Probe(ctx context.Context, client *http.Client, address string) ProbeResult { + health, err := fetchHealth(ctx, client, address) + if err != nil { + return ProbeResult{OK: false, Error: friendlyError(err)} + } + + version, err := fetchVersion(ctx, client, address) + if err != nil { + return ProbeResult{OK: false, Error: friendlyError(err)} + } + + return ProbeResult{OK: true, Health: health, Version: version} +} + +// Poller periodically polls registered proxy instances for health and version. +type Poller struct { + store *store.Store + collector *metrics.Collector + client *http.Client + interval time.Duration + timeout time.Duration + + mu sync.Mutex + failures map[int64]int // instance ID → consecutive failure count +} + +// New creates a Poller with the given configuration. +// If collector is non-nil, each successful poll also scrapes /metrics. +func New(st *store.Store, collector *metrics.Collector, interval, timeout time.Duration) *Poller { + return &Poller{ + store: st, + collector: collector, + client: &http.Client{Timeout: timeout}, + interval: interval, + timeout: timeout, + failures: make(map[int64]int), + } +} + +// Run starts the polling loop. It blocks until the context is cancelled. +func (p *Poller) Run(ctx context.Context) { + slog.Info("poller started", "interval", p.interval, "timeout", p.timeout) + + // Run an immediate first poll, then tick on interval. + p.pollAll(ctx) + + ticker := time.NewTicker(p.interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + slog.Info("poller stopped") + return + case <-ticker.C: + p.pollAll(ctx) + } + } +} + +func (p *Poller) pollAll(ctx context.Context) { + instances, err := p.store.ListInstances(ctx) + if err != nil { + slog.Error("poller: listing instances", "error", err) + return + } + // Prune failure counts and stale metric buffers. 
+ p.pruneFailures(instances) + p.pruneCollector(instances) + + if len(instances) == 0 { + return + } + + type result struct { + id int64 + probe ProbeResult + metrics []byte // raw /metrics text, nil if unavailable + } + + results := make(chan result, len(instances)) + var wg sync.WaitGroup + + for i := range instances { + inst := &instances[i] + wg.Add(1) + go func() { + defer wg.Done() + // Jitter: ±1s random offset to spread scrapes. + jitter := time.Duration(rand.Int64N(int64(2*maxJitter))) - maxJitter // #nosec G404 -- jitter doesn't need cryptographic randomness //nolint:gosec + sleep(ctx, jitter) + + pr := Probe(ctx, p.client, inst.Address) + var raw []byte + if pr.OK && p.collector != nil { + raw = fetchMetrics(ctx, p.client, inst.Address) + } + results <- result{id: inst.ID, probe: pr, metrics: raw} + }() + } + + go func() { + wg.Wait() + close(results) + }() + + now := time.Now() + for r := range results { + p.applyResult(ctx, r.id, r.probe) + if r.metrics != nil { + if err := p.collector.RecordScrape(r.id, r.metrics, now); err != nil { + slog.Warn("poller: parsing metrics", "id", r.id, "error", err) + } + } + } +} + +func (p *Poller) pruneFailures(active []store.Instance) { + p.mu.Lock() + defer p.mu.Unlock() + + for id := range p.failures { + found := false + for j := range active { + if active[j].ID == id { + found = true + break + } + } + if !found { + delete(p.failures, id) + } + } +} + +func (p *Poller) applyResult(ctx context.Context, id int64, pr ProbeResult) { + p.mu.Lock() + defer p.mu.Unlock() + + if pr.OK { + p.failures[id] = 0 + if err := p.store.SetInstanceHealthy(ctx, id, pr.Version); err != nil { + slog.Error("poller: setting instance healthy", "id", id, "error", err) + } + return + } + + p.failures[id]++ + count := p.failures[id] + slog.Debug("poller: probe failed", "id", id, "consecutive_failures", count, "error", pr.Error) + + if count >= failuresUntilUnreachable { + if err := p.store.SetInstanceUnreachable(ctx, id); err != nil { + 
slog.Error("poller: setting instance unreachable", "id", id, "error", err)
+		}
+	}
+}
+
+// fetchHealth calls GET /_ops/health and returns the status field.
+func fetchHealth(ctx context.Context, client *http.Client, address string) (string, error) {
+	url := fmt.Sprintf("http://%s/_ops/health", address)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
+	if err != nil {
+		return "", fmt.Errorf("creating health request: %w", err)
+	}
+
+	resp, err := client.Do(req) // #nosec G107 -- address comes from admin-managed instance registry
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("health endpoint returned %d", resp.StatusCode)
+	}
+
+	var body struct {
+		Status string `json:"status"`
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
+		return "", fmt.Errorf("decoding health response: %w", err)
+	}
+	return body.Status, nil
+}
+
+// fetchVersion calls GET /_ops/version and returns the version field. 
+func fetchVersion(ctx context.Context, client *http.Client, address string) (string, error) {
+	url := fmt.Sprintf("http://%s/_ops/version", address)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
+	if err != nil {
+		return "", fmt.Errorf("creating version request: %w", err)
+	}
+
+	resp, err := client.Do(req) // #nosec G107 -- address comes from admin-managed instance registry
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("version endpoint returned %d", resp.StatusCode)
+	}
+
+	var body struct {
+		Version string `json:"version"`
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
+		return "", fmt.Errorf("decoding version response: %w", err)
+	}
+	return body.Version, nil
+}
+
+func (p *Poller) pruneCollector(active []store.Instance) {
+	if p.collector == nil {
+		return
+	}
+	ids := make(map[int64]bool, len(active))
+	for i := range active {
+		ids[active[i].ID] = true
+	}
+	p.collector.Prune(ids)
+}
+
+// fetchMetrics calls GET /metrics on a proxy admin port and returns the raw body.
+func fetchMetrics(ctx context.Context, client *http.Client, address string) []byte {
+	url := fmt.Sprintf("http://%s/metrics", address)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
+	if err != nil {
+		return nil
+	}
+
+	resp, err := client.Do(req) // #nosec G107 -- address comes from admin-managed instance registry
+	if err != nil {
+		return nil
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil
+	}
+
+	data, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) // 1 MB limit
+	if err != nil {
+		return nil
+	}
+	return data
+}
+
+// friendlyError converts network errors into user-facing messages.
+func friendlyError(err error) string {
+	var netErr net.Error
+	if errors.As(err, &netErr) && netErr.Timeout() {
+		return "Connection timed out. 
Check that the proxy is running and the address is correct." + } + + var opErr *net.OpError + if errors.As(err, &opErr) { + return fmt.Sprintf("Connection failed: %s. The proxy admin server may be bound to localhost only; check admin_addr in the proxy configuration.", opErr.Err) + } + + return fmt.Sprintf("Connection failed: %s", err) +} + +// sleep waits for the given duration or until the context is cancelled. +// Negative durations return immediately. +func sleep(ctx context.Context, d time.Duration) { + if d <= 0 { + return + } + t := time.NewTimer(d) + defer t.Stop() + select { + case <-ctx.Done(): + case <-t.C: + } +} diff --git a/admin/poller/poller_test.go b/admin/poller/poller_test.go new file mode 100644 index 0000000..84ba627 --- /dev/null +++ b/admin/poller/poller_test.go @@ -0,0 +1,377 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package poller + +import ( + "context" + "net/http" + "net/http/httptest" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/cloudblue/chaperone/admin/metrics" + "github.com/cloudblue/chaperone/admin/store" +) + +func openTestStore(t *testing.T) *store.Store { + t.Helper() + dbPath := filepath.Join(t.TempDir(), "test.db") + st, err := store.Open(context.Background(), dbPath) + if err != nil { + t.Fatalf("Open(%q) failed: %v", dbPath, err) + } + t.Cleanup(func() { st.Close() }) + return st +} + +func fakeProxy(t *testing.T) *httptest.Server { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + switch r.URL.Path { + case "/_ops/health": + w.Write([]byte(`{"status":"alive"}`)) + case "/_ops/version": + w.Write([]byte(`{"version":"1.0.0"}`)) + default: + http.NotFound(w, r) + } + })) + t.Cleanup(srv.Close) + return srv +} + +const sampleMetrics = `# HELP chaperone_requests_total Total number of requests processed +# TYPE chaperone_requests_total counter 
+chaperone_requests_total{vendor_id="acme",status_class="2xx",method="GET"} 1000 +# HELP chaperone_active_connections Number of active connections +# TYPE chaperone_active_connections gauge +chaperone_active_connections 5 +` + +func fakeProxyWithMetrics(t *testing.T) *httptest.Server { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_ops/health": + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"status":"alive"}`)) + case "/_ops/version": + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"version":"1.0.0"}`)) + case "/metrics": + w.Header().Set("Content-Type", "text/plain") + w.Write([]byte(sampleMetrics)) + default: + http.NotFound(w, r) + } + })) + t.Cleanup(srv.Close) + return srv +} + +func TestProbe_HealthyProxy_ReturnsOK(t *testing.T) { + t.Parallel() + proxy := fakeProxy(t) + addr := strings.TrimPrefix(proxy.URL, "http://") + + result := Probe(context.Background(), &http.Client{Timeout: 2 * time.Second}, addr) + + if !result.OK { + t.Fatalf("expected OK=true, got error: %s", result.Error) + } + if result.Health != "alive" { + t.Errorf("Health = %q, want %q", result.Health, "alive") + } + if result.Version != "1.0.0" { + t.Errorf("Version = %q, want %q", result.Version, "1.0.0") + } +} + +func TestProbe_UnreachableAddress_ReturnsError(t *testing.T) { + t.Parallel() + + result := Probe(context.Background(), &http.Client{Timeout: 1 * time.Second}, "127.0.0.1:1") + + if result.OK { + t.Error("expected OK=false for unreachable address") + } + if result.Error == "" { + t.Error("expected non-empty error") + } +} + +func TestProbe_HealthEndpointError_ReturnsError(t *testing.T) { + t.Parallel() + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer srv.Close() + addr := strings.TrimPrefix(srv.URL, "http://") + + result := 
Probe(context.Background(), &http.Client{Timeout: 2 * time.Second}, addr) + + if result.OK { + t.Error("expected OK=false for error status") + } +} + +func TestPoller_SinglePoll_SetsHealthy(t *testing.T) { + t.Parallel() + st := openTestStore(t) + proxy := fakeProxy(t) + addr := strings.TrimPrefix(proxy.URL, "http://") + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", addr) + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, nil, 1*time.Hour, 2*time.Second) // long interval; we call pollAll manually. + p.pollAll(ctx) + + got, err := st.GetInstance(ctx, inst.ID) + if err != nil { + t.Fatalf("GetInstance() error = %v", err) + } + if got.Status != "healthy" { + t.Errorf("Status = %q, want %q", got.Status, "healthy") + } + if got.Version != "1.0.0" { + t.Errorf("Version = %q, want %q", got.Version, "1.0.0") + } +} + +func TestPoller_ThreeFailures_SetsUnreachable(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", "127.0.0.1:1") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, nil, 1*time.Hour, 500*time.Millisecond) + + // Poll 3 times to reach unreachable threshold. + for i := 0; i < failuresUntilUnreachable; i++ { + p.pollAll(ctx) + } + + got, err := st.GetInstance(ctx, inst.ID) + if err != nil { + t.Fatalf("GetInstance() error = %v", err) + } + if got.Status != "unreachable" { + t.Errorf("Status = %q, want %q after %d failures", got.Status, "unreachable", failuresUntilUnreachable) + } +} + +func TestPoller_TwoFailures_StaysUnknown(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", "127.0.0.1:1") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, nil, 1*time.Hour, 500*time.Millisecond) + + // Poll only twice — should not yet transition to unreachable. 
+ for i := 0; i < failuresUntilUnreachable-1; i++ { + p.pollAll(ctx) + } + + got, err := st.GetInstance(ctx, inst.ID) + if err != nil { + t.Fatalf("GetInstance() error = %v", err) + } + if got.Status != "unknown" { + t.Errorf("Status = %q, want %q after %d failures", got.Status, "unknown", failuresUntilUnreachable-1) + } +} + +func TestPoller_RecoveryAfterUnreachable_SetsHealthy(t *testing.T) { + t.Parallel() + st := openTestStore(t) + proxy := fakeProxy(t) + addr := strings.TrimPrefix(proxy.URL, "http://") + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", "127.0.0.1:1") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, nil, 1*time.Hour, 500*time.Millisecond) + + // Drive to unreachable. + for i := 0; i < failuresUntilUnreachable; i++ { + p.pollAll(ctx) + } + + // Now point instance to the live proxy. + _, updateErr := st.UpdateInstance(ctx, inst.ID, "test-proxy", addr) + if updateErr != nil { + t.Fatalf("UpdateInstance() error = %v", updateErr) + } + + // Single success should recover. + p.pollAll(ctx) + + got, err := st.GetInstance(ctx, inst.ID) + if err != nil { + t.Fatalf("GetInstance() error = %v", err) + } + if got.Status != "healthy" { + t.Errorf("Status = %q, want %q after recovery", got.Status, "healthy") + } +} + +func TestPoller_DeletedInstance_PrunesFailures(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", "127.0.0.1:1") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, nil, 1*time.Hour, 500*time.Millisecond) + + // Accumulate failures. + p.pollAll(ctx) + + p.mu.Lock() + count := p.failures[inst.ID] + p.mu.Unlock() + if count != 1 { + t.Fatalf("failures[%d] = %d, want 1", inst.ID, count) + } + + // Delete the instance and poll again. 
+ if err := st.DeleteInstance(ctx, inst.ID); err != nil { + t.Fatalf("DeleteInstance() error = %v", err) + } + p.pollAll(ctx) + + p.mu.Lock() + _, exists := p.failures[inst.ID] + p.mu.Unlock() + if exists { + t.Errorf("failures[%d] still present after instance deletion", inst.ID) + } +} + +func TestPoller_RunStopsOnContextCancel(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + p := New(st, nil, 50*time.Millisecond, 500*time.Millisecond) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + p.Run(ctx) + close(done) + }() + + cancel() + + select { + case <-done: + // OK — Run returned. + case <-time.After(2 * time.Second): + t.Fatal("Run did not stop after context cancellation") + } +} + +func TestPoller_MetricsScraping_RecordsToCollector(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + proxy := fakeProxyWithMetrics(t) + addr := strings.TrimPrefix(proxy.URL, "http://") + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", addr) + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, c, 1*time.Hour, 2*time.Second) + p.pollAll(ctx) + + // Verify the collector received a snapshot. 
+ im := c.GetInstanceMetrics(inst.ID) + if im == nil { + t.Fatal("expected metrics to be recorded after poll") + } + if im.DataPoints != 1 { + t.Errorf("DataPoints = %d, want 1", im.DataPoints) + } + if im.ActiveConnections != 5 { + t.Errorf("ActiveConnections = %v, want 5", im.ActiveConnections) + } +} + +func TestPoller_MetricsScraping_SkippedOnHealthFailure(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", "127.0.0.1:1") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, c, 1*time.Hour, 500*time.Millisecond) + p.pollAll(ctx) + + // Collector should have no data — probe failed, so /metrics was not fetched. + if im := c.GetInstanceMetrics(inst.ID); im != nil { + t.Error("expected no metrics for unreachable instance") + } +} + +func TestPoller_DeletedInstance_PrunesCollector(t *testing.T) { + t.Parallel() + st := openTestStore(t) + c := metrics.NewCollector(10) + proxy := fakeProxyWithMetrics(t) + addr := strings.TrimPrefix(proxy.URL, "http://") + + ctx := context.Background() + inst, err := st.CreateInstance(ctx, "test-proxy", addr) + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + p := New(st, c, 1*time.Hour, 2*time.Second) + p.pollAll(ctx) + + // Verify data exists. + if im := c.GetInstanceMetrics(inst.ID); im == nil { + t.Fatal("expected metrics after poll") + } + + // Delete instance and poll again — collector should be pruned. 
+ if err := st.DeleteInstance(ctx, inst.ID); err != nil { + t.Fatalf("DeleteInstance() error = %v", err) + } + p.pollAll(ctx) + + if im := c.GetInstanceMetrics(inst.ID); im != nil { + t.Error("expected metrics to be pruned after instance deletion") + } +} diff --git a/admin/server.go b/admin/server.go new file mode 100644 index 0000000..e6f6bb7 --- /dev/null +++ b/admin/server.go @@ -0,0 +1,150 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package admin + +import ( + "context" + "fmt" + "io/fs" + "log/slog" + "net/http" + "path" + "strings" + "time" + + "github.com/cloudblue/chaperone/admin/api" + "github.com/cloudblue/chaperone/admin/auth" + "github.com/cloudblue/chaperone/admin/config" + "github.com/cloudblue/chaperone/admin/metrics" + "github.com/cloudblue/chaperone/admin/store" +) + +// Server is the admin portal HTTP server. +type Server struct { + httpServer *http.Server + config *config.Config + store *store.Store + collector *metrics.Collector + authService *auth.Service +} + +// NewServer creates a new admin portal server. +func NewServer(cfg *config.Config, st *store.Store, collector *metrics.Collector) (*Server, error) { + mux := http.NewServeMux() + + authService := auth.NewService(st, cfg.Session.MaxAge.Unwrap(), cfg.Session.IdleTimeout.Unwrap()) + secureCookies := cfg.Server.SecureCookies + + handler := securityHeaders(auth.RequireAuth(authService, auth.CSRFProtection(mux))) + + s := &Server{ + httpServer: &http.Server{ + Addr: cfg.Server.Addr, + Handler: handler, + ReadHeaderTimeout: 5 * time.Second, + ReadTimeout: 15 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 60 * time.Second, + }, + config: cfg, + store: st, + collector: collector, + authService: authService, + } + + if err := s.routes(mux, authService, secureCookies); err != nil { + return nil, fmt.Errorf("setting up routes: %w", err) + } + return s, nil +} + +// ListenAndServe starts the HTTP server. 
+func (s *Server) ListenAndServe() error { + return s.httpServer.ListenAndServe() +} + +// Shutdown gracefully shuts down the server. +func (s *Server) Shutdown(ctx context.Context) error { + return s.httpServer.Shutdown(ctx) +} + +// SweepRateLimiter removes expired entries from the login rate limiter. +func (s *Server) SweepRateLimiter() { + s.authService.SweepRateLimiter() +} + +func (s *Server) routes(mux *http.ServeMux, authService *auth.Service, secureCookies bool) error { + // API health check for the portal itself. + mux.HandleFunc("GET /api/health", s.handleHealth) + + // Auth endpoints (login, logout, password change). + authHandler := api.NewAuthHandler(authService, s.store, secureCookies, s.config.Session.MaxAge.Unwrap()) + authHandler.Register(mux) + + // Instance CRUD + test connection. + instances := api.NewInstanceHandler(s.store, s.config.Scraper.Timeout.Unwrap()) + instances.Register(mux) + + // Metrics API. + metricsAPI := api.NewMetricsHandler(s.store, s.collector) + metricsAPI.Register(mux) + + // Audit log API. + audit := api.NewAuditHandler(s.store) + audit.Register(mux) + + // SPA serving — all non-API routes serve the Vue app. + assets, err := loadUIAssets() + if err != nil { + return fmt.Errorf("loading UI assets: %w", err) + } + mux.Handle("/", spaHandler(assets)) + return nil +} + +func (s *Server) handleHealth(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write([]byte(`{"status":"ok"}`)); err != nil { + slog.Error("writing health response", "error", err) + } +} + +// securityHeaders adds standard security headers to all responses. 
+func securityHeaders(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Security-Policy", "default-src 'self'; style-src 'self' 'unsafe-inline'") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.Header().Set("X-Frame-Options", "DENY") + w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin") + w.Header().Set("Permissions-Policy", "camera=(), microphone=(), geolocation=()") + next.ServeHTTP(w, r) + }) +} + +// spaHandler serves static files from the embedded filesystem, +// falling back to index.html for client-side routing. +func spaHandler(assets fs.FS) http.Handler { + fileServer := http.FileServer(http.FS(assets)) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + fileServer.ServeHTTP(w, r) + return + } + + // API routes that didn't match a registered handler should 404, + // not fall through to the SPA. + if strings.HasPrefix(r.URL.Path, "/api/") { + http.NotFound(w, r) + return + } + + // Clean and strip leading slash for fs.Stat. + name := path.Clean(r.URL.Path[1:]) + if _, err := fs.Stat(assets, name); err != nil { + // File not found — serve index.html for client-side routing. 
+ r.URL.Path = "/" + } + fileServer.ServeHTTP(w, r) + }) +} diff --git a/admin/server_test.go b/admin/server_test.go new file mode 100644 index 0000000..5593b81 --- /dev/null +++ b/admin/server_test.go @@ -0,0 +1,156 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package admin + +import ( + "net/http" + "net/http/httptest" + "testing" + "testing/fstest" +) + +func TestHandleHealth_ReturnsOK_WithJSON(t *testing.T) { + t.Parallel() + + // Arrange + s := &Server{} + req := httptest.NewRequest(http.MethodGet, "/api/health", nil) + rec := httptest.NewRecorder() + + // Act + s.handleHealth(rec, req) + + // Assert + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } + if ct := rec.Header().Get("Content-Type"); ct != "application/json" { + t.Errorf("Content-Type = %q, want %q", ct, "application/json") + } + if body := rec.Body.String(); body != `{"status":"ok"}` { + t.Errorf("body = %q, want %q", body, `{"status":"ok"}`) + } +} + +func TestSPAHandler_Root_ServesIndexHTML(t *testing.T) { + t.Parallel() + + // Arrange + assets := fstest.MapFS{ + "index.html": &fstest.MapFile{Data: []byte("app")}, + } + handler := spaHandler(assets) + req := httptest.NewRequest(http.MethodGet, "/", nil) + rec := httptest.NewRecorder() + + // Act + handler.ServeHTTP(rec, req) + + // Assert + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } + if body := rec.Body.String(); body != "app" { + t.Errorf("body = %q, want %q", body, "app") + } +} + +func TestSPAHandler_UnknownRoute_FallsBackToIndex(t *testing.T) { + t.Parallel() + + // Arrange + assets := fstest.MapFS{ + "index.html": &fstest.MapFile{Data: []byte("spa")}, + } + handler := spaHandler(assets) + req := httptest.NewRequest(http.MethodGet, "/dashboard/some-page", nil) + rec := httptest.NewRecorder() + + // Act + handler.ServeHTTP(rec, req) + + // Assert + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", 
rec.Code, http.StatusOK) + } + if body := rec.Body.String(); body != "spa" { + t.Errorf("body = %q, want %q", body, "spa") + } +} + +func TestSecurityHeaders_SetOnAllResponses(t *testing.T) { + t.Parallel() + + // Arrange + inner := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + }) + handler := securityHeaders(inner) + req := httptest.NewRequest(http.MethodGet, "/", nil) + rec := httptest.NewRecorder() + + // Act + handler.ServeHTTP(rec, req) + + // Assert + tests := []struct { + header string + want string + }{ + {"Content-Security-Policy", "default-src 'self'; style-src 'self' 'unsafe-inline'"}, + {"X-Content-Type-Options", "nosniff"}, + {"X-Frame-Options", "DENY"}, + {"Referrer-Policy", "strict-origin-when-cross-origin"}, + {"Permissions-Policy", "camera=(), microphone=(), geolocation=()"}, + } + for _, tt := range tests { + if got := rec.Header().Get(tt.header); got != tt.want { + t.Errorf("%s = %q, want %q", tt.header, got, tt.want) + } + } +} + +func TestSPAHandler_UnmatchedAPIRoute_Returns404(t *testing.T) { + t.Parallel() + + // Arrange + assets := fstest.MapFS{ + "index.html": &fstest.MapFile{Data: []byte("spa")}, + } + handler := spaHandler(assets) + req := httptest.NewRequest(http.MethodGet, "/api/nonexistent", nil) + rec := httptest.NewRecorder() + + // Act + handler.ServeHTTP(rec, req) + + // Assert + if rec.Code != http.StatusNotFound { + t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound) + } +} + +func TestSPAHandler_ExistingFile_ServesFile(t *testing.T) { + t.Parallel() + + // Arrange + assets := fstest.MapFS{ + "index.html": &fstest.MapFile{Data: []byte("app")}, + "assets/style.css": &fstest.MapFile{Data: []byte("body{}")}, + } + handler := spaHandler(assets) + req := httptest.NewRequest(http.MethodGet, "/assets/style.css", nil) + rec := httptest.NewRecorder() + + // Act + handler.ServeHTTP(rec, req) + + // Assert + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", 
rec.Code, http.StatusOK) + } + if body := rec.Body.String(); body != "body{}" { + t.Errorf("body = %q, want %q", body, "body{}") + } +} diff --git a/admin/store/audit.go b/admin/store/audit.go new file mode 100644 index 0000000..b7ff251 --- /dev/null +++ b/admin/store/audit.go @@ -0,0 +1,162 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "fmt" + "strings" + "time" +) + +// AuditEntry represents a single audit log record. +type AuditEntry struct { + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + Username string `json:"user"` + Action string `json:"action"` + InstanceID *int64 `json:"instance_id,omitempty"` + Detail string `json:"detail"` + CreatedAt time.Time `json:"created_at"` +} + +// AuditFilter specifies query parameters for listing audit entries. +type AuditFilter struct { + UserID *int64 + Action string + InstanceID *int64 + From *time.Time + To *time.Time + Query string // full-text search on detail + Page int + PerPage int +} + +// AuditPage is a paginated response of audit entries. +type AuditPage struct { + Items []AuditEntry `json:"items"` + Total int `json:"total"` + Page int `json:"page"` +} + +// InsertAuditEntry records an action in the audit log. +func (s *Store) InsertAuditEntry(ctx context.Context, userID int64, action string, instanceID *int64, detail string) error { + _, err := s.db.ExecContext(ctx, + `INSERT INTO audit_log (user_id, action, instance_id, detail) VALUES (?, ?, ?, ?)`, + userID, action, instanceID, detail) + if err != nil { + return fmt.Errorf("inserting audit entry: %w", err) + } + return nil +} + +// ListAuditEntries returns a paginated, filtered list of audit entries. 
+func (s *Store) ListAuditEntries(ctx context.Context, filter AuditFilter) (*AuditPage, error) { + if filter.Page < 1 { + filter.Page = 1 + } + if filter.PerPage < 1 { + filter.PerPage = 20 + } + + conditions, args := buildAuditConditions(filter) + joins := "JOIN users u ON a.user_id = u.id" + if filter.Query != "" { + joins += " JOIN audit_log_fts f ON a.id = f.rowid" + } + + where := "1=1" + if len(conditions) > 0 { + where = strings.Join(conditions, " AND ") + } + + // Count total matching entries. + // Dynamic SQL is safe: joins and where are built from fixed strings, not user input. + countQuery := fmt.Sprintf("SELECT COUNT(*) FROM audit_log a %s WHERE %s", joins, where) //nolint:gosec // G201 -- see above + var total int + if err := s.db.QueryRowContext(ctx, countQuery, args...).Scan(&total); err != nil { + return nil, fmt.Errorf("counting audit entries: %w", err) + } + + // Fetch the page. + offset := (filter.Page - 1) * filter.PerPage + dataQuery := fmt.Sprintf( //nolint:gosec // G201 -- joins/where built from fixed strings + `SELECT a.id, a.user_id, u.username, a.action, a.instance_id, a.detail, a.created_at + FROM audit_log a %s + WHERE %s + ORDER BY a.created_at DESC + LIMIT ? OFFSET ?`, joins, where) + dataArgs := append(args, filter.PerPage, offset) //nolint:gocritic // append to copy is intentional + + rows, err := s.db.QueryContext(ctx, dataQuery, dataArgs...) 
+ if err != nil { + return nil, fmt.Errorf("listing audit entries: %w", err) + } + defer rows.Close() + + items := make([]AuditEntry, 0) + for rows.Next() { + var e AuditEntry + if err := rows.Scan(&e.ID, &e.UserID, &e.Username, &e.Action, &e.InstanceID, &e.Detail, &e.CreatedAt); err != nil { + return nil, fmt.Errorf("scanning audit entry: %w", err) + } + items = append(items, e) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating audit entries: %w", err) + } + + return &AuditPage{Items: items, Total: total, Page: filter.Page}, nil +} + +// DeleteAuditEntriesBefore removes audit entries older than the given time. +// Returns the number of deleted rows. +func (s *Store) DeleteAuditEntriesBefore(ctx context.Context, before time.Time) (int64, error) { + result, err := s.db.ExecContext(ctx, + `DELETE FROM audit_log WHERE created_at < ?`, before) + if err != nil { + return 0, fmt.Errorf("deleting old audit entries: %w", err) + } + n, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf("checking rows affected: %w", err) + } + return n, nil +} + +func buildAuditConditions(filter AuditFilter) (conditions []string, args []any) { + if filter.UserID != nil { + conditions = append(conditions, "a.user_id = ?") + args = append(args, *filter.UserID) + } + if filter.Action != "" { + conditions = append(conditions, "a.action = ?") + args = append(args, filter.Action) + } + if filter.InstanceID != nil { + conditions = append(conditions, "a.instance_id = ?") + args = append(args, *filter.InstanceID) + } + if filter.From != nil { + conditions = append(conditions, "a.created_at >= ?") + args = append(args, *filter.From) + } + if filter.To != nil { + conditions = append(conditions, "a.created_at <= ?") + args = append(args, *filter.To) + } + if filter.Query != "" { + conditions = append(conditions, "audit_log_fts MATCH ?") + args = append(args, ftsQuote(filter.Query)) + } + + return conditions, args +} + +// ftsQuote wraps a user query in 
double quotes so FTS5 treats it as a +// literal phrase. Internal double quotes are escaped per FTS5 rules. +func ftsQuote(q string) string { + escaped := strings.ReplaceAll(q, `"`, `""`) + return `"` + escaped + `"` +} diff --git a/admin/store/audit_test.go b/admin/store/audit_test.go new file mode 100644 index 0000000..ed6e485 --- /dev/null +++ b/admin/store/audit_test.go @@ -0,0 +1,392 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "testing" + "time" +) + +func createTestUser(t *testing.T, st *Store) int64 { + t.Helper() + user, err := st.CreateUser(context.Background(), "testuser", "$2a$10$abcdefghijklmnopqrstuuABCDEFGHIJKLMNOPQRSTUVWXYZ01234") + if err != nil { + t.Fatalf("CreateUser() error = %v", err) + } + return user.ID +} + +func createTestInstance(t *testing.T, st *Store) int64 { + t.Helper() + inst, err := st.CreateInstance(context.Background(), "test-proxy", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + return inst.ID +} + +func TestInsertAuditEntry_Success(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + instID := createTestInstance(t, st) + + err := st.InsertAuditEntry(ctx, userID, "instance.create", &instID, "Created instance test-proxy at 10.0.0.1:9090") + if err != nil { + t.Fatalf("InsertAuditEntry() error = %v", err) + } +} + +func TestInsertAuditEntry_NilInstanceID(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + err := st.InsertAuditEntry(ctx, userID, "user.login", nil, "User logged in") + if err != nil { + t.Fatalf("InsertAuditEntry() error = %v", err) + } +} + +func TestListAuditEntries_Empty_ReturnsEmptyPage(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + page, err := st.ListAuditEntries(ctx, AuditFilter{Page: 1, PerPage: 20}) + if err != nil 
{ + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 0 { + t.Errorf("Total = %d, want 0", page.Total) + } + if len(page.Items) != 0 { + t.Errorf("len(Items) = %d, want 0", len(page.Items)) + } + if page.Page != 1 { + t.Errorf("Page = %d, want 1", page.Page) + } +} + +func TestListAuditEntries_ReturnsEntriesOrderedByCreatedAtDesc(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + actions := []string{"user.login", "instance.create", "instance.delete"} + for _, action := range actions { + if err := st.InsertAuditEntry(ctx, userID, action, nil, "detail for "+action); err != nil { + t.Fatalf("InsertAuditEntry(%s) error = %v", action, err) + } + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{Page: 1, PerPage: 20}) + if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 3 { + t.Fatalf("Total = %d, want 3", page.Total) + } + + // Most recent first. + if page.Items[0].Action != "instance.delete" { + t.Errorf("Items[0].Action = %q, want %q", page.Items[0].Action, "instance.delete") + } + if page.Items[2].Action != "user.login" { + t.Errorf("Items[2].Action = %q, want %q", page.Items[2].Action, "user.login") + } +} + +func TestListAuditEntries_JoinsUsername(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + if err := st.InsertAuditEntry(ctx, userID, "user.login", nil, "Login"); err != nil { + t.Fatalf("InsertAuditEntry() error = %v", err) + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{Page: 1, PerPage: 20}) + if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Items[0].Username != "testuser" { + t.Errorf("Username = %q, want %q", page.Items[0].Username, "testuser") + } +} + +func TestListAuditEntries_FilterByAction(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) 
+ + for _, action := range []string{"user.login", "instance.create", "user.login"} { + if err := st.InsertAuditEntry(ctx, userID, action, nil, "detail"); err != nil { + t.Fatalf("InsertAuditEntry() error = %v", err) + } + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{Action: "user.login", Page: 1, PerPage: 20}) + if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 2 { + t.Errorf("Total = %d, want 2", page.Total) + } + for _, item := range page.Items { + if item.Action != "user.login" { + t.Errorf("Action = %q, want %q", item.Action, "user.login") + } + } +} + +func TestListAuditEntries_FilterByUserID(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + user2, createErr := st.CreateUser(ctx, "other", "$2a$10$abcdefghijklmnopqrstuuABCDEFGHIJKLMNOPQRSTUVWXYZ01234") + if createErr != nil { + t.Fatalf("CreateUser() error = %v", createErr) + } + + if insertErr := st.InsertAuditEntry(ctx, userID, "user.login", nil, "u1"); insertErr != nil { + t.Fatal(insertErr) + } + if insertErr := st.InsertAuditEntry(ctx, user2.ID, "user.login", nil, "u2"); insertErr != nil { + t.Fatal(insertErr) + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{UserID: &userID, Page: 1, PerPage: 20}) + if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 1 { + t.Errorf("Total = %d, want 1", page.Total) + } +} + +func TestListAuditEntries_FilterByInstanceID(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + instID := createTestInstance(t, st) + + if err := st.InsertAuditEntry(ctx, userID, "instance.create", &instID, "with instance"); err != nil { + t.Fatal(err) + } + if err := st.InsertAuditEntry(ctx, userID, "user.login", nil, "without instance"); err != nil { + t.Fatal(err) + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{InstanceID: &instID, Page: 1, PerPage: 20}) 
+ if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 1 { + t.Errorf("Total = %d, want 1", page.Total) + } +} + +func TestListAuditEntries_FilterByDateRange(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + // Insert entries with explicit timestamps via raw SQL. + for _, ts := range []string{"2026-01-01 00:00:00", "2026-02-01 00:00:00", "2026-03-01 00:00:00"} { + _, err := st.db.ExecContext(ctx, + `INSERT INTO audit_log (user_id, action, detail, created_at) VALUES (?, 'user.login', ?, ?)`, + userID, "entry at "+ts, ts) + if err != nil { + t.Fatalf("inserting audit entry: %v", err) + } + } + + from := time.Date(2026, 1, 15, 0, 0, 0, 0, time.UTC) + to := time.Date(2026, 2, 15, 0, 0, 0, 0, time.UTC) + + page, err := st.ListAuditEntries(ctx, AuditFilter{From: &from, To: &to, Page: 1, PerPage: 20}) + if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 1 { + t.Errorf("Total = %d, want 1", page.Total) + } +} + +func TestListAuditEntries_FullTextSearch(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + entries := []struct{ action, detail string }{ + {"instance.create", "Created instance production-proxy at 10.0.0.1:9090"}, + {"instance.create", "Created instance staging-proxy at 10.0.0.2:9090"}, + {"user.login", "User logged in from 192.168.1.1"}, + } + for _, e := range entries { + if err := st.InsertAuditEntry(ctx, userID, e.action, nil, e.detail); err != nil { + t.Fatalf("InsertAuditEntry() error = %v", err) + } + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{Query: "production", Page: 1, PerPage: 20}) + if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 1 { + t.Errorf("Total = %d, want 1", page.Total) + } + if page.Total > 0 && page.Items[0].Detail != "Created instance production-proxy at 10.0.0.1:9090" { + 
t.Errorf("Detail = %q, unexpected", page.Items[0].Detail) + } +} + +func TestListAuditEntries_Pagination(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + for i := 0; i < 5; i++ { + if err := st.InsertAuditEntry(ctx, userID, "user.login", nil, "entry"); err != nil { + t.Fatal(err) + } + } + + page1, err := st.ListAuditEntries(ctx, AuditFilter{Page: 1, PerPage: 2}) + if err != nil { + t.Fatalf("page 1: %v", err) + } + if len(page1.Items) != 2 { + t.Errorf("page 1 len = %d, want 2", len(page1.Items)) + } + if page1.Total != 5 { + t.Errorf("page 1 Total = %d, want 5", page1.Total) + } + + page3, err := st.ListAuditEntries(ctx, AuditFilter{Page: 3, PerPage: 2}) + if err != nil { + t.Fatalf("page 3: %v", err) + } + if len(page3.Items) != 1 { + t.Errorf("page 3 len = %d, want 1", len(page3.Items)) + } +} + +func TestListAuditEntries_CombinedFilters(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + instID := createTestInstance(t, st) + + if err := st.InsertAuditEntry(ctx, userID, "instance.create", &instID, "Created production-proxy"); err != nil { + t.Fatal(err) + } + if err := st.InsertAuditEntry(ctx, userID, "instance.delete", &instID, "Deleted production-proxy"); err != nil { + t.Fatal(err) + } + if err := st.InsertAuditEntry(ctx, userID, "user.login", nil, "Login"); err != nil { + t.Fatal(err) + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{ + Action: "instance.create", + InstanceID: &instID, + Query: "production", + Page: 1, + PerPage: 20, + }) + if err != nil { + t.Fatalf("ListAuditEntries() error = %v", err) + } + if page.Total != 1 { + t.Errorf("Total = %d, want 1", page.Total) + } +} + +func TestDeleteAuditEntriesBefore_DeletesOldEntries(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + userID := createTestUser(t, st) + + // Insert old and new entries via raw SQL for 
controlled timestamps. + _, insertErr := st.db.ExecContext(ctx, + `INSERT INTO audit_log (user_id, action, detail, created_at) VALUES (?, 'user.login', 'old', '2025-01-01 00:00:00')`, + userID) + if insertErr != nil { + t.Fatal(insertErr) + } + if recentErr := st.InsertAuditEntry(ctx, userID, "user.login", nil, "recent"); recentErr != nil { + t.Fatal(recentErr) + } + + cutoff := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + deleted, err := st.DeleteAuditEntriesBefore(ctx, cutoff) + if err != nil { + t.Fatalf("DeleteAuditEntriesBefore() error = %v", err) + } + if deleted != 1 { + t.Errorf("deleted = %d, want 1", deleted) + } + + page, err := st.ListAuditEntries(ctx, AuditFilter{Page: 1, PerPage: 20}) + if err != nil { + t.Fatal(err) + } + if page.Total != 1 { + t.Errorf("Total = %d, want 1", page.Total) + } +} + +func TestFtsQuote_EscapesSpecialCharacters(t *testing.T) { + t.Parallel() + tests := []struct { + input string + want string + }{ + {"simple", `"simple"`}, + {"proxy-1", `"proxy-1"`}, + {"", `""`}, + {`has "quotes"`, `"has ""quotes"""`}, + {`double "" already`, `"double """" already"`}, + {"AND OR NOT", `"AND OR NOT"`}, + {"prefix*", `"prefix*"`}, + {"NEAR/2", `"NEAR/2"`}, + {`back\slash`, `"back\slash"`}, + } + for _, tt := range tests { + got := ftsQuote(tt.input) + if got != tt.want { + t.Errorf("ftsQuote(%q) = %q, want %q", tt.input, got, tt.want) + } + } +} + +func TestDeleteAuditEntriesBefore_NothingToDelete(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + cutoff := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + deleted, err := st.DeleteAuditEntriesBefore(context.Background(), cutoff) + if err != nil { + t.Fatalf("DeleteAuditEntriesBefore() error = %v", err) + } + if deleted != 0 { + t.Errorf("deleted = %d, want 0", deleted) + } +} diff --git a/admin/store/instance.go b/admin/store/instance.go new file mode 100644 index 0000000..2d4345a --- /dev/null +++ b/admin/store/instance.go @@ -0,0 +1,169 @@ +// Copyright 2026 CloudBlue LLC +// 
SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" +) + +// Sentinel errors for instance operations. +var ( + ErrInstanceNotFound = errors.New("instance not found") + ErrDuplicateAddress = errors.New("duplicate instance address") +) + +// Instance represents a registered proxy instance. +type Instance struct { + ID int64 `json:"id"` + Name string `json:"name"` + Address string `json:"address"` + Status string `json:"status"` + Version string `json:"version"` + LastSeenAt *time.Time `json:"last_seen_at,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// ListInstances returns all registered instances ordered by name. +func (s *Store) ListInstances(ctx context.Context) ([]Instance, error) { + rows, err := s.db.QueryContext(ctx, + `SELECT id, name, address, status, version, last_seen_at, created_at, updated_at + FROM instances ORDER BY name`) + if err != nil { + return nil, fmt.Errorf("listing instances: %w", err) + } + defer rows.Close() + + var instances []Instance + for rows.Next() { + inst, err := scanInstance(rows) + if err != nil { + return nil, err + } + instances = append(instances, inst) + } + return instances, rows.Err() +} + +// GetInstance returns a single instance by ID. +func (s *Store) GetInstance(ctx context.Context, id int64) (*Instance, error) { + row := s.db.QueryRowContext(ctx, + `SELECT id, name, address, status, version, last_seen_at, created_at, updated_at + FROM instances WHERE id = ?`, id) + + inst, err := scanInstance(row) + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrInstanceNotFound + } + if err != nil { + return nil, fmt.Errorf("getting instance %d: %w", id, err) + } + return &inst, nil +} + +// CreateInstance inserts a new instance and returns it. 
+func (s *Store) CreateInstance(ctx context.Context, name, address string) (*Instance, error) {
+	result, err := s.db.ExecContext(ctx,
+		`INSERT INTO instances (name, address) VALUES (?, ?)`, name, address)
+	if err != nil {
+		// address is declared UNIQUE in the schema (migration 1); translate
+		// the driver error into the package sentinel so callers can use
+		// errors.Is(err, ErrDuplicateAddress).
+		if isUniqueConstraintError(err) {
+			return nil, ErrDuplicateAddress
+		}
+		return nil, fmt.Errorf("creating instance: %w", err)
+	}
+
+	id, err := result.LastInsertId()
+	if err != nil {
+		return nil, fmt.Errorf("getting last insert ID: %w", err)
+	}
+	// Re-read the row so the returned Instance carries the DB-side defaults
+	// (status 'unknown', created_at/updated_at).
+	// NOTE(review): the insert and the re-read are two separate statements,
+	// not one transaction -- a concurrent delete in between would surface
+	// ErrInstanceNotFound here; confirm that is acceptable for callers.
+	return s.GetInstance(ctx, id)
+}
+
+// UpdateInstance updates instance name and address by ID and returns the updated instance.
+// It returns ErrInstanceNotFound when no row has the given ID and
+// ErrDuplicateAddress when the new address collides with another instance.
+func (s *Store) UpdateInstance(ctx context.Context, id int64, name, address string) (*Instance, error) {
+	result, err := s.db.ExecContext(ctx,
+		`UPDATE instances SET name = ?, address = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?`,
+		name, address, id)
+	if err != nil {
+		if isUniqueConstraintError(err) {
+			return nil, ErrDuplicateAddress
+		}
+		return nil, fmt.Errorf("updating instance %d: %w", id, err)
+	}
+
+	n, err := result.RowsAffected()
+	if err != nil {
+		return nil, fmt.Errorf("checking rows affected: %w", err)
+	}
+	// Zero affected rows means the WHERE id = ? clause matched nothing.
+	if n == 0 {
+		return nil, ErrInstanceNotFound
+	}
+	// Re-read so the caller sees the refreshed updated_at timestamp.
+	return s.GetInstance(ctx, id)
+}
+
+// DeleteInstance removes an instance by ID.
+// It returns ErrInstanceNotFound when no row matched the given ID.
+func (s *Store) DeleteInstance(ctx context.Context, id int64) error {
+	result, err := s.db.ExecContext(ctx, `DELETE FROM instances WHERE id = ?`, id)
+	if err != nil {
+		return fmt.Errorf("deleting instance %d: %w", id, err)
+	}
+
+	n, err := result.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("checking rows affected: %w", err)
+	}
+	if n == 0 {
+		return ErrInstanceNotFound
+	}
+	return nil
+}
+
+// SetInstanceHealthy marks an instance as healthy with the given version.
+func (s *Store) SetInstanceHealthy(ctx context.Context, id int64, version string) error { + _, err := s.db.ExecContext(ctx, + `UPDATE instances SET status = 'healthy', version = ?, last_seen_at = CURRENT_TIMESTAMP, + updated_at = CURRENT_TIMESTAMP WHERE id = ?`, + version, id) + if err != nil { + return fmt.Errorf("setting instance %d healthy: %w", id, err) + } + return nil +} + +// SetInstanceUnreachable marks an instance as unreachable. +func (s *Store) SetInstanceUnreachable(ctx context.Context, id int64) error { + _, err := s.db.ExecContext(ctx, + `UPDATE instances SET status = 'unreachable', updated_at = CURRENT_TIMESTAMP WHERE id = ?`, + id) + if err != nil { + return fmt.Errorf("setting instance %d unreachable: %w", id, err) + } + return nil +} + +// scanner is satisfied by both *sql.Row and *sql.Rows. +type scanner interface { + Scan(dest ...any) error +} + +func scanInstance(s scanner) (Instance, error) { + var inst Instance + err := s.Scan( + &inst.ID, &inst.Name, &inst.Address, &inst.Status, + &inst.Version, &inst.LastSeenAt, &inst.CreatedAt, &inst.UpdatedAt, + ) + if err != nil { + return Instance{}, fmt.Errorf("scanning instance: %w", err) + } + return inst, nil +} + +func isUniqueConstraintError(err error) bool { + return err != nil && strings.Contains(err.Error(), "UNIQUE constraint failed") +} diff --git a/admin/store/instance_test.go b/admin/store/instance_test.go new file mode 100644 index 0000000..df7dc6d --- /dev/null +++ b/admin/store/instance_test.go @@ -0,0 +1,254 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "errors" + "testing" +) + +func TestCreateInstance_Success(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + inst, err := st.CreateInstance(context.Background(), "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + if inst.ID == 0 { + t.Error("expected non-zero ID") + } + if inst.Name != "proxy-1" { + 
t.Errorf("Name = %q, want %q", inst.Name, "proxy-1") + } + if inst.Address != "10.0.0.1:9090" { + t.Errorf("Address = %q, want %q", inst.Address, "10.0.0.1:9090") + } + if inst.Status != "unknown" { + t.Errorf("Status = %q, want %q", inst.Status, "unknown") + } +} + +func TestCreateInstance_DuplicateAddress_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + if _, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090"); err != nil { + t.Fatalf("first CreateInstance() error = %v", err) + } + + _, err := st.CreateInstance(ctx, "proxy-2", "10.0.0.1:9090") + if !errors.Is(err, ErrDuplicateAddress) { + t.Errorf("error = %v, want %v", err, ErrDuplicateAddress) + } +} + +func TestGetInstance_Exists_ReturnsInstance(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + created, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + got, err := st.GetInstance(ctx, created.ID) + if err != nil { + t.Fatalf("GetInstance() error = %v", err) + } + if got.Name != "proxy-1" { + t.Errorf("Name = %q, want %q", got.Name, "proxy-1") + } +} + +func TestGetInstance_NotFound_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + _, err := st.GetInstance(context.Background(), 999) + if !errors.Is(err, ErrInstanceNotFound) { + t.Errorf("error = %v, want %v", err, ErrInstanceNotFound) + } +} + +func TestListInstances_Empty_ReturnsNil(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + instances, err := st.ListInstances(context.Background()) + if err != nil { + t.Fatalf("ListInstances() error = %v", err) + } + if instances != nil { + t.Errorf("expected nil, got %v", instances) + } +} + +func TestListInstances_Multiple_ReturnsSortedByName(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + for _, name := range []string{"charlie", "alpha", "bravo"} { + if _, err := 
st.CreateInstance(ctx, name, name+":9090"); err != nil { + t.Fatalf("CreateInstance(%q) error = %v", name, err) + } + } + + instances, err := st.ListInstances(ctx) + if err != nil { + t.Fatalf("ListInstances() error = %v", err) + } + if len(instances) != 3 { + t.Fatalf("len = %d, want 3", len(instances)) + } + + want := []string{"alpha", "bravo", "charlie"} + for i, inst := range instances { + if inst.Name != want[i] { + t.Errorf("instances[%d].Name = %q, want %q", i, inst.Name, want[i]) + } + } +} + +func TestUpdateInstance_Success(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + created, err := st.CreateInstance(ctx, "old-name", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + updated, err := st.UpdateInstance(ctx, created.ID, "new-name", "10.0.0.2:9090") + if err != nil { + t.Fatalf("UpdateInstance() error = %v", err) + } + + if updated.Name != "new-name" { + t.Errorf("Name = %q, want %q", updated.Name, "new-name") + } + if updated.Address != "10.0.0.2:9090" { + t.Errorf("Address = %q, want %q", updated.Address, "10.0.0.2:9090") + } +} + +func TestUpdateInstance_NotFound_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + _, err := st.UpdateInstance(context.Background(), 999, "name", "addr:9090") + if !errors.Is(err, ErrInstanceNotFound) { + t.Errorf("error = %v, want %v", err, ErrInstanceNotFound) + } +} + +func TestUpdateInstance_DuplicateAddress_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + if _, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090"); err != nil { + t.Fatalf("CreateInstance(proxy-1) error = %v", err) + } + inst2, err := st.CreateInstance(ctx, "proxy-2", "10.0.0.2:9090") + if err != nil { + t.Fatalf("CreateInstance(proxy-2) error = %v", err) + } + + _, err = st.UpdateInstance(ctx, inst2.ID, "proxy-2", "10.0.0.1:9090") + if !errors.Is(err, ErrDuplicateAddress) { + t.Errorf("error = 
%v, want %v", err, ErrDuplicateAddress) + } +} + +func TestDeleteInstance_Success(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + created, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + err = st.DeleteInstance(ctx, created.ID) + if err != nil { + t.Fatalf("DeleteInstance() error = %v", err) + } + + _, err = st.GetInstance(ctx, created.ID) + if !errors.Is(err, ErrInstanceNotFound) { + t.Errorf("after delete: error = %v, want %v", err, ErrInstanceNotFound) + } +} + +func TestDeleteInstance_NotFound_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + err := st.DeleteInstance(context.Background(), 999) + if !errors.Is(err, ErrInstanceNotFound) { + t.Errorf("error = %v, want %v", err, ErrInstanceNotFound) + } +} + +func TestSetInstanceHealthy_UpdatesStatusAndVersion(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + created, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + err = st.SetInstanceHealthy(ctx, created.ID, "1.2.3") + if err != nil { + t.Fatalf("SetInstanceHealthy() error = %v", err) + } + + got, err := st.GetInstance(ctx, created.ID) + if err != nil { + t.Fatalf("GetInstance() error = %v", err) + } + if got.Status != "healthy" { + t.Errorf("Status = %q, want %q", got.Status, "healthy") + } + if got.Version != "1.2.3" { + t.Errorf("Version = %q, want %q", got.Version, "1.2.3") + } + if got.LastSeenAt == nil { + t.Error("LastSeenAt should not be nil after healthy poll") + } +} + +func TestSetInstanceUnreachable_UpdatesStatus(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + created, err := st.CreateInstance(ctx, "proxy-1", "10.0.0.1:9090") + if err != nil { + t.Fatalf("CreateInstance() error = %v", err) + } + + err = st.SetInstanceUnreachable(ctx, created.ID) 
+ if err != nil { + t.Fatalf("SetInstanceUnreachable() error = %v", err) + } + + got, err := st.GetInstance(ctx, created.ID) + if err != nil { + t.Fatalf("GetInstance() error = %v", err) + } + if got.Status != "unreachable" { + t.Errorf("Status = %q, want %q", got.Status, "unreachable") + } +} diff --git a/admin/store/migrations.go b/admin/store/migrations.go new file mode 100644 index 0000000..2c2caf6 --- /dev/null +++ b/admin/store/migrations.go @@ -0,0 +1,169 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "fmt" + "log/slog" +) + +type migration struct { + Version int + Description string + SQL string +} + +var migrations = []migration{ + { + Version: 1, + Description: "initial schema", + SQL: ` +CREATE TABLE instances ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + address TEXT NOT NULL UNIQUE, + status TEXT NOT NULL DEFAULT 'unknown', + version TEXT NOT NULL DEFAULT '', + last_seen_at TIMESTAMP, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE users ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + username TEXT NOT NULL UNIQUE, + password_hash TEXT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE sessions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, + token TEXT NOT NULL UNIQUE, + expires_at TIMESTAMP NOT NULL, + last_active_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE audit_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL REFERENCES users(id), + action TEXT NOT NULL, + instance_id INTEGER REFERENCES instances(id) ON DELETE SET NULL, + detail TEXT, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX 
idx_audit_log_created_at ON audit_log(created_at); +CREATE INDEX idx_audit_log_user_id ON audit_log(user_id); +CREATE INDEX idx_audit_log_action ON audit_log(action); +CREATE INDEX idx_sessions_token ON sessions(token); +CREATE INDEX idx_sessions_user_id ON sessions(user_id); +CREATE INDEX idx_sessions_expires_at ON sessions(expires_at); +`, + }, + { + Version: 2, + Description: "add FTS5 index for audit log full-text search", + SQL: ` +CREATE VIRTUAL TABLE audit_log_fts USING fts5( + detail, + content='audit_log', + content_rowid='id' +); + +CREATE TRIGGER audit_log_ai AFTER INSERT ON audit_log BEGIN + INSERT INTO audit_log_fts(rowid, detail) VALUES (new.id, new.detail); +END; + +CREATE TRIGGER audit_log_ad AFTER DELETE ON audit_log BEGIN + INSERT INTO audit_log_fts(audit_log_fts, rowid, detail) VALUES('delete', old.id, old.detail); +END; + +CREATE TRIGGER audit_log_au AFTER UPDATE ON audit_log BEGIN + INSERT INTO audit_log_fts(audit_log_fts, rowid, detail) VALUES('delete', old.id, old.detail); + INSERT INTO audit_log_fts(rowid, detail) VALUES (new.id, new.detail); +END; +`, + }, +} + +func (s *Store) migrate(ctx context.Context) error { + if err := s.ensureMigrationsTable(ctx); err != nil { + return err + } + + current, err := s.currentSchemaVersion(ctx) + if err != nil { + return err + } + + for _, m := range migrations { + if m.Version <= current { + continue + } + + slog.Info("applying migration", + "version", m.Version, + "description", m.Description, + ) + + if err := s.applyMigration(ctx, m); err != nil { + return err + } + } + + return nil +} + +func (s *Store) ensureMigrationsTable(ctx context.Context) error { + _, err := s.db.ExecContext(ctx, ` + CREATE TABLE IF NOT EXISTS schema_migrations ( + version INTEGER PRIMARY KEY, + applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP + ) + `) + if err != nil { + return fmt.Errorf("creating schema_migrations table: %w", err) + } + return nil +} + +func (s *Store) currentSchemaVersion(ctx context.Context) 
(int, error) { + var current int + err := s.db.QueryRowContext(ctx, "SELECT COALESCE(MAX(version), 0) FROM schema_migrations").Scan(&current) + if err != nil { + return 0, fmt.Errorf("reading current schema version: %w", err) + } + return current, nil +} + +func (s *Store) applyMigration(ctx context.Context, m migration) error { + tx, err := s.db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("beginning transaction for migration %d: %w", m.Version, err) + } + + if _, err := tx.ExecContext(ctx, m.SQL); err != nil { + if rbErr := tx.Rollback(); rbErr != nil { + slog.Error("rolling back migration", "version", m.Version, "error", rbErr) + } + return fmt.Errorf("applying migration %d (%s): %w", m.Version, m.Description, err) + } + + if _, err := tx.ExecContext(ctx, "INSERT INTO schema_migrations (version) VALUES (?)", m.Version); err != nil { + if rbErr := tx.Rollback(); rbErr != nil { + slog.Error("rolling back migration", "version", m.Version, "error", rbErr) + } + return fmt.Errorf("recording migration %d: %w", m.Version, err) + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("committing migration %d: %w", m.Version, err) + } + + return nil +} diff --git a/admin/store/store.go b/admin/store/store.go new file mode 100644 index 0000000..51fcdb4 --- /dev/null +++ b/admin/store/store.go @@ -0,0 +1,70 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "database/sql" + "fmt" + "net/url" + + _ "modernc.org/sqlite" // register sqlite driver +) + +// Store wraps a SQLite database connection for the admin portal. +type Store struct { + db *sql.DB +} + +// DB returns the underlying database connection for use by API handlers. +func (s *Store) DB() *sql.DB { + return s.db +} + +// Open creates a new Store with the given SQLite database path. +// It configures WAL mode, busy timeout, and foreign keys, then runs +// any pending schema migrations. 
+// +// Pragmas are passed via the DSN so that every connection in the +// database/sql pool receives them, not just the first one. +func Open(ctx context.Context, dbPath string) (*Store, error) { + dsn := buildDSN(dbPath) + + db, err := sql.Open("sqlite", dsn) + if err != nil { + return nil, fmt.Errorf("opening database: %w", err) + } + + if err := db.PingContext(ctx); err != nil { + _ = db.Close() // best-effort cleanup; primary error is the ping failure + return nil, fmt.Errorf("pinging database: %w", err) + } + + s := &Store{db: db} + if err := s.migrate(ctx); err != nil { + _ = db.Close() // best-effort cleanup; primary error is the migration failure + return nil, fmt.Errorf("running migrations: %w", err) + } + + return s, nil +} + +// buildDSN constructs a SQLite DSN with per-connection pragmas. +// Using _pragma query parameters ensures every pooled connection +// gets WAL mode, a busy timeout, and foreign key enforcement. +func buildDSN(dbPath string) string { + v := url.Values{} + v.Add("_pragma", "journal_mode(WAL)") + v.Add("_pragma", "busy_timeout(5000)") + v.Add("_pragma", "foreign_keys(1)") + return dbPath + "?" + v.Encode() +} + +// Close closes the database connection. 
+func (s *Store) Close() error { + if err := s.db.Close(); err != nil { + return fmt.Errorf("closing database: %w", err) + } + return nil +} diff --git a/admin/store/store_test.go b/admin/store/store_test.go new file mode 100644 index 0000000..9b7debc --- /dev/null +++ b/admin/store/store_test.go @@ -0,0 +1,198 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "path/filepath" + "sort" + "strings" + "testing" +) + +func openTestStore(t *testing.T) *Store { + t.Helper() + dbPath := filepath.Join(t.TempDir(), "test.db") + st, err := Open(context.Background(), dbPath) + if err != nil { + t.Fatalf("Open(%q) failed: %v", dbPath, err) + } + t.Cleanup(func() { st.Close() }) + return st +} + +func TestOpen_CreatesAllTables(t *testing.T) { + t.Parallel() + + // Arrange & Act + st := openTestStore(t) + + // Assert — query sqlite_master for expected tables + rows, err := st.DB().QueryContext(context.Background(), + `SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY name`, + ) + if err != nil { + t.Fatalf("querying sqlite_master: %v", err) + } + defer rows.Close() + + var tables []string + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + t.Fatalf("scanning table name: %v", err) + } + tables = append(tables, name) + } + if err := rows.Err(); err != nil { + t.Fatalf("iterating rows: %v", err) + } + + expected := []string{"audit_log", "audit_log_fts", "audit_log_fts_config", "audit_log_fts_data", "audit_log_fts_docsize", "audit_log_fts_idx", "instances", "schema_migrations", "sessions", "users"} + sort.Strings(tables) + + if len(tables) != len(expected) { + t.Fatalf("tables = %v, want %v", tables, expected) + } + for i, name := range tables { + if name != expected[i] { + t.Errorf("table[%d] = %q, want %q", i, name, expected[i]) + } + } +} + +func TestOpen_MigrationIdempotent(t *testing.T) { + t.Parallel() + + // Arrange — open twice on same DB 
to verify re-run is safe + dbPath := filepath.Join(t.TempDir(), "test.db") + + st1, err := Open(context.Background(), dbPath) + if err != nil { + t.Fatalf("first Open failed: %v", err) + } + st1.Close() + + // Act — open again (should re-run migrate without error) + st2, err := Open(context.Background(), dbPath) + if err != nil { + t.Fatalf("second Open failed: %v", err) + } + defer st2.Close() + + // Assert — schema_migrations still has exactly one row per applied migration (no duplicates from the re-run) + var count int + if err := st2.DB().QueryRowContext(context.Background(), "SELECT COUNT(*) FROM schema_migrations").Scan(&count); err != nil { + t.Fatalf("counting migrations: %v", err) + } + if count != len(migrations) { + t.Errorf("migration count = %d, want %d", count, len(migrations)) + } +} + +func TestOpen_WALMode_Enabled(t *testing.T) { + t.Parallel() + + // Arrange & Act + st := openTestStore(t) + + // Assert + var mode string + if err := st.DB().QueryRowContext(context.Background(), "PRAGMA journal_mode").Scan(&mode); err != nil { + t.Fatalf("querying journal_mode: %v", err) + } + if mode != "wal" { + t.Errorf("journal_mode = %q, want %q", mode, "wal") + } +} + +func TestOpen_SchemaMigrations_TracksVersion(t *testing.T) { + t.Parallel() + + // Arrange & Act + st := openTestStore(t) + + // Assert + var version int + if err := st.DB().QueryRowContext(context.Background(), "SELECT MAX(version) FROM schema_migrations").Scan(&version); err != nil { + t.Fatalf("querying schema version: %v", err) + } + if version != len(migrations) { + t.Errorf("schema version = %d, want %d", version, len(migrations)) + } +} + +func TestOpen_InvalidPath_ReturnsError(t *testing.T) { + t.Parallel() + + // Arrange — directory that doesn't exist + dbPath := "/nonexistent/dir/test.db" + + // Act + _, err := Open(context.Background(), dbPath) + + // Assert + if err == nil { + t.Error("expected error, got nil") + } +} + +func TestOpen_PragmasApplyToAllPoolConnections(t *testing.T) { + t.Parallel() + + // Arrange — open store and force 
multiple connections in the pool. + st := openTestStore(t) + db := st.DB() + db.SetMaxOpenConns(4) + + ctx := context.Background() + + // Act — grab several raw connections and check pragmas on each. + for i := range 4 { + conn, err := db.Conn(ctx) + if err != nil { + t.Fatalf("conn %d: %v", i, err) + } + + var timeout int + if err := conn.QueryRowContext(ctx, "PRAGMA busy_timeout").Scan(&timeout); err != nil { + t.Fatalf("conn %d: querying busy_timeout: %v", i, err) + } + if timeout != 5000 { + t.Errorf("conn %d: busy_timeout = %d, want 5000", i, timeout) + } + + var fk int + if err := conn.QueryRowContext(ctx, "PRAGMA foreign_keys").Scan(&fk); err != nil { + t.Fatalf("conn %d: querying foreign_keys: %v", i, err) + } + if fk != 1 { + t.Errorf("conn %d: foreign_keys = %d, want 1", i, fk) + } + + conn.Close() + } +} + +func TestBuildDSN_ContainsPragmas(t *testing.T) { + t.Parallel() + + dsn := buildDSN("/tmp/test.db") + + // url.Values.Encode() percent-encodes parentheses, so check + // the encoded form that the driver actually receives. + for _, want := range []string{ + "_pragma=journal_mode%28WAL%29", + "_pragma=busy_timeout%285000%29", + "_pragma=foreign_keys%281%29", + } { + if !strings.Contains(dsn, want) { + t.Errorf("DSN %q missing pragma %q", dsn, want) + } + } + + if !strings.HasPrefix(dsn, "/tmp/test.db?") { + t.Errorf("DSN %q does not start with expected path", dsn) + } +} diff --git a/admin/store/user.go b/admin/store/user.go new file mode 100644 index 0000000..91db384 --- /dev/null +++ b/admin/store/user.go @@ -0,0 +1,198 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "crypto/sha256" + "database/sql" + "encoding/hex" + "errors" + "fmt" + "time" +) + +// Sentinel errors for user and session operations. 
+var ( + ErrUserNotFound = errors.New("user not found") + ErrDuplicateUsername = errors.New("duplicate username") + ErrSessionNotFound = errors.New("session not found") +) + +// User represents a portal admin user. +type User struct { + ID int64 `json:"id"` + Username string `json:"username"` + PasswordHash string `json:"-"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Session represents an active user session. +type Session struct { + ID int64 + UserID int64 + TokenHash string + ExpiresAt time.Time + LastActiveAt time.Time + CreatedAt time.Time +} + +// CreateUser inserts a new user with the given bcrypt password hash. +func (s *Store) CreateUser(ctx context.Context, username, passwordHash string) (*User, error) { + result, err := s.db.ExecContext(ctx, + `INSERT INTO users (username, password_hash) VALUES (?, ?)`, username, passwordHash) + if err != nil { + if isUniqueConstraintError(err) { + return nil, ErrDuplicateUsername + } + return nil, fmt.Errorf("creating user: %w", err) + } + + id, err := result.LastInsertId() + if err != nil { + return nil, fmt.Errorf("getting last insert ID: %w", err) + } + return s.GetUserByID(ctx, id) +} + +// GetUserByID returns a user by their ID. +func (s *Store) GetUserByID(ctx context.Context, id int64) (*User, error) { + var u User + err := s.db.QueryRowContext(ctx, + `SELECT id, username, password_hash, created_at, updated_at FROM users WHERE id = ?`, id). + Scan(&u.ID, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt) + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrUserNotFound + } + if err != nil { + return nil, fmt.Errorf("getting user %d: %w", id, err) + } + return &u, nil +} + +// GetUserByUsername returns a user by their username. 
+func (s *Store) GetUserByUsername(ctx context.Context, username string) (*User, error) { + var u User + err := s.db.QueryRowContext(ctx, + `SELECT id, username, password_hash, created_at, updated_at FROM users WHERE username = ?`, username). + Scan(&u.ID, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt) + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrUserNotFound + } + if err != nil { + return nil, fmt.Errorf("getting user %q: %w", username, err) + } + return &u, nil +} + +// UpdateUserPassword changes a user's password hash. +func (s *Store) UpdateUserPassword(ctx context.Context, userID int64, passwordHash string) error { + result, err := s.db.ExecContext(ctx, + `UPDATE users SET password_hash = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?`, + passwordHash, userID) + if err != nil { + return fmt.Errorf("updating password for user %d: %w", userID, err) + } + + n, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("checking rows affected: %w", err) + } + if n == 0 { + return ErrUserNotFound + } + return nil +} + +// CreateSession inserts a new session record. +// The raw token is hashed before storage; callers always pass raw tokens. +func (s *Store) CreateSession(ctx context.Context, userID int64, token string, expiresAt time.Time) error { + _, err := s.db.ExecContext(ctx, + `INSERT INTO sessions (user_id, token, expires_at) VALUES (?, ?, ?)`, + userID, hashToken(token), expiresAt) + if err != nil { + return fmt.Errorf("creating session: %w", err) + } + return nil +} + +// GetSessionByToken looks up a session by its raw token (hashed for lookup). +func (s *Store) GetSessionByToken(ctx context.Context, token string) (*Session, error) { + var sess Session + err := s.db.QueryRowContext(ctx, + `SELECT id, user_id, token, expires_at, last_active_at, created_at + FROM sessions WHERE token = ?`, hashToken(token)). 
+ Scan(&sess.ID, &sess.UserID, &sess.TokenHash, &sess.ExpiresAt, &sess.LastActiveAt, &sess.CreatedAt) + if errors.Is(err, sql.ErrNoRows) { + return nil, ErrSessionNotFound + } + if err != nil { + return nil, fmt.Errorf("getting session: %w", err) + } + return &sess, nil +} + +// TouchSession updates the last_active_at timestamp for idle timeout tracking. +// Accepts the raw token (hashed for lookup). +func (s *Store) TouchSession(ctx context.Context, token string) error { + _, err := s.db.ExecContext(ctx, + `UPDATE sessions SET last_active_at = CURRENT_TIMESTAMP WHERE token = ?`, hashToken(token)) + if err != nil { + return fmt.Errorf("touching session: %w", err) + } + return nil +} + +// DeleteSession removes a session by raw token (hashed for lookup). +func (s *Store) DeleteSession(ctx context.Context, token string) error { + _, err := s.db.ExecContext(ctx, + `DELETE FROM sessions WHERE token = ?`, hashToken(token)) + if err != nil { + return fmt.Errorf("deleting session: %w", err) + } + return nil +} + +// DeleteUserSessions removes all sessions for a user (password reset). +func (s *Store) DeleteUserSessions(ctx context.Context, userID int64) error { + _, err := s.db.ExecContext(ctx, + `DELETE FROM sessions WHERE user_id = ?`, userID) + if err != nil { + return fmt.Errorf("deleting sessions for user %d: %w", userID, err) + } + return nil +} + +// DeleteOtherSessions removes all sessions for a user except the given token. +// Accepts the raw keepToken (hashed for comparison). +func (s *Store) DeleteOtherSessions(ctx context.Context, userID int64, keepToken string) error { + _, err := s.db.ExecContext(ctx, + `DELETE FROM sessions WHERE user_id = ? AND token != ?`, userID, hashToken(keepToken)) + if err != nil { + return fmt.Errorf("deleting other sessions for user %d: %w", userID, err) + } + return nil +} + +// DeleteExpiredSessions removes sessions past their absolute expiry. 
+func (s *Store) DeleteExpiredSessions(ctx context.Context) (int64, error) { + result, err := s.db.ExecContext(ctx, + `DELETE FROM sessions WHERE expires_at < ?`, time.Now()) + if err != nil { + return 0, fmt.Errorf("deleting expired sessions: %w", err) + } + n, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf("checking rows affected: %w", err) + } + return n, nil +} + +// hashToken computes the SHA-256 hash of a raw session token. +// The database stores hashes so a DB compromise does not leak usable tokens. +func hashToken(raw string) string { + h := sha256.Sum256([]byte(raw)) + return hex.EncodeToString(h[:]) +} diff --git a/admin/store/user_test.go b/admin/store/user_test.go new file mode 100644 index 0000000..ec96121 --- /dev/null +++ b/admin/store/user_test.go @@ -0,0 +1,287 @@ +// Copyright 2026 CloudBlue LLC +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "context" + "errors" + "testing" + "time" +) + +func TestCreateUser_Success(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + user, err := st.CreateUser(context.Background(), "admin", "$2a$10$hash") + if err != nil { + t.Fatalf("CreateUser() error = %v", err) + } + if user.ID == 0 { + t.Error("expected non-zero ID") + } + if user.Username != "admin" { + t.Errorf("Username = %q, want %q", user.Username, "admin") + } +} + +func TestCreateUser_DuplicateUsername_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + if _, err := st.CreateUser(ctx, "admin", "$2a$10$hash1"); err != nil { + t.Fatalf("first CreateUser() error = %v", err) + } + + _, err := st.CreateUser(ctx, "admin", "$2a$10$hash2") + if !errors.Is(err, ErrDuplicateUsername) { + t.Errorf("error = %v, want %v", err, ErrDuplicateUsername) + } +} + +func TestGetUserByID_Exists_ReturnsUser(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + created, err := st.CreateUser(ctx, "admin", "$2a$10$hash") + if err != nil 
{ + t.Fatalf("CreateUser() error = %v", err) + } + + got, err := st.GetUserByID(ctx, created.ID) + if err != nil { + t.Fatalf("GetUserByID() error = %v", err) + } + if got.Username != "admin" { + t.Errorf("Username = %q, want %q", got.Username, "admin") + } + if got.PasswordHash != "$2a$10$hash" { + t.Errorf("PasswordHash = %q, want %q", got.PasswordHash, "$2a$10$hash") + } +} + +func TestGetUserByID_NotFound_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + _, err := st.GetUserByID(context.Background(), 999) + if !errors.Is(err, ErrUserNotFound) { + t.Errorf("error = %v, want %v", err, ErrUserNotFound) + } +} + +func TestGetUserByUsername_Exists_ReturnsUser(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + if _, err := st.CreateUser(ctx, "admin", "$2a$10$hash"); err != nil { + t.Fatalf("CreateUser() error = %v", err) + } + + got, err := st.GetUserByUsername(ctx, "admin") + if err != nil { + t.Fatalf("GetUserByUsername() error = %v", err) + } + if got.Username != "admin" { + t.Errorf("Username = %q, want %q", got.Username, "admin") + } +} + +func TestGetUserByUsername_NotFound_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + _, err := st.GetUserByUsername(context.Background(), "nonexistent") + if !errors.Is(err, ErrUserNotFound) { + t.Errorf("error = %v, want %v", err, ErrUserNotFound) + } +} + +func TestUpdateUserPassword_Success(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + user, err := st.CreateUser(ctx, "admin", "$2a$10$oldhash") + if err != nil { + t.Fatalf("CreateUser() error = %v", err) + } + + err = st.UpdateUserPassword(ctx, user.ID, "$2a$10$newhash") + if err != nil { + t.Fatalf("UpdateUserPassword() error = %v", err) + } + + got, err := st.GetUserByID(ctx, user.ID) + if err != nil { + t.Fatalf("GetUserByID() error = %v", err) + } + if got.PasswordHash != "$2a$10$newhash" { + t.Errorf("PasswordHash = %q, want %q", 
got.PasswordHash, "$2a$10$newhash") + } +} + +func TestUpdateUserPassword_NotFound_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + err := st.UpdateUserPassword(context.Background(), 999, "$2a$10$hash") + if !errors.Is(err, ErrUserNotFound) { + t.Errorf("error = %v, want %v", err, ErrUserNotFound) + } +} + +func TestCreateSession_And_GetByToken(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + user, err := st.CreateUser(ctx, "admin", "$2a$10$hash") + if err != nil { + t.Fatalf("CreateUser() error = %v", err) + } + + expiresAt := time.Now().Add(24 * time.Hour) + err = st.CreateSession(ctx, user.ID, "tok-abc-123", expiresAt) + if err != nil { + t.Fatalf("CreateSession() error = %v", err) + } + + sess, err := st.GetSessionByToken(ctx, "tok-abc-123") + if err != nil { + t.Fatalf("GetSessionByToken() error = %v", err) + } + if sess.UserID != user.ID { + t.Errorf("UserID = %d, want %d", sess.UserID, user.ID) + } + if sess.TokenHash == "" { + t.Error("expected non-empty token hash") + } + if sess.TokenHash == "tok-abc-123" { + t.Error("token should be stored as a hash, not raw") + } +} + +func TestGetSessionByToken_NotFound_ReturnsError(t *testing.T) { + t.Parallel() + st := openTestStore(t) + + _, err := st.GetSessionByToken(context.Background(), "nonexistent") + if !errors.Is(err, ErrSessionNotFound) { + t.Errorf("error = %v, want %v", err, ErrSessionNotFound) + } +} + +func TestTouchSession_UpdatesLastActiveAt(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + user, _ := st.CreateUser(ctx, "admin", "$2a$10$hash") + expiresAt := time.Now().Add(24 * time.Hour) + _ = st.CreateSession(ctx, user.ID, "tok-touch", expiresAt) + + before, _ := st.GetSessionByToken(ctx, "tok-touch") + if err := st.TouchSession(ctx, "tok-touch"); err != nil { + t.Fatalf("TouchSession() error = %v", err) + } + after, _ := st.GetSessionByToken(ctx, "tok-touch") + + if 
after.LastActiveAt.Before(before.LastActiveAt) { + t.Errorf("LastActiveAt should not go backward after touch") + } +} + +func TestDeleteSession_RemovesSession(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + user, _ := st.CreateUser(ctx, "admin", "$2a$10$hash") + _ = st.CreateSession(ctx, user.ID, "tok-del", time.Now().Add(time.Hour)) + + if err := st.DeleteSession(ctx, "tok-del"); err != nil { + t.Fatalf("DeleteSession() error = %v", err) + } + + _, err := st.GetSessionByToken(ctx, "tok-del") + if !errors.Is(err, ErrSessionNotFound) { + t.Errorf("after delete: error = %v, want %v", err, ErrSessionNotFound) + } +} + +func TestDeleteUserSessions_RemovesAll(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + user, _ := st.CreateUser(ctx, "admin", "$2a$10$hash") + _ = st.CreateSession(ctx, user.ID, "tok-a", time.Now().Add(time.Hour)) + _ = st.CreateSession(ctx, user.ID, "tok-b", time.Now().Add(time.Hour)) + + if err := st.DeleteUserSessions(ctx, user.ID); err != nil { + t.Fatalf("DeleteUserSessions() error = %v", err) + } + + _, errA := st.GetSessionByToken(ctx, "tok-a") + _, errB := st.GetSessionByToken(ctx, "tok-b") + if !errors.Is(errA, ErrSessionNotFound) || !errors.Is(errB, ErrSessionNotFound) { + t.Errorf("sessions should be deleted; errA = %v, errB = %v", errA, errB) + } +} + +func TestDeleteOtherSessions_KeepsSpecifiedToken(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + user, _ := st.CreateUser(ctx, "admin", "$2a$10$hash") + _ = st.CreateSession(ctx, user.ID, "tok-keep", time.Now().Add(time.Hour)) + _ = st.CreateSession(ctx, user.ID, "tok-remove-a", time.Now().Add(time.Hour)) + _ = st.CreateSession(ctx, user.ID, "tok-remove-b", time.Now().Add(time.Hour)) + + if err := st.DeleteOtherSessions(ctx, user.ID, "tok-keep"); err != nil { + t.Fatalf("DeleteOtherSessions() error = %v", err) + } + + // Kept session should still exist. 
+ if _, err := st.GetSessionByToken(ctx, "tok-keep"); err != nil { + t.Errorf("kept session should exist: %v", err) + } + + // Other sessions should be deleted. + _, errA := st.GetSessionByToken(ctx, "tok-remove-a") + _, errB := st.GetSessionByToken(ctx, "tok-remove-b") + if !errors.Is(errA, ErrSessionNotFound) || !errors.Is(errB, ErrSessionNotFound) { + t.Errorf("other sessions should be deleted; errA = %v, errB = %v", errA, errB) + } +} + +func TestDeleteExpiredSessions_RemovesExpiredOnly(t *testing.T) { + t.Parallel() + st := openTestStore(t) + ctx := context.Background() + + user, _ := st.CreateUser(ctx, "admin", "$2a$10$hash") + + // One expired, one active. + _ = st.CreateSession(ctx, user.ID, "tok-expired", time.Now().Add(-time.Hour)) + _ = st.CreateSession(ctx, user.ID, "tok-active", time.Now().Add(time.Hour)) + + n, err := st.DeleteExpiredSessions(ctx) + if err != nil { + t.Fatalf("DeleteExpiredSessions() error = %v", err) + } + if n != 1 { + t.Errorf("deleted = %d, want 1", n) + } + + _, err = st.GetSessionByToken(ctx, "tok-active") + if err != nil { + t.Errorf("active session should still exist: %v", err) + } +} diff --git a/admin/ui/.prettierrc b/admin/ui/.prettierrc new file mode 100644 index 0000000..a50db1a --- /dev/null +++ b/admin/ui/.prettierrc @@ -0,0 +1,7 @@ +{ + "semi": true, + "singleQuote": true, + "useTabs": true, + "tabWidth": 2, + "trailingComma": "all" +} diff --git a/admin/ui/e2e/auth.setup.js b/admin/ui/e2e/auth.setup.js new file mode 100644 index 0000000..ab7114d --- /dev/null +++ b/admin/ui/e2e/auth.setup.js @@ -0,0 +1,17 @@ +import { test as setup, expect } from '@playwright/test'; +import path from 'node:path'; +import { TEST_USER, TEST_PASSWORD } from './helpers/constants.js'; + +const authFile = path.join(import.meta.dirname, '.auth', 'user.json'); + +setup('authenticate', async ({ page }) => { + await page.goto('/login'); + await page.getByTestId('login-username').fill(TEST_USER); + await 
page.getByTestId('login-password').fill(TEST_PASSWORD); + await page.getByTestId('login-submit').click(); + + // Wait until redirected to dashboard + await expect(page.getByTestId('dashboard-title')).toBeVisible(); + + await page.context().storageState({ path: authFile }); +}); diff --git a/admin/ui/e2e/global-setup.js b/admin/ui/e2e/global-setup.js new file mode 100644 index 0000000..4ec4b6f --- /dev/null +++ b/admin/ui/e2e/global-setup.js @@ -0,0 +1,95 @@ +import { spawn, execSync, execFileSync } from 'node:child_process'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { waitForHealth } from './helpers/services.js'; +import { TEST_USER, PW_CHANGE_USER, TEST_PASSWORD } from './helpers/constants.js'; + +const ROOT = path.resolve(import.meta.dirname, '..', '..', '..'); + +function killPid(envVar) { + const pid = process.env[envVar]; + if (pid) { + try { + process.kill(Number(pid), 'SIGTERM'); + } catch { + // Process may have already exited + } + } +} + +export default async function globalSetup() { + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'chaperone-e2e-')); + const dbPath = path.join(tmpDir, 'test.db'); + const binDir = path.join(ROOT, 'bin'); + const authDir = path.join(import.meta.dirname, '.auth'); + + fs.mkdirSync(authDir, { recursive: true }); + + // Store paths for teardown + process.env.E2E_TMP_DIR = tmpDir; + process.env.E2E_DB_PATH = dbPath; + + try { + // 1. Build admin binary + seed-user + console.log('[e2e] Building admin binary...'); + execSync('make build-admin', { cwd: ROOT, stdio: 'pipe' }); + console.log('[e2e] Building seed-user...'); + execSync( + `cd admin && go build -o ../bin/seed-user ./cmd/seed-user`, + { cwd: ROOT, stdio: 'pipe' }, + ); + + // 2. 
Start mock chaperone fleet + console.log('[e2e] Starting mock chaperone fleet...'); + const mockProc = spawn( + 'node', + [path.join(ROOT, 'test', 'mock-chaperone', 'mock-chaperone.js')], + { stdio: 'ignore' }, + ); + process.env.E2E_MOCK_PID = String(mockProc.pid); + + await waitForHealth('http://127.0.0.1:19091/_ops/health', 15_000); + console.log('[e2e] Mock fleet ready'); + + // 3. Seed test users + console.log('[e2e] Seeding test users...'); + const seedBin = path.join(binDir, 'seed-user'); + execFileSync(seedBin, ['--db', dbPath, '--username', TEST_USER, '--password', TEST_PASSWORD], { + cwd: ROOT, + stdio: 'pipe', + }); + execFileSync(seedBin, ['--db', dbPath, '--username', PW_CHANGE_USER, '--password', TEST_PASSWORD], { + cwd: ROOT, + stdio: 'pipe', + }); + + // 4. Start admin server + console.log('[e2e] Starting admin server...'); + const adminProc = spawn( + path.join(binDir, 'chaperone-admin'), + [], + { + stdio: 'ignore', + env: { + ...process.env, + CHAPERONE_ADMIN_SERVER_ADDR: '127.0.0.1:8080', + CHAPERONE_ADMIN_DATABASE_PATH: dbPath, + CHAPERONE_ADMIN_SERVER_SECURE_COOKIES: 'false', + CHAPERONE_ADMIN_SCRAPER_INTERVAL: '3s', + CHAPERONE_ADMIN_SCRAPER_TIMEOUT: '2s', + CHAPERONE_ADMIN_LOG_LEVEL: 'warn', + }, + }, + ); + process.env.E2E_ADMIN_PID = String(adminProc.pid); + + await waitForHealth('http://127.0.0.1:8080/api/health', 15_000); + console.log('[e2e] Admin server ready'); + } catch (err) { + // Kill any processes we spawned before Playwright skips globalTeardown + killPid('E2E_ADMIN_PID'); + killPid('E2E_MOCK_PID'); + throw err; + } +} diff --git a/admin/ui/e2e/global-teardown.js b/admin/ui/e2e/global-teardown.js new file mode 100644 index 0000000..414ac46 --- /dev/null +++ b/admin/ui/e2e/global-teardown.js @@ -0,0 +1,33 @@ +import fs from 'node:fs'; + +export default async function globalTeardown() { + // Kill admin server + const adminPid = process.env.E2E_ADMIN_PID; + if (adminPid) { + try { + process.kill(Number(adminPid), 'SIGTERM'); + } 
catch { + // Process may have already exited + } + } + + // Kill mock chaperone fleet + const mockPid = process.env.E2E_MOCK_PID; + if (mockPid) { + try { + process.kill(Number(mockPid), 'SIGTERM'); + } catch { + // Process may have already exited + } + } + + // Remove temp directory + const tmpDir = process.env.E2E_TMP_DIR; + if (tmpDir) { + try { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } catch { + // Best-effort cleanup + } + } +} diff --git a/admin/ui/e2e/helpers/constants.js b/admin/ui/e2e/helpers/constants.js new file mode 100644 index 0000000..20a09a3 --- /dev/null +++ b/admin/ui/e2e/helpers/constants.js @@ -0,0 +1,3 @@ +export const TEST_USER = 'admin'; +export const PW_CHANGE_USER = 'admin-pw-test'; +export const TEST_PASSWORD = 'testpassword12'; diff --git a/admin/ui/e2e/helpers/fixtures.js b/admin/ui/e2e/helpers/fixtures.js new file mode 100644 index 0000000..93e5ca1 --- /dev/null +++ b/admin/ui/e2e/helpers/fixtures.js @@ -0,0 +1,56 @@ +import { test as base, expect } from '@playwright/test'; +import { TEST_USER, TEST_PASSWORD } from './constants.js'; + +/** + * Custom fixtures for E2E tests. + * Provides an authenticated API context with CSRF handling for seeding data. + */ +export const test = base.extend({ + /** + * An authenticated API request context with CSRF support. + * Use for seeding instances via the REST API in beforeAll hooks. 
+ */ + authedAPI: async ({ playwright }, use) => { + const ctx = await playwright.request.newContext({ + baseURL: 'http://127.0.0.1:8080', + }); + + // Login to get session + CSRF cookies + const loginRes = await ctx.post('/api/login', { + data: { username: TEST_USER, password: TEST_PASSWORD }, + }); + expect(loginRes.ok()).toBeTruthy(); + + // Extract CSRF token from cookies + const cookies = await ctx.storageState(); + const csrfCookie = cookies.cookies.find((c) => c.name === 'csrf_token'); + if (!csrfCookie) throw new Error('expected csrf_token cookie after login'); + const csrfToken = csrfCookie.value; + + // Wrap context to auto-include CSRF header on writes + const originalPost = ctx.post.bind(ctx); + const originalPut = ctx.put.bind(ctx); + const originalDelete = ctx.delete.bind(ctx); + + ctx.post = (url, options = {}) => + originalPost(url, { + ...options, + headers: { ...options.headers, 'X-CSRF-Token': csrfToken }, + }); + ctx.put = (url, options = {}) => + originalPut(url, { + ...options, + headers: { ...options.headers, 'X-CSRF-Token': csrfToken }, + }); + ctx.delete = (url, options = {}) => + originalDelete(url, { + ...options, + headers: { ...options.headers, 'X-CSRF-Token': csrfToken }, + }); + + await use(ctx); + await ctx.dispose(); + }, +}); + +export { expect }; diff --git a/admin/ui/e2e/helpers/services.js b/admin/ui/e2e/helpers/services.js new file mode 100644 index 0000000..ada4355 --- /dev/null +++ b/admin/ui/e2e/helpers/services.js @@ -0,0 +1,33 @@ +import http from 'node:http'; + +/** + * Wait for an HTTP endpoint to return 200. 
+ * @param {string} url + * @param {number} timeoutMs + */ +export function waitForHealth(url, timeoutMs = 15_000) { + const start = Date.now(); + return new Promise((resolve, reject) => { + function attempt() { + if (Date.now() - start > timeoutMs) { + reject(new Error(`Timed out waiting for ${url}`)); + return; + } + const req = http.get(url, (res) => { + if (res.statusCode === 200) { + res.resume(); + resolve(); + } else { + res.resume(); + setTimeout(attempt, 250); + } + }); + req.on('error', () => setTimeout(attempt, 250)); + req.setTimeout(2000, () => { + req.destroy(); + setTimeout(attempt, 250); + }); + } + attempt(); + }); +} diff --git a/admin/ui/e2e/playwright.config.js b/admin/ui/e2e/playwright.config.js new file mode 100644 index 0000000..8ec0c82 --- /dev/null +++ b/admin/ui/e2e/playwright.config.js @@ -0,0 +1,90 @@ +// @ts-check +import { defineConfig } from '@playwright/test'; +import path from 'node:path'; + +const baseURL = 'http://127.0.0.1:8080'; +const storageState = path.join(import.meta.dirname, '.auth', 'user.json'); + +// Specs that are self-contained (no cross-test state dependencies) and safe +// to run on any browser after the Chromium full suite has seeded state. 
+const crossBrowserSpecs = [ + 'specs/smoke.spec.js', + 'specs/settings.spec.js', + 'specs/instance-detail.spec.js', +]; + +export default defineConfig({ + testDir: '.', + testMatch: ['specs/**/*.spec.js', 'auth.setup.js'], + fullyParallel: false, + workers: 1, + retries: 0, + timeout: 30_000, + expect: { + timeout: 10_000, + }, + use: { + baseURL, + trace: 'retain-on-failure', + screenshot: 'only-on-failure', + testIdAttribute: 'data-testid', + }, + projects: [ + // --- Chromium (primary): full suite --- + { + name: 'setup', + testMatch: 'auth.setup.js', + }, + { + name: 'chromium', + use: { browserName: 'chromium', storageState }, + dependencies: ['setup'], + testMatch: 'specs/**/*.spec.js', + testIgnore: ['specs/auth.spec.js', 'specs/accessibility.spec.js'], + }, + { + name: 'auth', + use: { browserName: 'chromium' }, + testMatch: 'specs/auth.spec.js', + }, + + // --- Accessibility: runs after main suite to avoid state interference --- + { + name: 'a11y', + use: { browserName: 'chromium', storageState }, + dependencies: ['chromium'], + testMatch: 'specs/accessibility.spec.js', + }, + + // --- Firefox: cross-browser subset --- + { + name: 'firefox', + use: { browserName: 'firefox', storageState }, + dependencies: ['chromium'], + testMatch: crossBrowserSpecs, + }, + { + name: 'auth-firefox', + use: { browserName: 'firefox' }, + dependencies: ['auth'], + testMatch: 'specs/auth.spec.js', + }, + + // --- WebKit: cross-browser subset --- + { + name: 'webkit', + use: { browserName: 'webkit', storageState }, + dependencies: ['chromium'], + testMatch: crossBrowserSpecs, + }, + { + name: 'auth-webkit', + use: { browserName: 'webkit' }, + dependencies: ['auth'], + testMatch: 'specs/auth.spec.js', + }, + ], + outputDir: './results', + globalSetup: './global-setup.js', + globalTeardown: './global-teardown.js', +}); diff --git a/admin/ui/e2e/specs/accessibility.spec.js b/admin/ui/e2e/specs/accessibility.spec.js new file mode 100644 index 0000000..1a58160 --- /dev/null +++ 
b/admin/ui/e2e/specs/accessibility.spec.js @@ -0,0 +1,128 @@ +import AxeBuilder from '@axe-core/playwright'; +import { test, expect } from '../helpers/fixtures.js'; + +const axeTags = ['wcag2a', 'wcag2aa', 'wcag21a', 'wcag21aa']; + +test.describe('Accessibility — authenticated pages', () => { + test('dashboard', async ({ page }) => { + await page.goto('/'); + // Accept whatever state the dashboard is in (welcome screen or instances) + await expect( + page.getByTestId('dashboard-title'), + ).toBeVisible({ timeout: 10_000 }); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); + + test('dashboard — table view', async ({ page, authedAPI }) => { + // Ensure at least one instance exists for table view + const res = await authedAPI.get('/api/instances'); + const instances = await res.json(); + if (instances.length === 0) { + await authedAPI.post('/api/instances', { + data: { name: 'a11y-proxy-1', address: '127.0.0.1:19091' }, + }); + } + + await page.goto('/'); + await expect(page.getByTestId('instance-card').first()).toBeVisible({ timeout: 15_000 }); + + await page.getByTestId('view-toggle-table').click(); + await expect(page.getByTestId('instance-table')).toBeVisible(); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); + + test('instance detail — overview tab', async ({ page, authedAPI }) => { + const res = await authedAPI.get('/api/instances'); + const instances = await res.json(); + const inst = instances[0]; + + await page.goto(`/instances/${inst.id}`); + await expect(page.getByTestId('overview-tab')).toBeVisible({ timeout: 15_000 }); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); + + test('instance detail — traffic tab', async ({ page, authedAPI }) => { + const res = await authedAPI.get('/api/instances'); + 
const instances = await res.json(); + const inst = instances[0]; + + await page.goto(`/instances/${inst.id}`); + await page.getByTestId('tab-traffic').click(); + await expect(page.getByTestId('traffic-tab')).toBeVisible({ timeout: 15_000 }); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); + + test('audit log', async ({ page }) => { + await page.goto('/audit-log'); + await expect(page.getByTestId('audit-table')).toBeVisible({ timeout: 10_000 }); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); + + test('settings page', async ({ page }) => { + await page.goto('/settings'); + await expect(page.getByTestId('settings-submit')).toBeVisible(); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); + + test('add instance modal', async ({ page }) => { + await page.goto('/'); + await expect(page.getByTestId('dashboard-title')).toBeVisible({ timeout: 10_000 }); + + const addBtn = page.getByTestId('add-instance-btn').or( + page.getByTestId('add-first-instance'), + ); + await addBtn.first().click(); + await expect(page.getByTestId('instance-name')).toBeVisible(); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); +}); + +test.describe('Accessibility — login page', () => { + test.use({ storageState: { cookies: [], origins: [] } }); + + test('login page', async ({ page }) => { + await page.goto('/login'); + await expect(page.getByTestId('login-submit')).toBeVisible(); + + const results = await new AxeBuilder({ page }) + .withTags(axeTags) + .analyze(); + + expect(results.violations).toEqual([]); + }); +}); diff --git a/admin/ui/e2e/specs/audit-log.spec.js b/admin/ui/e2e/specs/audit-log.spec.js new file mode 100644 index 0000000..dba3512 --- 
/dev/null +++ b/admin/ui/e2e/specs/audit-log.spec.js @@ -0,0 +1,56 @@ +import { test, expect } from '@playwright/test'; + +test.describe('Audit Log', () => { + test('shows audit entries for previous actions', async ({ page }) => { + await page.goto('/audit-log'); + + // Should have entries from dashboard tests (instance.create, user.login, etc.) + await expect(page.getByTestId('audit-table')).toBeVisible({ timeout: 10_000 }); + await expect(page.getByTestId('audit-row').first()).toBeVisible(); + }); + + test('search filters results', async ({ page }) => { + await page.goto('/audit-log'); + await expect(page.getByTestId('audit-table')).toBeVisible(); + + const responsePromise = page.waitForResponse((resp) => resp.url().includes('/api/audit')); + await page.getByTestId('audit-search').fill('proxy'); + await responsePromise; + + // Results may match or be empty; accept either the table or the empty state + await expect( + page.getByTestId('audit-table').or(page.getByText('No matching entries')), + ).toBeVisible(); + }); + + test('action type dropdown filters', async ({ page }) => { + await page.goto('/audit-log'); + await expect(page.getByTestId('audit-table')).toBeVisible(); + + await page.getByTestId('audit-action-filter').selectOption('user.login'); + + // All visible rows should contain the login action label + const rows = page.getByTestId('audit-row'); + const count = await rows.count(); + if (count > 0) { + for (let i = 0; i < Math.min(count, 5); i++) { + await expect(rows.nth(i)).toContainText('logged in'); + } + } + }); + + test('pagination controls work', async ({ page }) => { + await page.goto('/audit-log'); + + const pagination = page.getByTestId('audit-pagination'); + // Pagination may or may not be visible depending on entry count + // If visible, clicking next page should work + if (await pagination.isVisible()) { + const nextBtn = page.getByTestId('audit-next-page'); + if (await nextBtn.isEnabled()) { + await nextBtn.click(); + await 
expect(page.getByTestId('audit-table')).toBeVisible(); + } + } + }); +}); diff --git a/admin/ui/e2e/specs/auth.spec.js b/admin/ui/e2e/specs/auth.spec.js new file mode 100644 index 0000000..5ca4e95 --- /dev/null +++ b/admin/ui/e2e/specs/auth.spec.js @@ -0,0 +1,60 @@ +import { test, expect } from '@playwright/test'; +import { TEST_USER, TEST_PASSWORD } from '../helpers/constants.js'; + +test.describe('Authentication', () => { + test('login with valid credentials redirects to dashboard', async ({ page }) => { + await page.goto('/login'); + await page.getByTestId('login-username').fill(TEST_USER); + await page.getByTestId('login-password').fill(TEST_PASSWORD); + await page.getByTestId('login-submit').click(); + + await expect(page.getByTestId('dashboard-title')).toBeVisible(); + await expect(page.getByTestId('sidebar-username')).toHaveText(TEST_USER); + }); + + test('login with invalid credentials shows error', async ({ page }) => { + await page.goto('/login'); + await page.getByTestId('login-username').fill(TEST_USER); + await page.getByTestId('login-password').fill('wrongpassword1'); + await page.getByTestId('login-submit').click(); + + await expect(page.getByTestId('login-error')).toBeVisible(); + await expect(page.getByTestId('login-error')).toContainText('Invalid username or password'); + await expect(page).toHaveURL(/\/login/); + }); + + test('unauthenticated user is redirected to login', async ({ page }) => { + await page.goto('/'); + await expect(page).toHaveURL(/\/login/); + }); + + test('redirect back after login', async ({ page }) => { + await page.goto('/audit-log'); + await expect(page).toHaveURL(/\/login\?redirect=/); + + await page.getByTestId('login-username').fill(TEST_USER); + await page.getByTestId('login-password').fill(TEST_PASSWORD); + await page.getByTestId('login-submit').click(); + + await expect(page).toHaveURL(/\/audit-log/); + }); + + test('logout redirects to login', async ({ page }) => { + // First login + await page.goto('/login'); + 
await page.getByTestId('login-username').fill(TEST_USER); + await page.getByTestId('login-password').fill(TEST_PASSWORD); + await page.getByTestId('login-submit').click(); + await expect(page.getByTestId('dashboard-title')).toBeVisible(); + + // Then logout + await page.getByTestId('sidebar-logout').click(); + await expect(page).toHaveURL(/\/login/); + + // Session is invalidated — going back to / should redirect to login. + // The SPA's auth guard may redirect before the navigation even commits + // (especially in WebKit), so tolerate the "interrupted" error. + await page.goto('/').catch(() => {}); + await expect(page).toHaveURL(/\/login/); + }); +}); diff --git a/admin/ui/e2e/specs/dashboard.spec.js b/admin/ui/e2e/specs/dashboard.spec.js new file mode 100644 index 0000000..3b6bc0c --- /dev/null +++ b/admin/ui/e2e/specs/dashboard.spec.js @@ -0,0 +1,112 @@ +import { test, expect } from '../helpers/fixtures.js'; + +// Tests in this suite are intentionally ordered and sequentially dependent. +// Each test builds on state created by earlier tests (add → verify health → +// seed more → toggle view → edit → delete → navigate). This mirrors the +// real CRUD flow. Requires: fullyParallel: false, workers: 1. 
+test.describe('Fleet Dashboard', () => { + test('shows welcome screen when no instances registered', async ({ page }) => { + await page.goto('/'); + await expect(page.getByTestId('welcome-screen')).toBeVisible(); + await expect(page.getByTestId('add-first-instance')).toBeVisible(); + }); + + test('add instance via modal', async ({ page }) => { + await page.goto('/'); + + // Open add modal (welcome screen button or header button) + const addBtn = page.getByTestId('add-first-instance').or( + page.getByTestId('add-instance-btn'), + ); + await addBtn.first().click(); + + // Fill form + await page.getByTestId('instance-name').fill('proxy-us-east-1'); + await page.getByTestId('instance-address').fill('127.0.0.1:19091'); + + // Test connection + await page.getByTestId('test-connection').click(); + await expect(page.getByTestId('test-result')).toContainText('Connected successfully'); + + // Save + await page.getByTestId('save-instance').click(); + + // Card should appear + await expect(page.getByTestId('instance-card')).toBeVisible(); + }); + + test('instance becomes healthy after polling', async ({ page }) => { + await page.goto('/'); + + // Wait for status to show healthy (after scrape cycle) + await expect( + page.getByTestId('instance-card').getByTestId('status-healthy'), + ).toBeVisible({ timeout: 15_000 }); + }); + + test('add multiple instances shows KPI panel', async ({ page, authedAPI }) => { + // Seed second and third instances via API + await authedAPI.post('/api/instances', { + data: { name: 'proxy-eu-west-1', address: '127.0.0.1:19092' }, + }); + await authedAPI.post('/api/instances', { + data: { name: 'proxy-ap-south-1', address: '127.0.0.1:19093' }, + }); + + await page.goto('/'); + await expect(page.getByTestId('kpi-panel')).toBeVisible({ timeout: 15_000 }); + }); + + test('view toggle switches between card and table', async ({ page }) => { + await page.goto('/'); + await expect(page.getByTestId('instance-card').first()).toBeVisible(); + + // Switch to 
table + await page.getByTestId('view-toggle-table').click(); + await expect(page.getByTestId('instance-table')).toBeVisible(); + + // Switch back to cards + await page.getByTestId('view-toggle-card').click(); + await expect(page.getByTestId('instance-card').first()).toBeVisible(); + }); + + test('edit instance', async ({ page }) => { + await page.goto('/'); + + // Find a specific card by name and click its edit button + const card = page.getByTestId('instance-card').filter({ hasText: 'proxy-us-east-1' }); + await card.getByTestId('instance-edit').click(); + + // Modal should be pre-filled + await expect(page.getByTestId('instance-name')).toHaveValue('proxy-us-east-1'); + + // Change name + await page.getByTestId('instance-name').fill('proxy-renamed'); + await page.getByTestId('save-instance').click(); + + // Updated name should appear + await expect(page.getByText('proxy-renamed')).toBeVisible(); + }); + + test('delete instance with confirmation', async ({ page }) => { + await page.goto('/'); + + const cardCount = await page.getByTestId('instance-card').count(); + + // Click remove on last card + await page.getByTestId('instance-card').last().getByTestId('instance-delete').click(); + + // Confirm dialog + await expect(page.getByTestId('confirm-ok')).toBeVisible(); + await page.getByTestId('confirm-ok').click(); + + // One fewer card + await expect(page.getByTestId('instance-card')).toHaveCount(cardCount - 1); + }); + + test('click instance navigates to detail', async ({ page }) => { + await page.goto('/'); + await page.getByTestId('instance-card').first().click(); + await expect(page).toHaveURL(/\/instances\/\d+/); + }); +}); diff --git a/admin/ui/e2e/specs/instance-detail.spec.js b/admin/ui/e2e/specs/instance-detail.spec.js new file mode 100644 index 0000000..a3ef6ca --- /dev/null +++ b/admin/ui/e2e/specs/instance-detail.spec.js @@ -0,0 +1,85 @@ +import { test, expect } from '../helpers/fixtures.js'; + +test.describe('Instance Detail', () => { + // Seed instances 
before each test (idempotent — skips if already registered). + test.beforeEach(async ({ authedAPI }) => { + const res = await authedAPI.get('/api/instances'); + const instances = await res.json(); + if (!instances.find((i) => i.address === '127.0.0.1:19091')) { + await authedAPI.post('/api/instances', { + data: { name: 'proxy-us-east-1', address: '127.0.0.1:19091' }, + }); + } + if (!instances.find((i) => i.address === '127.0.0.1:19092')) { + await authedAPI.post('/api/instances', { + data: { name: 'proxy-eu-west-1', address: '127.0.0.1:19092' }, + }); + } + if (!instances.find((i) => i.address === '127.0.0.1:19093')) { + await authedAPI.post('/api/instances', { + data: { name: 'proxy-ap-south-1', address: '127.0.0.1:19093' }, + }); + } + }); + + test('overview tab shows metrics after polling', async ({ page, authedAPI }) => { + // Get first instance ID + const res = await authedAPI.get('/api/instances'); + const instances = await res.json(); + const inst = instances[0]; + + await page.goto(`/instances/${inst.id}`); + + // Wait for metrics to appear (scraper polls every 3s, may need multiple cycles) + await expect(page.getByTestId('overview-tab')).toBeVisible({ timeout: 15_000 }); + await expect(page.getByTestId('kpi-rps')).toBeVisible({ timeout: 15_000 }); + }); + + test('traffic tab shows vendor breakdown', async ({ page, authedAPI }) => { + const res = await authedAPI.get('/api/instances'); + const instances = await res.json(); + const inst = instances[0]; + + await page.goto(`/instances/${inst.id}`); + + // Switch to traffic tab + await page.getByTestId('tab-traffic').click(); + await expect(page.getByTestId('traffic-tab')).toBeVisible(); + + // Vendor names should appear + await expect(page.getByText('acme-corp')).toBeVisible({ timeout: 15_000 }); + }); + + test('tab keyboard navigation', async ({ page, authedAPI }) => { + const res = await authedAPI.get('/api/instances'); + const instances = await res.json(); + const inst = instances[0]; + + await 
page.goto(`/instances/${inst.id}`); + + // Focus overview tab and press arrow right + await page.getByTestId('tab-overview').focus(); + await page.keyboard.press('ArrowRight'); + + // Traffic tab should now be active + await expect(page.getByTestId('tab-traffic')).toHaveAttribute('aria-selected', 'true'); + }); + + test('breadcrumb Fleet link navigates back', async ({ page, authedAPI }) => { + const res = await authedAPI.get('/api/instances'); + const instances = await res.json(); + const inst = instances[0]; + + await page.goto(`/instances/${inst.id}`); + await page.getByTestId('breadcrumb-fleet').click(); + await expect(page.getByTestId('dashboard-title')).toBeVisible(); + }); + + test('non-existent instance shows not found', async ({ page }) => { + await page.goto('/instances/99999'); + // May show "Instance not found" or "Cannot load metrics" depending on race + await expect( + page.getByText('Instance not found').or(page.getByText('Cannot load metrics')), + ).toBeVisible(); + }); +}); diff --git a/admin/ui/e2e/specs/settings.spec.js b/admin/ui/e2e/specs/settings.spec.js new file mode 100644 index 0000000..e6157ae --- /dev/null +++ b/admin/ui/e2e/specs/settings.spec.js @@ -0,0 +1,61 @@ +import { test, expect } from '@playwright/test'; +import { PW_CHANGE_USER, TEST_PASSWORD } from '../helpers/constants.js'; + +// Use a separate browser context (no saved state) for the password-change user +test.use({ storageState: { cookies: [], origins: [] } }); + +test.describe('Settings — Password Change', () => { + test.beforeEach(async ({ page }) => { + // Login as the dedicated password-change test user + await page.goto('/login'); + await page.getByTestId('login-username').fill(PW_CHANGE_USER); + await page.getByTestId('login-password').fill(TEST_PASSWORD); + await page.getByTestId('login-submit').click(); + await expect(page.getByTestId('dashboard-title')).toBeVisible(); + }); + + test('change password successfully', async ({ page }) => { + await page.goto('/settings'); 
+ + await page.getByTestId('settings-current-password').fill(TEST_PASSWORD); + await page.getByTestId('settings-new-password').fill('newpassword1234'); + await page.getByTestId('settings-confirm-password').fill('newpassword1234'); + await page.getByTestId('settings-submit').click(); + + await expect(page.getByTestId('settings-success')).toBeVisible(); + await expect(page.getByTestId('settings-success')).toContainText('Password changed'); + + // Change it back so tests remain idempotent + await page.getByTestId('settings-current-password').fill('newpassword1234'); + await page.getByTestId('settings-new-password').fill(TEST_PASSWORD); + await page.getByTestId('settings-confirm-password').fill(TEST_PASSWORD); + await page.getByTestId('settings-submit').click(); + await expect(page.getByTestId('settings-success')).toBeVisible(); + }); + + test('wrong current password shows error', async ({ page }) => { + await page.goto('/settings'); + + await page.getByTestId('settings-current-password').fill('wrongpassword1'); + await page.getByTestId('settings-new-password').fill('newpassword1234'); + await page.getByTestId('settings-confirm-password').fill('newpassword1234'); + await page.getByTestId('settings-submit').click(); + + // Backend returns 403 (not 401) for wrong current password, so the + // global 401 interceptor does NOT trigger — user stays on settings page. 
+ await expect(page.getByTestId('settings-error')).toBeVisible(); + await expect(page.getByTestId('settings-error')).toContainText('Current password is incorrect'); + }); + + test('password too short shows validation error', async ({ page }) => { + await page.goto('/settings'); + + await page.getByTestId('settings-current-password').fill(TEST_PASSWORD); + await page.getByTestId('settings-new-password').fill('short'); + await page.getByTestId('settings-confirm-password').fill('short'); + await page.getByTestId('settings-submit').click(); + + // Should show client-side validation error (not server error) + await expect(page.getByTestId('settings-new-password-error')).toBeVisible(); + }); +}); diff --git a/admin/ui/e2e/specs/smoke.spec.js b/admin/ui/e2e/specs/smoke.spec.js new file mode 100644 index 0000000..0520cc6 --- /dev/null +++ b/admin/ui/e2e/specs/smoke.spec.js @@ -0,0 +1,7 @@ +import { test, expect } from '@playwright/test'; + +test('authenticated user sees fleet dashboard', async ({ page }) => { + await page.goto('/'); + await expect(page.getByTestId('dashboard-title')).toBeVisible(); + await expect(page.getByTestId('dashboard-title')).toHaveText('Fleet Dashboard'); +}); diff --git a/admin/ui/eslint.config.js b/admin/ui/eslint.config.js new file mode 100644 index 0000000..269aa21 --- /dev/null +++ b/admin/ui/eslint.config.js @@ -0,0 +1,14 @@ +import pluginVue from "eslint-plugin-vue"; +import configPrettier from "eslint-config-prettier"; + +export default [ + { ignores: ["dist/**"] }, + ...pluginVue.configs["flat/recommended"], + configPrettier, + { + rules: { + "semi": ["error", "always"], + "vue/multi-word-component-names": "off", + }, + }, +]; diff --git a/admin/ui/index.html b/admin/ui/index.html new file mode 100644 index 0000000..ae4a233 --- /dev/null +++ b/admin/ui/index.html @@ -0,0 +1,12 @@ + + + + + + Chaperone Admin + + +
+ + + diff --git a/admin/ui/package.json b/admin/ui/package.json new file mode 100644 index 0000000..bfab9c2 --- /dev/null +++ b/admin/ui/package.json @@ -0,0 +1,42 @@ +{ + "name": "chaperone-admin-ui", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview", + "lint": "eslint .", + "lint:fix": "eslint . --fix", + "format": "prettier --write \"src/**/*.{js,vue,css}\"", + "test": "vitest run", + "test:watch": "vitest", + "e2e": "playwright test --config e2e/playwright.config.js", + "e2e:panel": "pnpm e2e --ui" + }, + "dependencies": { + "echarts": "^6.0.0", + "pinia": "^3.0.4", + "vue": "^3.5.29", + "vue-echarts": "^8.0.1", + "vue-router": "^5.0.3" + }, + "pnpm": { + "onlyBuiltDependencies": [ + "esbuild" + ] + }, + "devDependencies": { + "@axe-core/playwright": "^4.11.1", + "@playwright/test": "^1.58.2", + "@vitejs/plugin-vue": "^6.0.4", + "eslint": "^10.0.2", + "eslint-config-prettier": "^10.1.8", + "eslint-plugin-vue": "^10.8.0", + "jsdom": "^26.1.0", + "prettier": "^3.8.1", + "vite": "^7.3.1", + "vitest": "^3.2.1" + } +} diff --git a/admin/ui/pnpm-lock.yaml b/admin/ui/pnpm-lock.yaml new file mode 100644 index 0000000..aa9a00f --- /dev/null +++ b/admin/ui/pnpm-lock.yaml @@ -0,0 +1,2581 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + echarts: + specifier: ^6.0.0 + version: 6.0.0 + pinia: + specifier: ^3.0.4 + version: 3.0.4(vue@3.5.29) + vue: + specifier: ^3.5.29 + version: 3.5.29 + vue-echarts: + specifier: ^8.0.1 + version: 8.0.1(echarts@6.0.0)(vue@3.5.29) + vue-router: + specifier: ^5.0.3 + version: 5.0.3(@vue/compiler-sfc@3.5.29)(pinia@3.0.4(vue@3.5.29))(vue@3.5.29) + devDependencies: + '@axe-core/playwright': + specifier: ^4.11.1 + version: 4.11.1(playwright-core@1.58.2) + '@playwright/test': + specifier: ^1.58.2 + version: 1.58.2 + '@vitejs/plugin-vue': + specifier: ^6.0.4 + 
version: 6.0.4(vite@7.3.1(yaml@2.8.2))(vue@3.5.29) + eslint: + specifier: ^10.0.2 + version: 10.0.2 + eslint-config-prettier: + specifier: ^10.1.8 + version: 10.1.8(eslint@10.0.2) + eslint-plugin-vue: + specifier: ^10.8.0 + version: 10.8.0(eslint@10.0.2)(vue-eslint-parser@10.4.0(eslint@10.0.2)) + jsdom: + specifier: ^26.1.0 + version: 26.1.0 + prettier: + specifier: ^3.8.1 + version: 3.8.1 + vite: + specifier: ^7.3.1 + version: 7.3.1(yaml@2.8.2) + vitest: + specifier: ^3.2.1 + version: 3.2.4(jsdom@26.1.0)(yaml@2.8.2) + +packages: + + '@asamuzakjp/css-color@3.2.0': + resolution: {integrity: sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==} + + '@axe-core/playwright@4.11.1': + resolution: {integrity: sha512-mKEfoUIB1MkVTht0BGZFXtSAEKXMJoDkyV5YZ9jbBmZCcWDz71tegNsdTkIN8zc/yMi5Gm2kx7Z5YQ9PfWNAWw==} + peerDependencies: + playwright-core: '>= 1.0.0' + + '@babel/generator@7.29.1': + resolution: {integrity: sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.29.0': + resolution: {integrity: sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/types@7.29.0': + resolution: {integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==} + engines: {node: '>=6.9.0'} + + '@csstools/color-helpers@5.1.0': + resolution: {integrity: 
sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==} + engines: {node: '>=18'} + + '@csstools/css-calc@2.1.4': + resolution: {integrity: sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-color-parser@3.1.0': + resolution: {integrity: sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-parser-algorithms@3.0.5': + resolution: {integrity: sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-tokenizer@3.0.4': + resolution: {integrity: sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==} + engines: {node: '>=18'} + + '@esbuild/aix-ppc64@0.27.3': + resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.27.3': + resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.27.3': + resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.27.3': + resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + 
'@esbuild/darwin-arm64@0.27.3': + resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.3': + resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.27.3': + resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.27.3': + resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.27.3': + resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.27.3': + resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.27.3': + resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.27.3': + resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.27.3': + resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.27.3': + resolution: {integrity: 
sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.3': + resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.27.3': + resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.27.3': + resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.27.3': + resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.3': + resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.27.3': + resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.3': + resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.27.3': + resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.27.3': + resolution: {integrity: 
sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.27.3': + resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.27.3': + resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.27.3': + resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.9.1': + resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.2': + resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/config-array@0.23.2': + resolution: {integrity: sha512-YF+fE6LV4v5MGWRGj7G404/OZzGNepVF8fxk7jqmqo3lrza7a0uUcDnROGRBG1WFC1omYUS/Wp1f42i0M+3Q3A==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/config-helpers@0.5.2': + resolution: {integrity: sha512-a5MxrdDXEvqnIq+LisyCX6tQMPF/dSJpCfBgBauY+pNZ28yCtSsTvyTYrMhaI+LK26bVyCJfJkT0u8KIj2i1dQ==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/core@1.1.0': + resolution: {integrity: sha512-/nr9K9wkr3P1EzFTdFdMoLuo1PmIxjmwvPozwoSodjNBdefGujXQUF93u1DDZpEaTuDvMsIQddsd35BwtrW9Xw==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/object-schema@3.0.2': + resolution: {integrity: 
sha512-HOy56KJt48Bx8KmJ+XGQNSUMT/6dZee/M54XyUyuvTvPXJmsERRvBchsUVx1UMe1WwIH49XLAczNC7V2INsuUw==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@eslint/plugin-kit@0.6.0': + resolution: {integrity: sha512-bIZEUzOI1jkhviX2cp5vNyXQc6olzb2ohewQubuYlMXZ2Q/XjBO0x0XhGPvc9fjSIiUN0vw+0hq53BJ4eQSJKQ==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + '@humanfs/core@0.19.1': + resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==} + engines: {node: '>=18.18.0'} + + '@humanfs/node@0.16.7': + resolution: {integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==} + engines: {node: '>=18.18.0'} + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/retry@0.4.3': + resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==} + engines: {node: '>=18.18'} + + '@jridgewell/gen-mapping@0.3.13': + resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} + + '@jridgewell/remapping@2.3.5': + resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + + '@playwright/test@1.58.2': + resolution: {integrity: 
sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA==} + engines: {node: '>=18'} + hasBin: true + + '@rolldown/pluginutils@1.0.0-rc.2': + resolution: {integrity: sha512-izyXV/v+cHiRfozX62W9htOAvwMo4/bXKDrQ+vom1L1qRuexPock/7VZDAhnpHCLNejd3NJ6hiab+tO0D44Rgw==} + + '@rollup/rollup-android-arm-eabi@4.59.0': + resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.59.0': + resolution: {integrity: sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.59.0': + resolution: {integrity: sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.59.0': + resolution: {integrity: sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.59.0': + resolution: {integrity: sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.59.0': + resolution: {integrity: sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.59.0': + resolution: {integrity: sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==} + cpu: [arm] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm-musleabihf@4.59.0': + resolution: {integrity: sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==} + cpu: [arm] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-arm64-gnu@4.59.0': + resolution: {integrity: 
sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm64-musl@4.59.0': + resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-loong64-gnu@4.59.0': + resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==} + cpu: [loong64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-loong64-musl@4.59.0': + resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==} + cpu: [loong64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-ppc64-gnu@4.59.0': + resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==} + cpu: [ppc64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-ppc64-musl@4.59.0': + resolution: {integrity: sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==} + cpu: [ppc64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-riscv64-gnu@4.59.0': + resolution: {integrity: sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==} + cpu: [riscv64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-riscv64-musl@4.59.0': + resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==} + cpu: [riscv64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-s390x-gnu@4.59.0': + resolution: {integrity: sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==} + cpu: [s390x] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-gnu@4.59.0': + resolution: {integrity: 
sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-musl@4.59.0': + resolution: {integrity: sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==} + cpu: [x64] + os: [linux] + libc: [musl] + + '@rollup/rollup-openbsd-x64@4.59.0': + resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==} + cpu: [x64] + os: [openbsd] + + '@rollup/rollup-openharmony-arm64@4.59.0': + resolution: {integrity: sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.59.0': + resolution: {integrity: sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.59.0': + resolution: {integrity: sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.59.0': + resolution: {integrity: sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.59.0': + resolution: {integrity: sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==} + cpu: [x64] + os: [win32] + + '@types/chai@5.2.3': + resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} + + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + + '@types/esrecurse@4.3.1': + resolution: {integrity: sha512-xJBAbDifo5hpffDBuHl0Y8ywswbiAp/Wi7Y/GtAgSlZyIABppyurxVueOPE8LUQOxdlgi6Zqce7uoEpqNTeiUw==} + + 
'@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@vitejs/plugin-vue@6.0.4': + resolution: {integrity: sha512-uM5iXipgYIn13UUQCZNdWkYk+sysBeA97d5mHsAoAt1u/wpN3+zxOmsVJWosuzX+IMGRzeYUNytztrYznboIkQ==} + engines: {node: ^20.19.0 || >=22.12.0} + peerDependencies: + vite: ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 + vue: ^3.2.25 + + '@vitest/expect@3.2.4': + resolution: {integrity: sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==} + + '@vitest/mocker@3.2.4': + resolution: {integrity: sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 || ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + + '@vitest/pretty-format@3.2.4': + resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} + + '@vitest/runner@3.2.4': + resolution: {integrity: sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==} + + '@vitest/snapshot@3.2.4': + resolution: {integrity: sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==} + + '@vitest/spy@3.2.4': + resolution: {integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} + + '@vitest/utils@3.2.4': + resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} + + '@vue-macros/common@3.1.2': + resolution: {integrity: sha512-h9t4ArDdniO9ekYHAD95t9AZcAbb19lEGK+26iAjUODOIJKmObDNBSe4+6ELQAA3vtYiFPPBtHh7+cQCKi3Dng==} + engines: {node: '>=20.19.0'} + peerDependencies: + vue: 
^2.7.0 || ^3.2.25 + peerDependenciesMeta: + vue: + optional: true + + '@vue/compiler-core@3.5.29': + resolution: {integrity: sha512-cuzPhD8fwRHk8IGfmYaR4eEe4cAyJEL66Ove/WZL7yWNL134nqLddSLwNRIsFlnnW1kK+p8Ck3viFnC0chXCXw==} + + '@vue/compiler-dom@3.5.29': + resolution: {integrity: sha512-n0G5o7R3uBVmVxjTIYcz7ovr8sy7QObFG8OQJ3xGCDNhbG60biP/P5KnyY8NLd81OuT1WJflG7N4KWYHaeeaIg==} + + '@vue/compiler-sfc@3.5.29': + resolution: {integrity: sha512-oJZhN5XJs35Gzr50E82jg2cYdZQ78wEwvRO6Y63TvLVTc+6xICzJHP1UIecdSPPYIbkautNBanDiWYa64QSFIA==} + + '@vue/compiler-ssr@3.5.29': + resolution: {integrity: sha512-Y/ARJZE6fpjzL5GH/phJmsFwx3g6t2KmHKHx5q+MLl2kencADKIrhH5MLF6HHpRMmlRAYBRSvv347Mepf1zVNw==} + + '@vue/devtools-api@7.7.9': + resolution: {integrity: sha512-kIE8wvwlcZ6TJTbNeU2HQNtaxLx3a84aotTITUuL/4bzfPxzajGBOoqjMhwZJ8L9qFYDU/lAYMEEm11dnZOD6g==} + + '@vue/devtools-api@8.0.7': + resolution: {integrity: sha512-tc1TXAxclsn55JblLkFVcIRG7MeSJC4fWsPjfM7qu/IcmPUYnQ5Q8vzWwBpyDY24ZjmZTUCCwjRSNbx58IhlAA==} + + '@vue/devtools-kit@7.7.9': + resolution: {integrity: sha512-PyQ6odHSgiDVd4hnTP+aDk2X4gl2HmLDfiyEnn3/oV+ckFDuswRs4IbBT7vacMuGdwY/XemxBoh302ctbsptuA==} + + '@vue/devtools-kit@8.0.7': + resolution: {integrity: sha512-H6esJGHGl5q0E9iV3m2EoBQHJ+V83WMW83A0/+Fn95eZ2iIvdsq4+UCS6yT/Fdd4cGZSchx/MdWDreM3WqMsDw==} + + '@vue/devtools-shared@7.7.9': + resolution: {integrity: sha512-iWAb0v2WYf0QWmxCGy0seZNDPdO3Sp5+u78ORnyeonS6MT4PC7VPrryX2BpMJrwlDeaZ6BD4vP4XKjK0SZqaeA==} + + '@vue/devtools-shared@8.0.7': + resolution: {integrity: sha512-CgAb9oJH5NUmbQRdYDj/1zMiaICYSLtm+B1kxcP72LBrifGAjUmt8bx52dDH1gWRPlQgxGPqpAMKavzVirAEhA==} + + '@vue/reactivity@3.5.29': + resolution: {integrity: sha512-zcrANcrRdcLtmGZETBxWqIkoQei8HaFpZWx/GHKxx79JZsiZ8j1du0VUJtu4eJjgFvU/iKL5lRXFXksVmI+5DA==} + + '@vue/runtime-core@3.5.29': + resolution: {integrity: sha512-8DpW2QfdwIWOLqtsNcds4s+QgwSaHSJY/SUe04LptianUQ/0xi6KVsu/pYVh+HO3NTVvVJjIPL2t6GdeKbS4Lg==} + + '@vue/runtime-dom@3.5.29': + resolution: {integrity: 
sha512-AHvvJEtcY9tw/uk+s/YRLSlxxQnqnAkjqvK25ZiM4CllCZWzElRAoQnCM42m9AHRLNJ6oe2kC5DCgD4AUdlvXg==} + + '@vue/server-renderer@3.5.29': + resolution: {integrity: sha512-G/1k6WK5MusLlbxSE2YTcqAAezS+VuwHhOvLx2KnQU7G2zCH6KIb+5Wyt6UjMq7a3qPzNEjJXs1hvAxDclQH+g==} + peerDependencies: + vue: 3.5.29 + + '@vue/shared@3.5.29': + resolution: {integrity: sha512-w7SR0A5zyRByL9XUkCfdLs7t9XOHUyJ67qPGQjOou3p6GvBeBW+AVjUUmlxtZ4PIYaRvE+1LmK44O4uajlZwcg==} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.16.0: + resolution: {integrity: sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==} + engines: {node: '>=0.4.0'} + hasBin: true + + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + + ajv@6.14.0: + resolution: {integrity: sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==} + + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + + ast-kit@2.2.0: + resolution: {integrity: sha512-m1Q/RaVOnTp9JxPX+F+Zn7IcLYMzM8kZofDImfsKZd8MbR+ikdOzTeztStWqfrqIxZnYWryyI9ePm3NGjnZgGw==} + engines: {node: '>=20.19.0'} + + ast-walker-scope@0.8.3: + resolution: {integrity: sha512-cbdCP0PGOBq0ASG+sjnKIoYkWMKhhz+F/h9pRexUdX2Hd38+WOlBkRKlqkGOSm0YQpcFMQBJeK4WspUAkwsEdg==} + engines: {node: '>=20.19.0'} + + axe-core@4.11.1: + resolution: {integrity: sha512-BASOg+YwO2C+346x3LZOeoovTIoTrRqEsqMa6fmfAV0P+U9mFr9NsyOEpiYvFjbc64NMrSswhV50WdXzdb/Z5A==} + engines: {node: '>=4'} + + balanced-match@4.0.4: + resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==} + engines: 
{node: 18 || 20 || >=22} + + birpc@2.9.0: + resolution: {integrity: sha512-KrayHS5pBi69Xi9JmvoqrIgYGDkD6mcSe/i6YKi3w5kekCLzrX4+nawcXqrj2tIp50Kw/mT/s3p+GVK0A0sKxw==} + + boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + + brace-expansion@5.0.4: + resolution: {integrity: sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==} + engines: {node: 18 || 20 || >=22} + + cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} + + chai@5.3.3: + resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} + engines: {node: '>=18'} + + check-error@2.1.3: + resolution: {integrity: sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==} + engines: {node: '>= 16'} + + chokidar@5.0.0: + resolution: {integrity: sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==} + engines: {node: '>= 20.19.0'} + + confbox@0.1.8: + resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + + confbox@0.2.4: + resolution: {integrity: sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ==} + + copy-anything@4.0.5: + resolution: {integrity: sha512-7Vv6asjS4gMOuILabD3l739tsaxFQmC+a7pLZm02zyvs8p977bL3zEgq3yDk5rn9B0PbYgIv++jmHcuUab4RhA==} + engines: {node: '>=18'} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + 
cssstyle@4.6.0: + resolution: {integrity: sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==} + engines: {node: '>=18'} + + csstype@3.2.3: + resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + + data-urls@5.0.0: + resolution: {integrity: sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==} + engines: {node: '>=18'} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decimal.js@10.6.0: + resolution: {integrity: sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==} + + deep-eql@5.0.2: + resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} + engines: {node: '>=6'} + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + echarts@6.0.0: + resolution: {integrity: sha512-Tte/grDQRiETQP4xz3iZWSvoHrkCQtwqd6hs+mifXcjrCuo2iKWbajFObuLJVBlDIJlOzgQPd1hsaKt/3+OMkQ==} + + entities@6.0.1: + resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} + engines: {node: '>=0.12'} + + entities@7.0.1: + resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==} + engines: {node: '>=0.12'} + + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + + esbuild@0.27.3: + resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} + engines: {node: 
'>=18'} + hasBin: true + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + eslint-config-prettier@10.1.8: + resolution: {integrity: sha512-82GZUjRS0p/jganf6q1rEO25VSoHH0hKPCTrgillPjdI/3bgBhAE1QzHrHTizjpRvy6pGAvKjDJtk2pF9NDq8w==} + hasBin: true + peerDependencies: + eslint: '>=7.0.0' + + eslint-plugin-vue@10.8.0: + resolution: {integrity: sha512-f1J/tcbnrpgC8suPN5AtdJ5MQjuXbSU9pGRSSYAuF3SHoiYCOdEX6O22pLaRyLHXvDcOe+O5ENgc1owQ587agA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + '@stylistic/eslint-plugin': ^2.0.0 || ^3.0.0 || ^4.0.0 || ^5.0.0 + '@typescript-eslint/parser': ^7.0.0 || ^8.0.0 + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + vue-eslint-parser: ^10.0.0 + peerDependenciesMeta: + '@stylistic/eslint-plugin': + optional: true + '@typescript-eslint/parser': + optional: true + + eslint-scope@9.1.1: + resolution: {integrity: sha512-GaUN0sWim5qc8KVErfPBWmc31LEsOkrUJbvJZV+xuL3u2phMUK4HIvXlWAakfC8W4nzlK+chPEAkYOYb5ZScIw==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@5.0.1: + resolution: {integrity: sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + + eslint@10.0.2: + resolution: {integrity: sha512-uYixubwmqJZH+KLVYIVKY1JQt7tysXhtj21WSvjcSmU5SVNzMus1bgLe+pAt816yQ8opKfheVVoPLqvVMGejYw==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + hasBin: true + peerDependencies: + jiti: '*' + peerDependenciesMeta: + jiti: + optional: true + + espree@11.1.1: + resolution: {integrity: sha512-AVHPqQoZYc+RUM4/3Ly5udlZY/U4LS8pIG05jEjWM2lQMU/oaZ7qshzAl2YP1tfNmXfftH3ohurfwNAug+MnsQ==} + engines: {node: 
^20.19.0 || ^22.13.0 || >=24} + + esquery@1.7.0: + resolution: {integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} + + exsolve@1.0.8: + resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || 
^4 + peerDependenciesMeta: + picomatch: + optional: true + + file-entry-cache@8.0.0: + resolution: {integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==} + engines: {node: '>=16.0.0'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@4.0.1: + resolution: {integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==} + engines: {node: '>=16'} + + flatted@3.3.4: + resolution: {integrity: sha512-3+mMldrTAPdta5kjX2G2J7iX4zxtnwpdA8Tr2ZSjkyPSanvbZAcy6flmtnXbEybHrDcU9641lxrMfFuUxVz9vA==} + + fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + hookable@5.5.3: + resolution: {integrity: sha512-Yc+BQe8SvoXH1643Qez1zqLRmbA5rCL+sSmk6TVos0LWVfNIB7PGncdlId77WzLGSIB5KaWgTaNTs2lNVEI6VQ==} + + html-encoding-sniffer@4.0.0: + resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==} + engines: {node: '>=18'} + + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + + 
iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-potential-custom-element-name@1.0.1: + resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} + + is-what@5.5.0: + resolution: {integrity: sha512-oG7cgbmg5kLYae2N5IVd3jm2s+vldjxJzK1pcu9LfpGuQ93MQSzo0okvRna+7y5ifrD+20FE8FvjusyGaz14fw==} + engines: {node: '>=18'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + js-tokens@9.0.1: + resolution: {integrity: sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} + + jsdom@26.1.0: + resolution: {integrity: sha512-Cvc9WUhxSMEo4McES3P7oK3QaXldCfNWp7pl2NNeiIFlCoLr3kfq9kb1fxftiwk1FLV7CvpvDfonxtzUDeSOPg==} + engines: {node: '>=18'} + peerDependencies: + canvas: ^3.0.0 + peerDependenciesMeta: + canvas: + optional: true + + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} + engines: {node: '>=6'} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: 
sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + local-pkg@1.1.2: + resolution: {integrity: sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A==} + engines: {node: '>=14'} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + loupe@3.2.1: + resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} + + lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + + magic-string-ast@1.0.3: + resolution: {integrity: sha512-CvkkH1i81zl7mmb94DsRiFeG9V2fR2JeuK8yDgS8oiZSFa++wWLEgZ5ufEOyLHbvSbD1gTRKv9NdX69Rnvr9JA==} + engines: {node: '>=20.19.0'} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + minimatch@10.2.4: + resolution: {integrity: 
sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==} + engines: {node: 18 || 20 || >=22} + + mitt@3.0.1: + resolution: {integrity: sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==} + + mlly@1.8.0: + resolution: {integrity: sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + muggle-string@0.4.1: + resolution: {integrity: sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + + nwsapi@2.2.23: + resolution: {integrity: sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + parse5@7.3.0: + resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} 
+ + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + pathval@2.0.1: + resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} + engines: {node: '>= 14.16'} + + perfect-debounce@1.0.0: + resolution: {integrity: sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==} + + perfect-debounce@2.1.0: + resolution: {integrity: sha512-LjgdTytVFXeUgtHZr9WYViYSM/g8MkcTPYDlPa3cDqMirHjKiSZPYd6DoL7pK8AJQr+uWkQvCjHNdiMqsrJs+g==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + pinia@3.0.4: + resolution: {integrity: sha512-l7pqLUFTI/+ESXn6k3nu30ZIzW5E2WZF/LaHJEpoq6ElcLD+wduZoB2kBN19du6K/4FDpPMazY2wJr+IndBtQw==} + peerDependencies: + typescript: '>=4.5.0' + vue: ^3.5.11 + peerDependenciesMeta: + typescript: + optional: true + + pkg-types@1.3.1: + resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + + pkg-types@2.3.0: + resolution: {integrity: sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==} + + playwright-core@1.58.2: + resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==} + engines: {node: '>=18'} + hasBin: 
true + + playwright@1.58.2: + resolution: {integrity: sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A==} + engines: {node: '>=18'} + hasBin: true + + postcss-selector-parser@7.1.1: + resolution: {integrity: sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==} + engines: {node: '>=4'} + + postcss@8.5.8: + resolution: {integrity: sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prettier@3.8.1: + resolution: {integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==} + engines: {node: '>=14'} + hasBin: true + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + quansync@0.2.11: + resolution: {integrity: sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA==} + + readdirp@5.0.0: + resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==} + engines: {node: '>= 20.19.0'} + + rfdc@1.4.1: + resolution: {integrity: sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==} + + rollup@4.59.0: + resolution: {integrity: sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + rrweb-cssom@0.8.0: + resolution: {integrity: sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==} + + safer-buffer@2.1.2: + resolution: {integrity: 
sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + saxes@6.0.0: + resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} + engines: {node: '>=v12.22.7'} + + scule@1.3.0: + resolution: {integrity: sha512-6FtHJEvt+pVMIB9IBY+IcCJ6Z5f1iQnytgyfKMhDKgmzYG+TeH/wx1y3l27rshSbLiSanrR9ffZDrEsmjlQF2g==} + + semver@7.7.4: + resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==} + engines: {node: '>=10'} + hasBin: true + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + speakingurl@14.0.1: + resolution: {integrity: sha512-1POYv7uv2gXoyGFpBCmpDVSNV74IfsWlDW216UPjbWufNf+bSU6GdbDsxdcxtfwb4xlI3yxzOTKClUosxARYrQ==} + engines: {node: '>=0.10.0'} + + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + + strip-literal@3.1.0: + resolution: {integrity: sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==} + + superjson@2.2.6: + resolution: {integrity: 
sha512-H+ue8Zo4vJmV2nRjpx86P35lzwDT3nItnIsocgumgr0hHMQ+ZGq5vrERg9kJBo5AWGmxZDhzDo+WVIJqkB0cGA==} + engines: {node: '>=16'} + + symbol-tree@3.2.4: + resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} + + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@0.3.2: + resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + tinypool@1.1.1: + resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==} + engines: {node: ^18.0.0 || >=20.0.0} + + tinyrainbow@2.0.0: + resolution: {integrity: sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==} + engines: {node: '>=14.0.0'} + + tinyspy@4.0.4: + resolution: {integrity: sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==} + engines: {node: '>=14.0.0'} + + tldts-core@6.1.86: + resolution: {integrity: sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==} + + tldts@6.1.86: + resolution: {integrity: sha512-WMi/OQ2axVTf/ykqCQgXiIct+mSQDFdH2fkwhPwgEwvJ1kSzZRiinb0zF2Xb8u4+OqPChmyI6MEu4EezNJz+FQ==} + hasBin: true + + tough-cookie@5.1.2: + resolution: {integrity: sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A==} + engines: {node: '>=16'} + + tr46@5.1.1: + resolution: {integrity: sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==} + engines: {node: '>=18'} + + tslib@2.3.0: + resolution: {integrity: 
sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==} + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + ufo@1.6.3: + resolution: {integrity: sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==} + + unplugin-utils@0.3.1: + resolution: {integrity: sha512-5lWVjgi6vuHhJ526bI4nlCOmkCIF3nnfXkCMDeMJrtdvxTs6ZFCM8oNufGTsDbKv/tJ/xj8RpvXjRuPBZJuJog==} + engines: {node: '>=20.19.0'} + + unplugin@3.0.0: + resolution: {integrity: sha512-0Mqk3AT2TZCXWKdcoaufeXNukv2mTrEZExeXlHIOZXdqYoHHr4n51pymnwV8x2BOVxwXbK2HLlI7usrqMpycdg==} + engines: {node: ^20.19.0 || >=22.12.0} + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + vite-node@3.2.4: + resolution: {integrity: sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + + vite@7.3.1: + resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + engines: {node: ^20.19.0 || >=22.12.0} + hasBin: true + peerDependencies: + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + 
tsx: + optional: true + yaml: + optional: true + + vitest@3.2.4: + resolution: {integrity: sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@types/debug': ^4.1.12 + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + '@vitest/browser': 3.2.4 + '@vitest/ui': 3.2.4 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@types/debug': + optional: true + '@types/node': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + + vue-echarts@8.0.1: + resolution: {integrity: sha512-23rJTFLu1OUEGRWjJGmdGt8fP+8+ja1gVgzMYPIPaHWpXegcO1viIAaeu2H4QHESlVeHzUAHIxKXGrwjsyXAaA==} + peerDependencies: + echarts: ^6.0.0 + vue: ^3.3.0 + + vue-eslint-parser@10.4.0: + resolution: {integrity: sha512-Vxi9pJdbN3ZnVGLODVtZ7y4Y2kzAAE2Cm0CZ3ZDRvydVYxZ6VrnBhLikBsRS+dpwj4Jv4UCv21PTEwF5rQ9WXg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + + vue-router@5.0.3: + resolution: {integrity: sha512-nG1c7aAFac7NYj8Hluo68WyWfc41xkEjaR0ViLHCa3oDvTQ/nIuLJlXJX1NUPw/DXzx/8+OKMng045HHQKQKWw==} + peerDependencies: + '@pinia/colada': '>=0.21.2' + '@vue/compiler-sfc': ^3.5.17 + pinia: ^3.0.4 + vue: ^3.5.0 + peerDependenciesMeta: + '@pinia/colada': + optional: true + '@vue/compiler-sfc': + optional: true + pinia: + optional: true + + vue@3.5.29: + resolution: {integrity: sha512-BZqN4Ze6mDQVNAni0IHeMJ5mwr8VAJ3MQC9FmprRhcBYENw+wOAAjRj8jfmN6FLl0j96OXbR+CjWhmAmM+QGnA==} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + w3c-xmlserializer@5.0.0: + resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} + engines: {node: '>=18'} + + webidl-conversions@7.0.0: + 
resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==} + engines: {node: '>=12'} + + webpack-virtual-modules@0.6.2: + resolution: {integrity: sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==} + + whatwg-encoding@3.1.1: + resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} + engines: {node: '>=18'} + deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation + + whatwg-mimetype@4.0.0: + resolution: {integrity: sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==} + engines: {node: '>=18'} + + whatwg-url@14.2.0: + resolution: {integrity: sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==} + engines: {node: '>=18'} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + ws@8.19.0: + resolution: {integrity: sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + xml-name-validator@4.0.0: + resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} + engines: {node: '>=12'} + + 
xml-name-validator@5.0.0: + resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} + engines: {node: '>=18'} + + xmlchars@2.2.0: + resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + + yaml@2.8.2: + resolution: {integrity: sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==} + engines: {node: '>= 14.6'} + hasBin: true + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + zrender@6.0.0: + resolution: {integrity: sha512-41dFXEEXuJpNecuUQq6JlbybmnHaqqpGlbH1yxnA5V9MMP4SbohSVZsJIwz+zdjQXSSlR1Vc34EgH1zxyTDvhg==} + +snapshots: + + '@asamuzakjp/css-color@3.2.0': + dependencies: + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-color-parser': 3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + lru-cache: 10.4.3 + + '@axe-core/playwright@4.11.1(playwright-core@1.58.2)': + dependencies: + axe-core: 4.11.1 + playwright-core: 1.58.2 + + '@babel/generator@7.29.1': + dependencies: + '@babel/parser': 7.29.0 + '@babel/types': 7.29.0 + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + jsesc: 3.1.0 + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.29.0': + dependencies: + '@babel/types': 7.29.0 + + '@babel/types@7.29.0': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@csstools/color-helpers@5.1.0': {} + + 
'@csstools/css-calc@2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-color-parser@3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/color-helpers': 5.1.0 + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-tokenizer@3.0.4': {} + + '@esbuild/aix-ppc64@0.27.3': + optional: true + + '@esbuild/android-arm64@0.27.3': + optional: true + + '@esbuild/android-arm@0.27.3': + optional: true + + '@esbuild/android-x64@0.27.3': + optional: true + + '@esbuild/darwin-arm64@0.27.3': + optional: true + + '@esbuild/darwin-x64@0.27.3': + optional: true + + '@esbuild/freebsd-arm64@0.27.3': + optional: true + + '@esbuild/freebsd-x64@0.27.3': + optional: true + + '@esbuild/linux-arm64@0.27.3': + optional: true + + '@esbuild/linux-arm@0.27.3': + optional: true + + '@esbuild/linux-ia32@0.27.3': + optional: true + + '@esbuild/linux-loong64@0.27.3': + optional: true + + '@esbuild/linux-mips64el@0.27.3': + optional: true + + '@esbuild/linux-ppc64@0.27.3': + optional: true + + '@esbuild/linux-riscv64@0.27.3': + optional: true + + '@esbuild/linux-s390x@0.27.3': + optional: true + + '@esbuild/linux-x64@0.27.3': + optional: true + + '@esbuild/netbsd-arm64@0.27.3': + optional: true + + '@esbuild/netbsd-x64@0.27.3': + optional: true + + '@esbuild/openbsd-arm64@0.27.3': + optional: true + + '@esbuild/openbsd-x64@0.27.3': + optional: true + + '@esbuild/openharmony-arm64@0.27.3': + 
optional: true + + '@esbuild/sunos-x64@0.27.3': + optional: true + + '@esbuild/win32-arm64@0.27.3': + optional: true + + '@esbuild/win32-ia32@0.27.3': + optional: true + + '@esbuild/win32-x64@0.27.3': + optional: true + + '@eslint-community/eslint-utils@4.9.1(eslint@10.0.2)': + dependencies: + eslint: 10.0.2 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.2': {} + + '@eslint/config-array@0.23.2': + dependencies: + '@eslint/object-schema': 3.0.2 + debug: 4.4.3 + minimatch: 10.2.4 + transitivePeerDependencies: + - supports-color + + '@eslint/config-helpers@0.5.2': + dependencies: + '@eslint/core': 1.1.0 + + '@eslint/core@1.1.0': + dependencies: + '@types/json-schema': 7.0.15 + + '@eslint/object-schema@3.0.2': {} + + '@eslint/plugin-kit@0.6.0': + dependencies: + '@eslint/core': 1.1.0 + levn: 0.4.1 + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.7': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.4.3 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.4.3': {} + + '@jridgewell/gen-mapping@0.3.13': + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/remapping@2.3.5': + dependencies: + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@playwright/test@1.58.2': + dependencies: + playwright: 1.58.2 + + '@rolldown/pluginutils@1.0.0-rc.2': {} + + '@rollup/rollup-android-arm-eabi@4.59.0': + optional: true + + '@rollup/rollup-android-arm64@4.59.0': + optional: true + + '@rollup/rollup-darwin-arm64@4.59.0': + optional: true + + '@rollup/rollup-darwin-x64@4.59.0': + optional: true + + '@rollup/rollup-freebsd-arm64@4.59.0': + optional: true + + '@rollup/rollup-freebsd-x64@4.59.0': + optional: true + + 
'@rollup/rollup-linux-arm-gnueabihf@4.59.0': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.59.0': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-loong64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-ppc64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.59.0': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.59.0': + optional: true + + '@rollup/rollup-linux-x64-musl@4.59.0': + optional: true + + '@rollup/rollup-openbsd-x64@4.59.0': + optional: true + + '@rollup/rollup-openharmony-arm64@4.59.0': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.59.0': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.59.0': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.59.0': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.59.0': + optional: true + + '@types/chai@5.2.3': + dependencies: + '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 + + '@types/deep-eql@4.0.2': {} + + '@types/esrecurse@4.3.1': {} + + '@types/estree@1.0.8': {} + + '@types/json-schema@7.0.15': {} + + '@vitejs/plugin-vue@6.0.4(vite@7.3.1(yaml@2.8.2))(vue@3.5.29)': + dependencies: + '@rolldown/pluginutils': 1.0.0-rc.2 + vite: 7.3.1(yaml@2.8.2) + vue: 3.5.29 + + '@vitest/expect@3.2.4': + dependencies: + '@types/chai': 5.2.3 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.3.3 + tinyrainbow: 2.0.0 + + '@vitest/mocker@3.2.4(vite@7.3.1(yaml@2.8.2))': + dependencies: + '@vitest/spy': 3.2.4 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.1(yaml@2.8.2) + + '@vitest/pretty-format@3.2.4': + dependencies: + tinyrainbow: 2.0.0 + + 
'@vitest/runner@3.2.4': + dependencies: + '@vitest/utils': 3.2.4 + pathe: 2.0.3 + strip-literal: 3.1.0 + + '@vitest/snapshot@3.2.4': + dependencies: + '@vitest/pretty-format': 3.2.4 + magic-string: 0.30.21 + pathe: 2.0.3 + + '@vitest/spy@3.2.4': + dependencies: + tinyspy: 4.0.4 + + '@vitest/utils@3.2.4': + dependencies: + '@vitest/pretty-format': 3.2.4 + loupe: 3.2.1 + tinyrainbow: 2.0.0 + + '@vue-macros/common@3.1.2(vue@3.5.29)': + dependencies: + '@vue/compiler-sfc': 3.5.29 + ast-kit: 2.2.0 + local-pkg: 1.1.2 + magic-string-ast: 1.0.3 + unplugin-utils: 0.3.1 + optionalDependencies: + vue: 3.5.29 + + '@vue/compiler-core@3.5.29': + dependencies: + '@babel/parser': 7.29.0 + '@vue/shared': 3.5.29 + entities: 7.0.1 + estree-walker: 2.0.2 + source-map-js: 1.2.1 + + '@vue/compiler-dom@3.5.29': + dependencies: + '@vue/compiler-core': 3.5.29 + '@vue/shared': 3.5.29 + + '@vue/compiler-sfc@3.5.29': + dependencies: + '@babel/parser': 7.29.0 + '@vue/compiler-core': 3.5.29 + '@vue/compiler-dom': 3.5.29 + '@vue/compiler-ssr': 3.5.29 + '@vue/shared': 3.5.29 + estree-walker: 2.0.2 + magic-string: 0.30.21 + postcss: 8.5.8 + source-map-js: 1.2.1 + + '@vue/compiler-ssr@3.5.29': + dependencies: + '@vue/compiler-dom': 3.5.29 + '@vue/shared': 3.5.29 + + '@vue/devtools-api@7.7.9': + dependencies: + '@vue/devtools-kit': 7.7.9 + + '@vue/devtools-api@8.0.7': + dependencies: + '@vue/devtools-kit': 8.0.7 + + '@vue/devtools-kit@7.7.9': + dependencies: + '@vue/devtools-shared': 7.7.9 + birpc: 2.9.0 + hookable: 5.5.3 + mitt: 3.0.1 + perfect-debounce: 1.0.0 + speakingurl: 14.0.1 + superjson: 2.2.6 + + '@vue/devtools-kit@8.0.7': + dependencies: + '@vue/devtools-shared': 8.0.7 + birpc: 2.9.0 + hookable: 5.5.3 + perfect-debounce: 2.1.0 + + '@vue/devtools-shared@7.7.9': + dependencies: + rfdc: 1.4.1 + + '@vue/devtools-shared@8.0.7': {} + + '@vue/reactivity@3.5.29': + dependencies: + '@vue/shared': 3.5.29 + + '@vue/runtime-core@3.5.29': + dependencies: + '@vue/reactivity': 3.5.29 + '@vue/shared': 
3.5.29 + + '@vue/runtime-dom@3.5.29': + dependencies: + '@vue/reactivity': 3.5.29 + '@vue/runtime-core': 3.5.29 + '@vue/shared': 3.5.29 + csstype: 3.2.3 + + '@vue/server-renderer@3.5.29(vue@3.5.29)': + dependencies: + '@vue/compiler-ssr': 3.5.29 + '@vue/shared': 3.5.29 + vue: 3.5.29 + + '@vue/shared@3.5.29': {} + + acorn-jsx@5.3.2(acorn@8.16.0): + dependencies: + acorn: 8.16.0 + + acorn@8.16.0: {} + + agent-base@7.1.4: {} + + ajv@6.14.0: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + assertion-error@2.0.1: {} + + ast-kit@2.2.0: + dependencies: + '@babel/parser': 7.29.0 + pathe: 2.0.3 + + ast-walker-scope@0.8.3: + dependencies: + '@babel/parser': 7.29.0 + ast-kit: 2.2.0 + + axe-core@4.11.1: {} + + balanced-match@4.0.4: {} + + birpc@2.9.0: {} + + boolbase@1.0.0: {} + + brace-expansion@5.0.4: + dependencies: + balanced-match: 4.0.4 + + cac@6.7.14: {} + + chai@5.3.3: + dependencies: + assertion-error: 2.0.1 + check-error: 2.1.3 + deep-eql: 5.0.2 + loupe: 3.2.1 + pathval: 2.0.1 + + check-error@2.1.3: {} + + chokidar@5.0.0: + dependencies: + readdirp: 5.0.0 + + confbox@0.1.8: {} + + confbox@0.2.4: {} + + copy-anything@4.0.5: + dependencies: + is-what: 5.5.0 + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + cssesc@3.0.0: {} + + cssstyle@4.6.0: + dependencies: + '@asamuzakjp/css-color': 3.2.0 + rrweb-cssom: 0.8.0 + + csstype@3.2.3: {} + + data-urls@5.0.0: + dependencies: + whatwg-mimetype: 4.0.0 + whatwg-url: 14.2.0 + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + decimal.js@10.6.0: {} + + deep-eql@5.0.2: {} + + deep-is@0.1.4: {} + + echarts@6.0.0: + dependencies: + tslib: 2.3.0 + zrender: 6.0.0 + + entities@6.0.1: {} + + entities@7.0.1: {} + + es-module-lexer@1.7.0: {} + + esbuild@0.27.3: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.3 + '@esbuild/android-arm': 0.27.3 + '@esbuild/android-arm64': 0.27.3 + '@esbuild/android-x64': 0.27.3 + 
'@esbuild/darwin-arm64': 0.27.3 + '@esbuild/darwin-x64': 0.27.3 + '@esbuild/freebsd-arm64': 0.27.3 + '@esbuild/freebsd-x64': 0.27.3 + '@esbuild/linux-arm': 0.27.3 + '@esbuild/linux-arm64': 0.27.3 + '@esbuild/linux-ia32': 0.27.3 + '@esbuild/linux-loong64': 0.27.3 + '@esbuild/linux-mips64el': 0.27.3 + '@esbuild/linux-ppc64': 0.27.3 + '@esbuild/linux-riscv64': 0.27.3 + '@esbuild/linux-s390x': 0.27.3 + '@esbuild/linux-x64': 0.27.3 + '@esbuild/netbsd-arm64': 0.27.3 + '@esbuild/netbsd-x64': 0.27.3 + '@esbuild/openbsd-arm64': 0.27.3 + '@esbuild/openbsd-x64': 0.27.3 + '@esbuild/openharmony-arm64': 0.27.3 + '@esbuild/sunos-x64': 0.27.3 + '@esbuild/win32-arm64': 0.27.3 + '@esbuild/win32-ia32': 0.27.3 + '@esbuild/win32-x64': 0.27.3 + + escape-string-regexp@4.0.0: {} + + eslint-config-prettier@10.1.8(eslint@10.0.2): + dependencies: + eslint: 10.0.2 + + eslint-plugin-vue@10.8.0(eslint@10.0.2)(vue-eslint-parser@10.4.0(eslint@10.0.2)): + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@10.0.2) + eslint: 10.0.2 + natural-compare: 1.4.0 + nth-check: 2.1.1 + postcss-selector-parser: 7.1.1 + semver: 7.7.4 + vue-eslint-parser: 10.4.0(eslint@10.0.2) + xml-name-validator: 4.0.0 + + eslint-scope@9.1.1: + dependencies: + '@types/esrecurse': 4.3.1 + '@types/estree': 1.0.8 + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint-visitor-keys@5.0.1: {} + + eslint@10.0.2: + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@10.0.2) + '@eslint-community/regexpp': 4.12.2 + '@eslint/config-array': 0.23.2 + '@eslint/config-helpers': 0.5.2 + '@eslint/core': 1.1.0 + '@eslint/plugin-kit': 0.6.0 + '@humanfs/node': 0.16.7 + '@humanwhocodes/module-importer': 1.0.1 + '@humanwhocodes/retry': 0.4.3 + '@types/estree': 1.0.8 + ajv: 6.14.0 + cross-spawn: 7.0.6 + debug: 4.4.3 + escape-string-regexp: 4.0.0 + eslint-scope: 9.1.1 + eslint-visitor-keys: 5.0.1 + espree: 11.1.1 + esquery: 1.7.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 8.0.0 + 
find-up: 5.0.0 + glob-parent: 6.0.2 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + json-stable-stringify-without-jsonify: 1.0.1 + minimatch: 10.2.4 + natural-compare: 1.4.0 + optionator: 0.9.4 + transitivePeerDependencies: + - supports-color + + espree@11.1.1: + dependencies: + acorn: 8.16.0 + acorn-jsx: 5.3.2(acorn@8.16.0) + eslint-visitor-keys: 5.0.1 + + esquery@1.7.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + estree-walker@2.0.2: {} + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + esutils@2.0.3: {} + + expect-type@1.3.0: {} + + exsolve@1.0.8: {} + + fast-deep-equal@3.1.3: {} + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + file-entry-cache@8.0.0: + dependencies: + flat-cache: 4.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@4.0.1: + dependencies: + flatted: 3.3.4 + keyv: 4.5.4 + + flatted@3.3.4: {} + + fsevents@2.3.2: + optional: true + + fsevents@2.3.3: + optional: true + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + hookable@5.5.3: {} + + html-encoding-sniffer@4.0.0: + dependencies: + whatwg-encoding: 3.1.1 + + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + + ignore@5.3.2: {} + + imurmurhash@0.1.4: {} + + is-extglob@2.1.1: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-potential-custom-element-name@1.0.1: {} + + is-what@5.5.0: {} + + isexe@2.0.0: {} + + js-tokens@9.0.1: {} + + jsdom@26.1.0: + dependencies: + cssstyle: 4.6.0 + data-urls: 5.0.0 + decimal.js: 10.6.0 + html-encoding-sniffer: 4.0.0 + 
http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + is-potential-custom-element-name: 1.0.1 + nwsapi: 2.2.23 + parse5: 7.3.0 + rrweb-cssom: 0.8.0 + saxes: 6.0.0 + symbol-tree: 3.2.4 + tough-cookie: 5.1.2 + w3c-xmlserializer: 5.0.0 + webidl-conversions: 7.0.0 + whatwg-encoding: 3.1.1 + whatwg-mimetype: 4.0.0 + whatwg-url: 14.2.0 + ws: 8.19.0 + xml-name-validator: 5.0.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + + jsesc@3.1.0: {} + + json-buffer@3.0.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + json5@2.2.3: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + local-pkg@1.1.2: + dependencies: + mlly: 1.8.0 + pkg-types: 2.3.0 + quansync: 0.2.11 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + loupe@3.2.1: {} + + lru-cache@10.4.3: {} + + magic-string-ast@1.0.3: + dependencies: + magic-string: 0.30.21 + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + minimatch@10.2.4: + dependencies: + brace-expansion: 5.0.4 + + mitt@3.0.1: {} + + mlly@1.8.0: + dependencies: + acorn: 8.16.0 + pathe: 2.0.3 + pkg-types: 1.3.1 + ufo: 1.6.3 + + ms@2.1.3: {} + + muggle-string@0.4.1: {} + + nanoid@3.3.11: {} + + natural-compare@1.4.0: {} + + nth-check@2.1.1: + dependencies: + boolbase: 1.0.0 + + nwsapi@2.2.23: {} + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + parse5@7.3.0: + dependencies: + entities: 6.0.1 + + path-exists@4.0.0: {} + + path-key@3.1.1: {} + + pathe@2.0.3: {} + + pathval@2.0.1: {} + + perfect-debounce@1.0.0: {} + + perfect-debounce@2.1.0: {} + + picocolors@1.1.1: {} + + picomatch@4.0.3: {} + + pinia@3.0.4(vue@3.5.29): + dependencies: + 
'@vue/devtools-api': 7.7.9 + vue: 3.5.29 + + pkg-types@1.3.1: + dependencies: + confbox: 0.1.8 + mlly: 1.8.0 + pathe: 2.0.3 + + pkg-types@2.3.0: + dependencies: + confbox: 0.2.4 + exsolve: 1.0.8 + pathe: 2.0.3 + + playwright-core@1.58.2: {} + + playwright@1.58.2: + dependencies: + playwright-core: 1.58.2 + optionalDependencies: + fsevents: 2.3.2 + + postcss-selector-parser@7.1.1: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss@8.5.8: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + prelude-ls@1.2.1: {} + + prettier@3.8.1: {} + + punycode@2.3.1: {} + + quansync@0.2.11: {} + + readdirp@5.0.0: {} + + rfdc@1.4.1: {} + + rollup@4.59.0: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.59.0 + '@rollup/rollup-android-arm64': 4.59.0 + '@rollup/rollup-darwin-arm64': 4.59.0 + '@rollup/rollup-darwin-x64': 4.59.0 + '@rollup/rollup-freebsd-arm64': 4.59.0 + '@rollup/rollup-freebsd-x64': 4.59.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.59.0 + '@rollup/rollup-linux-arm-musleabihf': 4.59.0 + '@rollup/rollup-linux-arm64-gnu': 4.59.0 + '@rollup/rollup-linux-arm64-musl': 4.59.0 + '@rollup/rollup-linux-loong64-gnu': 4.59.0 + '@rollup/rollup-linux-loong64-musl': 4.59.0 + '@rollup/rollup-linux-ppc64-gnu': 4.59.0 + '@rollup/rollup-linux-ppc64-musl': 4.59.0 + '@rollup/rollup-linux-riscv64-gnu': 4.59.0 + '@rollup/rollup-linux-riscv64-musl': 4.59.0 + '@rollup/rollup-linux-s390x-gnu': 4.59.0 + '@rollup/rollup-linux-x64-gnu': 4.59.0 + '@rollup/rollup-linux-x64-musl': 4.59.0 + '@rollup/rollup-openbsd-x64': 4.59.0 + '@rollup/rollup-openharmony-arm64': 4.59.0 + '@rollup/rollup-win32-arm64-msvc': 4.59.0 + '@rollup/rollup-win32-ia32-msvc': 4.59.0 + '@rollup/rollup-win32-x64-gnu': 4.59.0 + '@rollup/rollup-win32-x64-msvc': 4.59.0 + fsevents: 2.3.3 + + rrweb-cssom@0.8.0: {} + + safer-buffer@2.1.2: {} + + saxes@6.0.0: + dependencies: + xmlchars: 2.2.0 + + scule@1.3.0: {} + + semver@7.7.4: {} + + 
shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + siginfo@2.0.0: {} + + source-map-js@1.2.1: {} + + speakingurl@14.0.1: {} + + stackback@0.0.2: {} + + std-env@3.10.0: {} + + strip-literal@3.1.0: + dependencies: + js-tokens: 9.0.1 + + superjson@2.2.6: + dependencies: + copy-anything: 4.0.5 + + symbol-tree@3.2.4: {} + + tinybench@2.9.0: {} + + tinyexec@0.3.2: {} + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + tinypool@1.1.1: {} + + tinyrainbow@2.0.0: {} + + tinyspy@4.0.4: {} + + tldts-core@6.1.86: {} + + tldts@6.1.86: + dependencies: + tldts-core: 6.1.86 + + tough-cookie@5.1.2: + dependencies: + tldts: 6.1.86 + + tr46@5.1.1: + dependencies: + punycode: 2.3.1 + + tslib@2.3.0: {} + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + ufo@1.6.3: {} + + unplugin-utils@0.3.1: + dependencies: + pathe: 2.0.3 + picomatch: 4.0.3 + + unplugin@3.0.0: + dependencies: + '@jridgewell/remapping': 2.3.5 + picomatch: 4.0.3 + webpack-virtual-modules: 0.6.2 + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + util-deprecate@1.0.2: {} + + vite-node@3.2.4(yaml@2.8.2): + dependencies: + cac: 6.7.14 + debug: 4.4.3 + es-module-lexer: 1.7.0 + pathe: 2.0.3 + vite: 7.3.1(yaml@2.8.2) + transitivePeerDependencies: + - '@types/node' + - jiti + - less + - lightningcss + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + - tsx + - yaml + + vite@7.3.1(yaml@2.8.2): + dependencies: + esbuild: 0.27.3 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + postcss: 8.5.8 + rollup: 4.59.0 + tinyglobby: 0.2.15 + optionalDependencies: + fsevents: 2.3.3 + yaml: 2.8.2 + + vitest@3.2.4(jsdom@26.1.0)(yaml@2.8.2): + dependencies: + '@types/chai': 5.2.3 + '@vitest/expect': 3.2.4 + '@vitest/mocker': 3.2.4(vite@7.3.1(yaml@2.8.2)) + '@vitest/pretty-format': 3.2.4 + '@vitest/runner': 3.2.4 + '@vitest/snapshot': 3.2.4 + '@vitest/spy': 3.2.4 + '@vitest/utils': 3.2.4 + chai: 5.3.3 + debug: 4.4.3 + 
expect-type: 1.3.0 + magic-string: 0.30.21 + pathe: 2.0.3 + picomatch: 4.0.3 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 0.3.2 + tinyglobby: 0.2.15 + tinypool: 1.1.1 + tinyrainbow: 2.0.0 + vite: 7.3.1(yaml@2.8.2) + vite-node: 3.2.4(yaml@2.8.2) + why-is-node-running: 2.3.0 + optionalDependencies: + jsdom: 26.1.0 + transitivePeerDependencies: + - jiti + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + - tsx + - yaml + + vue-echarts@8.0.1(echarts@6.0.0)(vue@3.5.29): + dependencies: + echarts: 6.0.0 + vue: 3.5.29 + + vue-eslint-parser@10.4.0(eslint@10.0.2): + dependencies: + debug: 4.4.3 + eslint: 10.0.2 + eslint-scope: 9.1.1 + eslint-visitor-keys: 5.0.1 + espree: 11.1.1 + esquery: 1.7.0 + semver: 7.7.4 + transitivePeerDependencies: + - supports-color + + vue-router@5.0.3(@vue/compiler-sfc@3.5.29)(pinia@3.0.4(vue@3.5.29))(vue@3.5.29): + dependencies: + '@babel/generator': 7.29.1 + '@vue-macros/common': 3.1.2(vue@3.5.29) + '@vue/devtools-api': 8.0.7 + ast-walker-scope: 0.8.3 + chokidar: 5.0.0 + json5: 2.2.3 + local-pkg: 1.1.2 + magic-string: 0.30.21 + mlly: 1.8.0 + muggle-string: 0.4.1 + pathe: 2.0.3 + picomatch: 4.0.3 + scule: 1.3.0 + tinyglobby: 0.2.15 + unplugin: 3.0.0 + unplugin-utils: 0.3.1 + vue: 3.5.29 + yaml: 2.8.2 + optionalDependencies: + '@vue/compiler-sfc': 3.5.29 + pinia: 3.0.4(vue@3.5.29) + + vue@3.5.29: + dependencies: + '@vue/compiler-dom': 3.5.29 + '@vue/compiler-sfc': 3.5.29 + '@vue/runtime-dom': 3.5.29 + '@vue/server-renderer': 3.5.29(vue@3.5.29) + '@vue/shared': 3.5.29 + + w3c-xmlserializer@5.0.0: + dependencies: + xml-name-validator: 5.0.0 + + webidl-conversions@7.0.0: {} + + webpack-virtual-modules@0.6.2: {} + + whatwg-encoding@3.1.1: + dependencies: + iconv-lite: 0.6.3 + + whatwg-mimetype@4.0.0: {} + + whatwg-url@14.2.0: + dependencies: + tr46: 5.1.1 + webidl-conversions: 7.0.0 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + why-is-node-running@2.3.0: + dependencies: + 
siginfo: 2.0.0 + stackback: 0.0.2 + + word-wrap@1.2.5: {} + + ws@8.19.0: {} + + xml-name-validator@4.0.0: {} + + xml-name-validator@5.0.0: {} + + xmlchars@2.2.0: {} + + yaml@2.8.2: {} + + yocto-queue@0.1.0: {} + + zrender@6.0.0: + dependencies: + tslib: 2.3.0 diff --git a/admin/ui/src/App.vue b/admin/ui/src/App.vue new file mode 100644 index 0000000..6ce1078 --- /dev/null +++ b/admin/ui/src/App.vue @@ -0,0 +1,14 @@ + + + diff --git a/admin/ui/src/assets/global.css b/admin/ui/src/assets/global.css new file mode 100644 index 0000000..9567398 --- /dev/null +++ b/admin/ui/src/assets/global.css @@ -0,0 +1,40 @@ +*, +*::before, +*::after { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +html { + font-size: 16px; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +body { + font-family: var(--font-family); + font-size: var(--font-size-sm); + line-height: var(--line-height-normal); + color: var(--color-text-primary); + background-color: var(--color-bg-primary); +} + +a { + color: var(--color-accent); + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +button { + cursor: pointer; + font-family: inherit; +} + +:focus-visible { + outline: 2px solid var(--color-accent); + outline-offset: 2px; +} diff --git a/admin/ui/src/assets/variables.css b/admin/ui/src/assets/variables.css new file mode 100644 index 0000000..9d85650 --- /dev/null +++ b/admin/ui/src/assets/variables.css @@ -0,0 +1,88 @@ +:root { + /* Colors — Background */ + --color-bg-primary: #f8f9fc; + --color-bg-surface: #ffffff; + --color-bg-sidebar: #0f1729; + --color-bg-sidebar-hover: #1a2340; + --color-bg-sidebar-active: rgba(59, 130, 246, 0.15); + + /* Colors — Accent (AA contrast on both --color-bg-surface and --color-bg-primary) */ + --color-accent: #2563eb; + --color-accent-light: #dbeafe; + --color-accent-hover: #1d4ed8; + + /* Colors — Text (all meet WCAG AA 4.5:1 on --color-bg-surface) */ + --color-text-primary: #111827; + 
--color-text-secondary: #4b5563; + --color-text-tertiary: #6b7280; + --color-text-sidebar: #94a3b8; + --color-text-sidebar-active: #ffffff; + --color-text-inverse: #ffffff; + + /* Colors — Status */ + --color-success: #047857; + --color-success-bg: #ecfdf5; + --color-success-border: #a7f3d0; + --color-warning: #f59e0b; + --color-warning-bg: #fffbeb; + --color-warning-border: #fde68a; + --color-error: #dc2626; + --color-error-bg: #fef2f2; + --color-error-hover: #b91c1c; + --color-error-border: #fecaca; + --color-purple: #7c3aed; + --color-purple-light: #f3e8ff; + + /* Colors — Border */ + --color-border: #e5e7eb; + --color-border-light: #f3f4f6; + + /* Typography */ + --font-family: + -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Inter', Roboto, sans-serif; + --font-family-mono: 'SF Mono', 'Fira Code', 'Cascadia Code', monospace; + + --font-size-xs: 0.6875rem; /* 11px */ + --font-size-sm: 0.8125rem; /* 13px */ + --font-size-base: 0.875rem; /* 14px */ + --font-size-md: 0.9375rem; /* 15px */ + --font-size-lg: 1.125rem; /* 18px */ + --font-size-xl: 1.375rem; /* 22px */ + --font-size-2xl: 1.75rem; /* 28px */ + + --font-weight-normal: 400; + --font-weight-medium: 500; + --font-weight-semibold: 600; + --font-weight-bold: 700; + + --line-height-tight: 1.4; + --line-height-normal: 1.5; + --line-height-relaxed: 1.6; + + /* Spacing */ + --space-1: 0.25rem; /* 4px */ + --space-2: 0.5rem; /* 8px */ + --space-3: 0.75rem; /* 12px */ + --space-4: 1rem; /* 16px */ + --space-5: 1.25rem; /* 20px */ + --space-6: 1.5rem; /* 24px */ + --space-8: 2rem; /* 32px */ + + /* Border Radius */ + --radius-sm: 4px; + --radius-md: 6px; + --radius-lg: 8px; + --radius-xl: 12px; + --radius-2xl: 16px; + + /* Shadows */ + --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.05); + --shadow-md: + 0 4px 6px -1px rgba(0, 0, 0, 0.07), 0 2px 4px -1px rgba(0, 0, 0, 0.04); + --shadow-lg: + 0 10px 15px -3px rgba(0, 0, 0, 0.08), 0 4px 6px -2px rgba(0, 0, 0, 0.04); + --shadow-modal: 0 25px 50px -12px rgba(0, 0, 0, 0.25); + 
+ /* Layout */ + --sidebar-width: 240px; +} diff --git a/admin/ui/src/components/AddInstanceModal.vue b/admin/ui/src/components/AddInstanceModal.vue new file mode 100644 index 0000000..664bc97 --- /dev/null +++ b/admin/ui/src/components/AddInstanceModal.vue @@ -0,0 +1,200 @@ + + + + + diff --git a/admin/ui/src/components/BaseButton.vue b/admin/ui/src/components/BaseButton.vue new file mode 100644 index 0000000..de08318 --- /dev/null +++ b/admin/ui/src/components/BaseButton.vue @@ -0,0 +1,101 @@ + + + + + diff --git a/admin/ui/src/components/BaseCard.vue b/admin/ui/src/components/BaseCard.vue new file mode 100644 index 0000000..1cc00a7 --- /dev/null +++ b/admin/ui/src/components/BaseCard.vue @@ -0,0 +1,34 @@ + + + + + diff --git a/admin/ui/src/components/BaseEmptyState.vue b/admin/ui/src/components/BaseEmptyState.vue new file mode 100644 index 0000000..2a92ff5 --- /dev/null +++ b/admin/ui/src/components/BaseEmptyState.vue @@ -0,0 +1,55 @@ + + + + + diff --git a/admin/ui/src/components/BaseInput.vue b/admin/ui/src/components/BaseInput.vue new file mode 100644 index 0000000..730f89c --- /dev/null +++ b/admin/ui/src/components/BaseInput.vue @@ -0,0 +1,127 @@ + + + + + diff --git a/admin/ui/src/components/ConfirmDialog.vue b/admin/ui/src/components/ConfirmDialog.vue new file mode 100644 index 0000000..de94d9c --- /dev/null +++ b/admin/ui/src/components/ConfirmDialog.vue @@ -0,0 +1,108 @@ + + + + + diff --git a/admin/ui/src/components/FleetKpiPanel.vue b/admin/ui/src/components/FleetKpiPanel.vue new file mode 100644 index 0000000..d0f38e7 --- /dev/null +++ b/admin/ui/src/components/FleetKpiPanel.vue @@ -0,0 +1,78 @@ + + + + + diff --git a/admin/ui/src/components/InstanceCard.vue b/admin/ui/src/components/InstanceCard.vue new file mode 100644 index 0000000..26ffcf2 --- /dev/null +++ b/admin/ui/src/components/InstanceCard.vue @@ -0,0 +1,138 @@ + + + + + diff --git a/admin/ui/src/components/InstanceTable.vue b/admin/ui/src/components/InstanceTable.vue new file mode 100644 
index 0000000..3f9352d --- /dev/null +++ b/admin/ui/src/components/InstanceTable.vue @@ -0,0 +1,138 @@ + + + + + diff --git a/admin/ui/src/components/KpiCard.vue b/admin/ui/src/components/KpiCard.vue new file mode 100644 index 0000000..fd0f0f4 --- /dev/null +++ b/admin/ui/src/components/KpiCard.vue @@ -0,0 +1,120 @@ + + + + + diff --git a/admin/ui/src/components/LoadingSpinner.vue b/admin/ui/src/components/LoadingSpinner.vue new file mode 100644 index 0000000..2a30019 --- /dev/null +++ b/admin/ui/src/components/LoadingSpinner.vue @@ -0,0 +1,75 @@ + + + + + diff --git a/admin/ui/src/components/OverviewTab.vue b/admin/ui/src/components/OverviewTab.vue new file mode 100644 index 0000000..712a53f --- /dev/null +++ b/admin/ui/src/components/OverviewTab.vue @@ -0,0 +1,199 @@ + + + + + diff --git a/admin/ui/src/components/StatusIndicator.vue b/admin/ui/src/components/StatusIndicator.vue new file mode 100644 index 0000000..87d33fa --- /dev/null +++ b/admin/ui/src/components/StatusIndicator.vue @@ -0,0 +1,85 @@ + + + + + diff --git a/admin/ui/src/components/TrafficTab.vue b/admin/ui/src/components/TrafficTab.vue new file mode 100644 index 0000000..c22bc38 --- /dev/null +++ b/admin/ui/src/components/TrafficTab.vue @@ -0,0 +1,385 @@ + + + + + diff --git a/admin/ui/src/components/VendorTable.vue b/admin/ui/src/components/VendorTable.vue new file mode 100644 index 0000000..a24bd3d --- /dev/null +++ b/admin/ui/src/components/VendorTable.vue @@ -0,0 +1,222 @@ + + + + + diff --git a/admin/ui/src/composables/useAnimatedValue.js b/admin/ui/src/composables/useAnimatedValue.js new file mode 100644 index 0000000..97e5d41 --- /dev/null +++ b/admin/ui/src/composables/useAnimatedValue.js @@ -0,0 +1,48 @@ +import { ref, watch, toValue, onUnmounted } from 'vue'; + +export function easeOutCubic(t) { + return 1 - Math.pow(1 - t, 3); +} + +export function useAnimatedValue(source, duration = 400) { + const display = ref(toValue(source) ?? 
0); + let frameId = null; + let startTime = 0; + let startVal = 0; + let endVal = 0; + + function tick(timestamp) { + if (startTime === 0) startTime = timestamp; + const progress = Math.min((timestamp - startTime) / duration, 1); + display.value = startVal + (endVal - startVal) * easeOutCubic(progress); + + if (progress < 1) { + frameId = requestAnimationFrame(tick); + } else { + frameId = null; + } + } + + watch( + () => toValue(source), + (newVal) => { + if (newVal == null) { + if (frameId != null) cancelAnimationFrame(frameId); + frameId = null; + display.value = 0; + return; + } + if (frameId != null) cancelAnimationFrame(frameId); + startVal = display.value; + endVal = newVal; + startTime = 0; + frameId = requestAnimationFrame(tick); + }, + ); + + onUnmounted(() => { + if (frameId != null) cancelAnimationFrame(frameId); + }); + + return display; +} diff --git a/admin/ui/src/composables/useAnimatedValue.test.js b/admin/ui/src/composables/useAnimatedValue.test.js new file mode 100644 index 0000000..5fe8c83 --- /dev/null +++ b/admin/ui/src/composables/useAnimatedValue.test.js @@ -0,0 +1,144 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { ref, nextTick } from 'vue'; +import { withSetup } from '../utils/test-utils.js'; +import { easeOutCubic, useAnimatedValue } from './useAnimatedValue.js'; + +describe('easeOutCubic', () => { + it('returns 0 at start', () => { + expect(easeOutCubic(0)).toBe(0); + }); + + it('returns 1 at end', () => { + expect(easeOutCubic(1)).toBe(1); + }); + + it('decelerates — first half covers more than 50%', () => { + expect(easeOutCubic(0.5)).toBeGreaterThan(0.5); + }); + + it('is monotonically increasing', () => { + let prev = 0; + for (let t = 0.1; t <= 1; t += 0.1) { + const val = easeOutCubic(t); + expect(val).toBeGreaterThan(prev); + prev = val; + } + }); +}); + +describe('useAnimatedValue', () => { + let rafCallbacks; + let rafId; + + beforeEach(() => { + rafCallbacks = []; + rafId = 0; + 
vi.spyOn(window, 'requestAnimationFrame').mockImplementation((cb) => { + rafCallbacks.push(cb); + return ++rafId; + }); + vi.spyOn(window, 'cancelAnimationFrame').mockImplementation(() => {}); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + function flushFrames(timestamp) { + const pending = [...rafCallbacks]; + rafCallbacks = []; + pending.forEach((cb) => cb(timestamp)); + } + + it('starts with the initial source value', () => { + const source = ref(100); + const { result } = withSetup(() => useAnimatedValue(source)); + expect(result.value).toBe(100); + }); + + it('starts at 0 for null source', () => { + const source = ref(null); + const { result } = withSetup(() => useAnimatedValue(source)); + expect(result.value).toBe(0); + }); + + it('schedules animation when source changes', async () => { + const source = ref(100); + withSetup(() => useAnimatedValue(source)); + + source.value = 200; + await nextTick(); + + expect(window.requestAnimationFrame).toHaveBeenCalled(); + }); + + it('reaches target value after full duration', async () => { + const duration = 400; + const source = ref(100); + const { result } = withSetup(() => useAnimatedValue(source, duration)); + + source.value = 200; + await nextTick(); + + // First frame sets startTime + flushFrames(1000); + // Frame past duration + flushFrames(1000 + duration + 1); + + expect(result.value).toBe(200); + }); + + it('shows intermediate value mid-animation', async () => { + const duration = 400; + const source = ref(0); + const { result } = withSetup(() => useAnimatedValue(source, duration)); + + source.value = 100; + await nextTick(); + + // First frame at t=0 + flushFrames(1000); + // Frame at t=200 (50% duration) + flushFrames(1200); + + expect(result.value).toBeGreaterThan(0); + expect(result.value).toBeLessThan(100); + }); + + it('resets to 0 when source becomes null', async () => { + const source = ref(100); + const { result } = withSetup(() => useAnimatedValue(source)); + + source.value = null; + 
await nextTick(); + + expect(result.value).toBe(0); + }); + + it('interrupts running animation on new value', async () => { + const duration = 400; + const source = ref(0); + const { result } = withSetup(() => useAnimatedValue(source, duration)); + + source.value = 100; + await nextTick(); + flushFrames(1000); + flushFrames(1100); // partial animation + + const midValue = result.value; + expect(midValue).toBeGreaterThan(0); + expect(midValue).toBeLessThan(100); + + // Change target mid-animation + source.value = 50; + await nextTick(); + + expect(window.cancelAnimationFrame).toHaveBeenCalled(); + + // Complete the new animation + flushFrames(2000); + flushFrames(2000 + duration + 1); + + expect(result.value).toBe(50); + }); +}); diff --git a/admin/ui/src/composables/useAuditLog.js b/admin/ui/src/composables/useAuditLog.js new file mode 100644 index 0000000..f4c2234 --- /dev/null +++ b/admin/ui/src/composables/useAuditLog.js @@ -0,0 +1,81 @@ +import { ref, computed, watch } from 'vue'; +import { buildAuditQueryString, totalPages } from '../utils/audit.js'; + +export function useAuditLog(api) { + const items = ref([]); + const total = ref(0); + const loading = ref(false); + const error = ref(null); + + const filters = ref({ + q: '', + action: '', + from: '', + to: '', + page: 1, + perPage: 20, + }); + + let fetchId = 0; + + async function fetch() { + const id = ++fetchId; + loading.value = true; + error.value = null; + try { + const qs = buildAuditQueryString(filters.value); + const data = await api.get(`/api/audit${qs}`); + if (id !== fetchId) return; + items.value = data.items; + total.value = data.total; + } catch (err) { + if (id !== fetchId) return; + error.value = err.message || 'Failed to load audit log'; + items.value = []; + total.value = 0; + } finally { + if (id === fetchId) loading.value = false; + } + } + + function setFilter(key, value) { + filters.value = { ...filters.value, [key]: value, page: 1 }; + } + + function setPage(page) { + const max = 
totalPages(total.value, filters.value.perPage); + filters.value = { + ...filters.value, + page: Math.max(1, Math.min(page, max)), + }; + } + + function nextPage() { + setPage(filters.value.page + 1); + } + + function prevPage() { + setPage(filters.value.page - 1); + } + + const pageCount = computed(() => + totalPages(total.value, filters.value.perPage), + ); + + // Refetch when filters change. + watch(filters, () => fetch(), { deep: true }); + + return { + items, + total, + loading, + error, + filters, + fetch, + setFilter, + setPage, + nextPage, + prevPage, + pageCount, + }; +} diff --git a/admin/ui/src/composables/useAuditLog.test.js b/admin/ui/src/composables/useAuditLog.test.js new file mode 100644 index 0000000..c673eda --- /dev/null +++ b/admin/ui/src/composables/useAuditLog.test.js @@ -0,0 +1,179 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { nextTick } from 'vue'; +import { withSetup } from '../utils/test-utils.js'; +import { useAuditLog } from './useAuditLog.js'; + +function makeApi(response) { + return { + get: vi + .fn() + .mockResolvedValue(response ?? 
{ items: [], total: 0, page: 1 }), + }; +} + +function makeMockData(count = 3) { + return { + items: Array.from({ length: count }, (_, i) => ({ + id: i + 1, + user_id: 1, + user: 'admin', + action: 'instance.create', + instance_id: i + 10, + detail: `Created instance ${i + 1}`, + created_at: '2026-03-09T12:00:00Z', + })), + total: count, + page: 1, + }; +} + +describe('useAuditLog', () => { + let api; + + beforeEach(() => { + api = makeApi(makeMockData()); + }); + + it('initializes with empty state', () => { + const { result } = withSetup(() => useAuditLog(api)); + expect(result.items.value).toEqual([]); + expect(result.total.value).toBe(0); + expect(result.loading.value).toBe(false); + expect(result.error.value).toBeNull(); + }); + + it('fetches audit entries from API', async () => { + const { result } = withSetup(() => useAuditLog(api)); + await result.fetch(); + expect(api.get).toHaveBeenCalledWith('/api/audit'); + expect(result.items.value).toHaveLength(3); + expect(result.total.value).toBe(3); + }); + + it('sets loading state during fetch', async () => { + let resolvePromise; + api.get = vi.fn( + () => + new Promise((resolve) => { + resolvePromise = resolve; + }), + ); + const { result } = withSetup(() => useAuditLog(api)); + + const fetchPromise = result.fetch(); + expect(result.loading.value).toBe(true); + + resolvePromise({ items: [], total: 0, page: 1 }); + await fetchPromise; + expect(result.loading.value).toBe(false); + }); + + it('sets error on API failure', async () => { + api.get = vi.fn().mockRejectedValue(new Error('Network error')); + const { result } = withSetup(() => useAuditLog(api)); + + await result.fetch(); + expect(result.error.value).toBe('Network error'); + expect(result.items.value).toEqual([]); + }); + + it('builds query string from filters', async () => { + const { result } = withSetup(() => useAuditLog(api)); + result.setFilter('action', 'user.login'); + await nextTick(); + // Wait for the watcher-triggered fetch + await vi.waitFor(() 
=> { + expect(api.get).toHaveBeenCalledWith( + expect.stringContaining('action=user.login'), + ); + }); + }); + + it('resets page to 1 when setting a filter', () => { + const { result } = withSetup(() => useAuditLog(api)); + result.filters.value.page = 3; + result.setFilter('q', 'test'); + expect(result.filters.value.page).toBe(1); + }); + + it('navigates pages with nextPage/prevPage', async () => { + api = makeApi({ items: [], total: 60, page: 1 }); + const { result } = withSetup(() => useAuditLog(api)); + await result.fetch(); + + result.nextPage(); + expect(result.filters.value.page).toBe(2); + + result.nextPage(); + expect(result.filters.value.page).toBe(3); + + result.prevPage(); + expect(result.filters.value.page).toBe(2); + }); + + it('clamps page within valid range', async () => { + api = makeApi({ items: [], total: 40, page: 1 }); + const { result } = withSetup(() => useAuditLog(api)); + await result.fetch(); + + result.prevPage(); + expect(result.filters.value.page).toBe(1); + + result.setPage(999); + expect(result.filters.value.page).toBe(2); // total 40, perPage 20 = 2 pages + }); + + it('computes pageCount correctly', async () => { + api = makeApi({ items: [], total: 45, page: 1 }); + const { result } = withSetup(() => useAuditLog(api)); + await result.fetch(); + expect(result.pageCount.value).toBe(3); + }); + + it('discards stale responses from overlapping fetches', async () => { + let resolveFirst; + let resolveSecond; + api.get = vi + .fn() + .mockImplementationOnce( + () => + new Promise((r) => { + resolveFirst = r; + }), + ) + .mockImplementationOnce( + () => + new Promise((r) => { + resolveSecond = r; + }), + ); + const { result } = withSetup(() => useAuditLog(api)); + + const first = result.fetch(); + const second = result.fetch(); + + // Resolve second (newer) first + resolveSecond({ items: [{ id: 2 }], total: 1, page: 1 }); + await second; + + // Resolve first (stale) after + resolveFirst({ items: [{ id: 1 }], total: 1, page: 1 }); + await 
first; + + // Should keep the second (newer) result + expect(result.items.value).toEqual([{ id: 2 }]); + }); + + it('refetches automatically when filters change', async () => { + const { result } = withSetup(() => useAuditLog(api)); + // Initial fetch + await result.fetch(); + api.get.mockClear(); + + result.setFilter('q', 'proxy'); + await nextTick(); + await vi.waitFor(() => { + expect(api.get).toHaveBeenCalled(); + }); + }); +}); diff --git a/admin/ui/src/composables/useConfirmDialog.js b/admin/ui/src/composables/useConfirmDialog.js new file mode 100644 index 0000000..c008974 --- /dev/null +++ b/admin/ui/src/composables/useConfirmDialog.js @@ -0,0 +1,21 @@ +import { ref } from 'vue'; + +export function useConfirmDialog() { + const pending = ref(null); + + function requestConfirm(item) { + pending.value = item; + } + + async function confirm(action) { + const item = pending.value; + pending.value = null; + if (item && action) await action(item); + } + + function cancel() { + pending.value = null; + } + + return { pending, requestConfirm, confirm, cancel }; +} diff --git a/admin/ui/src/composables/useConfirmDialog.test.js b/admin/ui/src/composables/useConfirmDialog.test.js new file mode 100644 index 0000000..9cebfd0 --- /dev/null +++ b/admin/ui/src/composables/useConfirmDialog.test.js @@ -0,0 +1,41 @@ +import { describe, it, expect, vi } from 'vitest'; +import { withSetup } from '../utils/test-utils.js'; +import { useConfirmDialog } from './useConfirmDialog.js'; + +describe('useConfirmDialog', () => { + it('starts with null pending', () => { + const { result } = withSetup(() => useConfirmDialog()); + expect(result.pending.value).toBeNull(); + }); + + it('requestConfirm sets pending to the item', () => { + const { result } = withSetup(() => useConfirmDialog()); + const item = { id: 1, name: 'proxy-1' }; + result.requestConfirm(item); + expect(result.pending.value).toStrictEqual(item); + }); + + it('confirm calls action with the pending item and clears it', async () 
=> { + const { result } = withSetup(() => useConfirmDialog()); + const item = { id: 1 }; + const action = vi.fn(); + result.requestConfirm(item); + await result.confirm(action); + expect(action).toHaveBeenCalledWith(item); + expect(result.pending.value).toBeNull(); + }); + + it('confirm clears pending even without an action', async () => { + const { result } = withSetup(() => useConfirmDialog()); + result.requestConfirm({ id: 1 }); + await result.confirm(); + expect(result.pending.value).toBeNull(); + }); + + it('cancel clears pending without calling any action', () => { + const { result } = withSetup(() => useConfirmDialog()); + result.requestConfirm({ id: 1 }); + result.cancel(); + expect(result.pending.value).toBeNull(); + }); +}); diff --git a/admin/ui/src/composables/useFocusTrap.js b/admin/ui/src/composables/useFocusTrap.js new file mode 100644 index 0000000..c70a2f0 --- /dev/null +++ b/admin/ui/src/composables/useFocusTrap.js @@ -0,0 +1,40 @@ +import { onMounted, onUnmounted } from 'vue'; + +const FOCUSABLE = + 'a[href], button:not(:disabled), input:not(:disabled), select:not(:disabled), textarea:not(:disabled), [tabindex]:not([tabindex="-1"])'; + +export function useFocusTrap(containerRef) { + const previouslyFocused = document.activeElement; + + function handleKeydown(e) { + if (e.key !== 'Tab') return; + + const container = containerRef.value?.$el ?? 
containerRef.value; + if (!container) return; + + const focusable = [...container.querySelectorAll(FOCUSABLE)]; + if (focusable.length === 0) return; + + const first = focusable[0]; + const last = focusable[focusable.length - 1]; + + if (e.shiftKey && document.activeElement === first) { + e.preventDefault(); + last.focus(); + } else if (!e.shiftKey && document.activeElement === last) { + e.preventDefault(); + first.focus(); + } + } + + onMounted(() => { + document.addEventListener('keydown', handleKeydown); + }); + + onUnmounted(() => { + document.removeEventListener('keydown', handleKeydown); + if (previouslyFocused && document.contains(previouslyFocused)) { + previouslyFocused.focus(); + } + }); +} diff --git a/admin/ui/src/composables/useFocusTrap.test.js b/admin/ui/src/composables/useFocusTrap.test.js new file mode 100644 index 0000000..c9104c1 --- /dev/null +++ b/admin/ui/src/composables/useFocusTrap.test.js @@ -0,0 +1,134 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { ref } from 'vue'; +import { withSetup } from '../utils/test-utils.js'; +import { useFocusTrap } from './useFocusTrap.js'; + +describe('useFocusTrap', () => { + let container; + let btn1; + let btn2; + let btn3; + let app; + + beforeEach(() => { + container = document.createElement('div'); + btn1 = document.createElement('button'); + btn1.textContent = 'First'; + btn2 = document.createElement('button'); + btn2.textContent = 'Second'; + btn3 = document.createElement('button'); + btn3.textContent = 'Third'; + container.append(btn1, btn2, btn3); + document.body.appendChild(container); + }); + + afterEach(() => { + app?.unmount(); + container.remove(); + }); + + it('wraps focus from last to first on Tab', () => { + const containerRef = ref(container); + ({ app } = withSetup(() => useFocusTrap(containerRef))); + + btn3.focus(); + const event = new KeyboardEvent('keydown', { + key: 'Tab', + bubbles: true, + }); + let prevented = false; + event.preventDefault = () => 
{ + prevented = true; + }; + document.dispatchEvent(event); + + expect(prevented).toBe(true); + }); + + it('wraps focus from first to last on Shift+Tab', () => { + const containerRef = ref(container); + ({ app } = withSetup(() => useFocusTrap(containerRef))); + + btn1.focus(); + const event = new KeyboardEvent('keydown', { + key: 'Tab', + shiftKey: true, + bubbles: true, + }); + let prevented = false; + event.preventDefault = () => { + prevented = true; + }; + document.dispatchEvent(event); + + expect(prevented).toBe(true); + }); + + it('does not trap non-Tab keys', () => { + const containerRef = ref(container); + ({ app } = withSetup(() => useFocusTrap(containerRef))); + + btn1.focus(); + const event = new KeyboardEvent('keydown', { + key: 'Escape', + bubbles: true, + }); + let prevented = false; + event.preventDefault = () => { + prevented = true; + }; + document.dispatchEvent(event); + + expect(prevented).toBe(false); + }); + + it('does not interfere when focus is in the middle', () => { + const containerRef = ref(container); + ({ app } = withSetup(() => useFocusTrap(containerRef))); + + btn2.focus(); + const event = new KeyboardEvent('keydown', { + key: 'Tab', + bubbles: true, + }); + let prevented = false; + event.preventDefault = () => { + prevented = true; + }; + document.dispatchEvent(event); + + expect(prevented).toBe(false); + }); + + it('restores focus to previously focused element on unmount', () => { + btn1.focus(); + expect(document.activeElement).toBe(btn1); + + const containerRef = ref(container); + ({ app } = withSetup(() => useFocusTrap(containerRef))); + + // Focus moves elsewhere during the trap's lifetime. 
+ btn3.focus(); + expect(document.activeElement).toBe(btn3); + + app.unmount(); + app = null; + expect(document.activeElement).toBe(btn1); + }); + + it('skips restore when previously focused element was removed from DOM', () => { + btn1.focus(); + expect(document.activeElement).toBe(btn1); + + const containerRef = ref(container); + ({ app } = withSetup(() => useFocusTrap(containerRef))); + + // Simulate the trigger element being removed while the modal is open. + btn1.remove(); + + app.unmount(); + app = null; + // Should not throw; focus stays wherever it was. + expect(document.activeElement).not.toBe(btn1); + }); +}); diff --git a/admin/ui/src/composables/useInstanceForm.js b/admin/ui/src/composables/useInstanceForm.js new file mode 100644 index 0000000..3faafc1 --- /dev/null +++ b/admin/ui/src/composables/useInstanceForm.js @@ -0,0 +1,66 @@ +import { ref, reactive } from 'vue'; +import { validateInstanceForm } from '../utils/validation.js'; + +export function useInstanceForm(store, instance = null) { + const editing = !!instance; + const name = ref(instance?.name || ''); + const address = ref(instance?.address || ''); + const errors = reactive({ name: '', address: '' }); + const saving = ref(false); + const testing = ref(false); + const testResult = ref(null); + + function validate() { + const result = validateInstanceForm(name.value, address.value); + errors.name = result.name; + errors.address = result.address; + return !result.name && !result.address; + } + + async function handleTest() { + testResult.value = null; + testing.value = true; + try { + testResult.value = await store.testConnection(address.value.trim()); + } catch { + testResult.value = { ok: false, error: 'Failed to test connection' }; + } finally { + testing.value = false; + } + } + + async function handleSubmit() { + if (!validate()) return false; + saving.value = true; + try { + if (editing) { + await store.updateInstance( + instance.id, + name.value.trim(), + address.value.trim(), + ); + } 
else { + await store.createInstance(name.value.trim(), address.value.trim()); + } + return true; + } catch (e) { + errors.address = e.message; + return false; + } finally { + saving.value = false; + } + } + + return { + editing, + name, + address, + errors, + saving, + testing, + testResult, + validate, + handleTest, + handleSubmit, + }; +} diff --git a/admin/ui/src/composables/useInstanceForm.test.js b/admin/ui/src/composables/useInstanceForm.test.js new file mode 100644 index 0000000..bc71a30 --- /dev/null +++ b/admin/ui/src/composables/useInstanceForm.test.js @@ -0,0 +1,200 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { withSetup } from '../utils/test-utils.js'; +import { useInstanceForm } from './useInstanceForm.js'; + +function mockStore(overrides = {}) { + return { + testConnection: vi.fn().mockResolvedValue({ ok: true, version: '1.0.0' }), + createInstance: vi.fn().mockResolvedValue({ id: 1 }), + updateInstance: vi.fn().mockResolvedValue({ id: 1 }), + ...overrides, + }; +} + +describe('useInstanceForm', () => { + beforeEach(() => { + vi.restoreAllMocks(); + }); + + describe('initialization', () => { + it('starts empty for a new instance', () => { + const store = mockStore(); + const { result } = withSetup(() => useInstanceForm(store)); + expect(result.editing).toBe(false); + expect(result.name.value).toBe(''); + expect(result.address.value).toBe(''); + expect(result.errors.name).toBe(''); + expect(result.errors.address).toBe(''); + }); + + it('populates from existing instance for editing', () => { + const store = mockStore(); + const { result } = withSetup(() => + useInstanceForm(store, { + id: 5, + name: 'proxy-1', + address: '10.0.0.1:9090', + }), + ); + expect(result.editing).toBe(true); + expect(result.name.value).toBe('proxy-1'); + expect(result.address.value).toBe('10.0.0.1:9090'); + }); + }); + + describe('validate', () => { + it('returns true and clears errors for valid inputs', () => { + const store = mockStore(); + 
const { result } = withSetup(() => useInstanceForm(store)); + result.name.value = 'proxy-1'; + result.address.value = '10.0.0.1:9090'; + expect(result.validate()).toBe(true); + expect(result.errors.name).toBe(''); + expect(result.errors.address).toBe(''); + }); + + it('returns false and sets errors for empty inputs', () => { + const store = mockStore(); + const { result } = withSetup(() => useInstanceForm(store)); + expect(result.validate()).toBe(false); + expect(result.errors.name).toBe('Name is required'); + expect(result.errors.address).toBe('Address is required'); + }); + }); + + describe('handleTest', () => { + it('sets testResult on successful connection', async () => { + const store = mockStore(); + const { result } = withSetup(() => useInstanceForm(store)); + result.address.value = '10.0.0.1:9090'; + await result.handleTest(); + expect(store.testConnection).toHaveBeenCalledWith('10.0.0.1:9090'); + expect(result.testResult.value).toEqual({ ok: true, version: '1.0.0' }); + }); + + it('sets error testResult on network failure', async () => { + const store = mockStore({ + testConnection: vi.fn().mockRejectedValue(new Error('network')), + }); + const { result } = withSetup(() => useInstanceForm(store)); + result.address.value = '10.0.0.1:9090'; + await result.handleTest(); + expect(result.testResult.value).toEqual({ + ok: false, + error: 'Failed to test connection', + }); + }); + + it('manages testing flag during the request', async () => { + let resolve; + const store = mockStore({ + testConnection: vi + .fn() + .mockReturnValue(new Promise((r) => (resolve = r))), + }); + const { result } = withSetup(() => useInstanceForm(store)); + result.address.value = '10.0.0.1:9090'; + const p = result.handleTest(); + expect(result.testing.value).toBe(true); + resolve({ ok: true, version: '1.0.0' }); + await p; + expect(result.testing.value).toBe(false); + }); + + it('clears previous testResult before starting', async () => { + const store = mockStore(); + const { result } 
= withSetup(() => useInstanceForm(store)); + result.testResult.value = { ok: true, version: 'old' }; + result.address.value = '10.0.0.1:9090'; + const p = result.handleTest(); + // testResult is null immediately after calling handleTest + expect(result.testResult.value).toBeNull(); + await p; + }); + }); + + describe('handleSubmit', () => { + it('returns false without calling store when validation fails', async () => { + const store = mockStore(); + const { result } = withSetup(() => useInstanceForm(store)); + const ok = await result.handleSubmit(); + expect(ok).toBe(false); + expect(store.createInstance).not.toHaveBeenCalled(); + }); + + it('calls createInstance for new instance and returns true', async () => { + const store = mockStore(); + const { result } = withSetup(() => useInstanceForm(store)); + result.name.value = 'proxy-1'; + result.address.value = '10.0.0.1:9090'; + const ok = await result.handleSubmit(); + expect(ok).toBe(true); + expect(store.createInstance).toHaveBeenCalledWith( + 'proxy-1', + '10.0.0.1:9090', + ); + }); + + it('calls updateInstance for existing instance and returns true', async () => { + const store = mockStore(); + const { result } = withSetup(() => + useInstanceForm(store, { + id: 5, + name: 'proxy-1', + address: '10.0.0.1:9090', + }), + ); + result.name.value = 'proxy-updated'; + const ok = await result.handleSubmit(); + expect(ok).toBe(true); + expect(store.updateInstance).toHaveBeenCalledWith( + 5, + 'proxy-updated', + '10.0.0.1:9090', + ); + }); + + it('sets address error on API failure and returns false', async () => { + const store = mockStore({ + createInstance: vi + .fn() + .mockRejectedValue(new Error('Address already registered')), + }); + const { result } = withSetup(() => useInstanceForm(store)); + result.name.value = 'proxy-1'; + result.address.value = '10.0.0.1:9090'; + const ok = await result.handleSubmit(); + expect(ok).toBe(false); + expect(result.errors.address).toBe('Address already registered'); + }); + + 
it('manages saving flag during the request', async () => { + let resolve; + const store = mockStore({ + createInstance: vi + .fn() + .mockReturnValue(new Promise((r) => (resolve = r))), + }); + const { result } = withSetup(() => useInstanceForm(store)); + result.name.value = 'proxy-1'; + result.address.value = '10.0.0.1:9090'; + const p = result.handleSubmit(); + expect(result.saving.value).toBe(true); + resolve({ id: 1 }); + await p; + expect(result.saving.value).toBe(false); + }); + + it('trims name and address before sending', async () => { + const store = mockStore(); + const { result } = withSetup(() => useInstanceForm(store)); + result.name.value = ' proxy-1 '; + result.address.value = ' 10.0.0.1:9090 '; + await result.handleSubmit(); + expect(store.createInstance).toHaveBeenCalledWith( + 'proxy-1', + '10.0.0.1:9090', + ); + }); + }); +}); diff --git a/admin/ui/src/composables/usePolling.js b/admin/ui/src/composables/usePolling.js new file mode 100644 index 0000000..1ee8abf --- /dev/null +++ b/admin/ui/src/composables/usePolling.js @@ -0,0 +1,23 @@ +import { onMounted, onUnmounted } from 'vue'; + +export function usePolling(fn, intervalMs = 10000) { + let id = null; + + function start() { + stop(); + fn(); + id = setInterval(fn, intervalMs); + } + + function stop() { + if (id !== null) { + clearInterval(id); + id = null; + } + } + + onMounted(start); + onUnmounted(stop); + + return { start, stop }; +} diff --git a/admin/ui/src/composables/usePolling.test.js b/admin/ui/src/composables/usePolling.test.js new file mode 100644 index 0000000..056f93d --- /dev/null +++ b/admin/ui/src/composables/usePolling.test.js @@ -0,0 +1,55 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { withSetup } from '../utils/test-utils.js'; +import { usePolling } from './usePolling.js'; + +describe('usePolling', () => { + beforeEach(() => { + vi.useFakeTimers(); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it('calls fn immediately on 
mount', () => { + const fn = vi.fn(); + withSetup(() => usePolling(fn, 5000)); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it('calls fn repeatedly at the configured interval', () => { + const fn = vi.fn(); + withSetup(() => usePolling(fn, 5000)); + expect(fn).toHaveBeenCalledTimes(1); + vi.advanceTimersByTime(5000); + expect(fn).toHaveBeenCalledTimes(2); + vi.advanceTimersByTime(5000); + expect(fn).toHaveBeenCalledTimes(3); + }); + + it('stops polling when stop is called', () => { + const fn = vi.fn(); + const { result } = withSetup(() => usePolling(fn, 5000)); + result.stop(); + vi.advanceTimersByTime(15000); + // Only the initial call on mount + expect(fn).toHaveBeenCalledTimes(1); + }); + + it('cleans up interval when app unmounts', () => { + const fn = vi.fn(); + const { app } = withSetup(() => usePolling(fn, 5000)); + app.unmount(); + vi.advanceTimersByTime(15000); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it('uses 10s default interval', () => { + const fn = vi.fn(); + withSetup(() => usePolling(fn)); + vi.advanceTimersByTime(10000); + expect(fn).toHaveBeenCalledTimes(2); + vi.advanceTimersByTime(5000); + expect(fn).toHaveBeenCalledTimes(2); + }); +}); diff --git a/admin/ui/src/layouts/AppLayout.vue b/admin/ui/src/layouts/AppLayout.vue new file mode 100644 index 0000000..84ad48b --- /dev/null +++ b/admin/ui/src/layouts/AppLayout.vue @@ -0,0 +1,457 @@ + + + + + diff --git a/admin/ui/src/main.js b/admin/ui/src/main.js new file mode 100644 index 0000000..776b771 --- /dev/null +++ b/admin/ui/src/main.js @@ -0,0 +1,11 @@ +import { createApp } from 'vue'; +import { createPinia } from 'pinia'; +import App from './App.vue'; +import router from './router'; +import './assets/variables.css'; +import './assets/global.css'; + +const app = createApp(App); +app.use(createPinia()); +app.use(router); +app.mount('#app'); diff --git a/admin/ui/src/router/index.js b/admin/ui/src/router/index.js new file mode 100644 index 0000000..3ee19de --- /dev/null +++ 
b/admin/ui/src/router/index.js @@ -0,0 +1,62 @@ +import { createRouter, createWebHistory } from 'vue-router'; +import { useAuthStore } from '../stores/auth.js'; +import DashboardView from '../views/DashboardView.vue'; +import AuditLogView from '../views/AuditLogView.vue'; +import InstanceDetailView from '../views/InstanceDetailView.vue'; +import LoginView from '../views/LoginView.vue'; +import SettingsView from '../views/SettingsView.vue'; + +const routes = [ + { + path: '/login', + name: 'login', + component: LoginView, + meta: { public: true }, + }, + { + path: '/', + name: 'dashboard', + component: DashboardView, + }, + { + path: '/instances/:id', + name: 'instance-detail', + component: InstanceDetailView, + }, + { + path: '/audit-log', + name: 'audit-log', + component: AuditLogView, + }, + { + path: '/settings', + name: 'settings', + component: SettingsView, + }, + { + path: '/:pathMatch(.*)*', + name: 'not-found', + redirect: '/', + }, +]; + +const router = createRouter({ + history: createWebHistory(), + routes, +}); + +router.beforeEach(async (to) => { + const auth = useAuthStore(); + + if (!auth.ready) await auth.checkSession(); + + if (!to.meta.public && !auth.isAuthenticated) { + return { name: 'login', query: { redirect: to.fullPath } }; + } + + if (to.name === 'login' && auth.isAuthenticated) { + return { name: 'dashboard' }; + } +}); + +export default router; diff --git a/admin/ui/src/stores/auth.js b/admin/ui/src/stores/auth.js new file mode 100644 index 0000000..4596b91 --- /dev/null +++ b/admin/ui/src/stores/auth.js @@ -0,0 +1,47 @@ +import { ref, computed } from 'vue'; +import { defineStore } from 'pinia'; +import * as api from '../utils/api.js'; + +export const useAuthStore = defineStore('auth', () => { + const user = ref(null); + const ready = ref(false); + const isAuthenticated = computed(() => user.value !== null); + + async function checkSession() { + try { + const data = await api.get('/api/me'); + user.value = data.user; + } catch { + 
user.value = null; + } finally { + ready.value = true; + } + } + + async function login(username, password) { + const data = await api.post('/api/login', { username, password }); + user.value = data.user; + } + + async function logout() { + await api.post('/api/logout'); + user.value = null; + } + + async function changePassword(currentPassword, newPassword) { + await api.put('/api/user/password', { + current_password: currentPassword, + new_password: newPassword, + }); + } + + return { + user, + ready, + isAuthenticated, + checkSession, + login, + logout, + changePassword, + }; +}); diff --git a/admin/ui/src/stores/auth.test.js b/admin/ui/src/stores/auth.test.js new file mode 100644 index 0000000..c412716 --- /dev/null +++ b/admin/ui/src/stores/auth.test.js @@ -0,0 +1,89 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setActivePinia, createPinia } from 'pinia'; +import { useAuthStore } from './auth.js'; + +vi.mock('../utils/api.js', () => ({ + get: vi.fn(), + post: vi.fn(), + put: vi.fn(), +})); + +import * as api from '../utils/api.js'; + +describe('useAuthStore', () => { + let store; + + beforeEach(() => { + setActivePinia(createPinia()); + store = useAuthStore(); + vi.restoreAllMocks(); + }); + + describe('checkSession', () => { + it('sets user on valid session', async () => { + api.get.mockResolvedValue({ user: { id: 1, username: 'admin' } }); + await store.checkSession(); + expect(store.user).toEqual({ id: 1, username: 'admin' }); + expect(store.ready).toBe(true); + expect(store.isAuthenticated).toBe(true); + }); + + it('clears user on invalid session', async () => { + api.get.mockRejectedValue(new Error('401')); + await store.checkSession(); + expect(store.user).toBeNull(); + expect(store.ready).toBe(true); + expect(store.isAuthenticated).toBe(false); + }); + }); + + describe('login', () => { + it('sets user on success', async () => { + api.post.mockResolvedValue({ user: { id: 1, username: 'admin' } }); + await 
store.login('admin', 'password123456'); + expect(api.post).toHaveBeenCalledWith('/api/login', { + username: 'admin', + password: 'password123456', + }); + expect(store.user).toEqual({ id: 1, username: 'admin' }); + }); + + it('propagates error on failure', async () => { + const err = new Error('Invalid'); + err.status = 401; + api.post.mockRejectedValue(err); + await expect(store.login('admin', 'wrong')).rejects.toThrow('Invalid'); + expect(store.user).toBeNull(); + }); + }); + + describe('logout', () => { + it('clears user on success', async () => { + store.user = { id: 1, username: 'admin' }; + api.post.mockResolvedValue(null); + await store.logout(); + expect(api.post).toHaveBeenCalledWith('/api/logout'); + expect(store.user).toBeNull(); + }); + }); + + describe('changePassword', () => { + it('sends correct payload', async () => { + api.put.mockResolvedValue(null); + await store.changePassword('old-password1', 'new-password1'); + expect(api.put).toHaveBeenCalledWith('/api/user/password', { + current_password: 'old-password1', + new_password: 'new-password1', + }); + }); + + it('propagates error on failure', async () => { + const err = new Error('Current password is incorrect'); + err.status = 401; + api.put.mockRejectedValue(err); + await expect( + store.changePassword('wrong', 'new-password1'), + ).rejects.toThrow('Current password is incorrect'); + }); + }); +}); diff --git a/admin/ui/src/stores/instances.js b/admin/ui/src/stores/instances.js new file mode 100644 index 0000000..f161d27 --- /dev/null +++ b/admin/ui/src/stores/instances.js @@ -0,0 +1,47 @@ +import { defineStore } from 'pinia'; +import { ref } from 'vue'; +import * as api from '../utils/api.js'; + +export const useInstanceStore = defineStore('instances', () => { + const instances = ref([]); + const initialized = ref(false); + + async function fetchInstances() { + try { + instances.value = await api.get('/api/instances'); + } finally { + initialized.value = true; + } + } + + async function 
createInstance(name, address) { + const inst = await api.post('/api/instances', { name, address }); + await fetchInstances(); + return inst; + } + + async function updateInstance(id, name, address) { + const inst = await api.put(`/api/instances/${id}`, { name, address }); + await fetchInstances(); + return inst; + } + + async function deleteInstance(id) { + await api.del(`/api/instances/${id}`); + await fetchInstances(); + } + + async function testConnection(address) { + return api.post('/api/instances/test', { address }); + } + + return { + instances, + initialized, + fetchInstances, + createInstance, + updateInstance, + deleteInstance, + testConnection, + }; +}); diff --git a/admin/ui/src/stores/instances.test.js b/admin/ui/src/stores/instances.test.js new file mode 100644 index 0000000..107eb14 --- /dev/null +++ b/admin/ui/src/stores/instances.test.js @@ -0,0 +1,108 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setActivePinia, createPinia } from 'pinia'; +import { useInstanceStore } from './instances.js'; + +vi.mock('../utils/api.js', () => ({ + get: vi.fn(), + post: vi.fn(), + put: vi.fn(), + del: vi.fn(), +})); + +import * as api from '../utils/api.js'; + +describe('useInstanceStore', () => { + let store; + + beforeEach(() => { + vi.restoreAllMocks(); + setActivePinia(createPinia()); + store = useInstanceStore(); + }); + + describe('fetchInstances', () => { + it('populates instances from API', async () => { + const data = [{ id: 1, name: 'proxy-1' }]; + api.get.mockResolvedValue(data); + await store.fetchInstances(); + expect(api.get).toHaveBeenCalledWith('/api/instances'); + expect(store.instances).toEqual(data); + }); + + it('sets initialized after first fetch', async () => { + expect(store.initialized).toBe(false); + api.get.mockResolvedValue([]); + await store.fetchInstances(); + expect(store.initialized).toBe(true); + }); + + it('sets initialized even on error', async () => { + api.get.mockRejectedValue(new Error('network 
error')); + await store.fetchInstances().catch(() => {}); + expect(store.initialized).toBe(true); + }); + }); + + describe('createInstance', () => { + it('calls api.post and refreshes instances', async () => { + api.post.mockResolvedValue({ + id: 1, + name: 'proxy-1', + address: '10.0.0.1:9090', + }); + api.get.mockResolvedValue([{ id: 1 }]); + const result = await store.createInstance('proxy-1', '10.0.0.1:9090'); + expect(api.post).toHaveBeenCalledWith('/api/instances', { + name: 'proxy-1', + address: '10.0.0.1:9090', + }); + expect(result).toEqual({ + id: 1, + name: 'proxy-1', + address: '10.0.0.1:9090', + }); + expect(api.get).toHaveBeenCalledWith('/api/instances'); + }); + + it('throws on API error', async () => { + api.post.mockRejectedValue(new Error('Address already registered')); + await expect(store.createInstance('x', '1:2')).rejects.toThrow( + 'Address already registered', + ); + }); + }); + + describe('updateInstance', () => { + it('calls api.put and refreshes instances', async () => { + api.put.mockResolvedValue({ id: 1, name: 'updated' }); + api.get.mockResolvedValue([{ id: 1 }]); + const result = await store.updateInstance(1, 'updated', '10.0.0.1:9090'); + expect(api.put).toHaveBeenCalledWith('/api/instances/1', { + name: 'updated', + address: '10.0.0.1:9090', + }); + expect(result).toEqual({ id: 1, name: 'updated' }); + }); + }); + + describe('deleteInstance', () => { + it('calls api.del and refreshes instances', async () => { + api.del.mockResolvedValue(null); + api.get.mockResolvedValue([]); + await store.deleteInstance(1); + expect(api.del).toHaveBeenCalledWith('/api/instances/1'); + expect(api.get).toHaveBeenCalledWith('/api/instances'); + }); + }); + + describe('testConnection', () => { + it('calls api.post and returns result', async () => { + api.post.mockResolvedValue({ ok: true, version: '1.0.0' }); + const result = await store.testConnection('10.0.0.1:9090'); + expect(api.post).toHaveBeenCalledWith('/api/instances/test', { + address: 
'10.0.0.1:9090', + }); + expect(result).toEqual({ ok: true, version: '1.0.0' }); + }); + }); +}); diff --git a/admin/ui/src/stores/metrics.js b/admin/ui/src/stores/metrics.js new file mode 100644 index 0000000..342d798 --- /dev/null +++ b/admin/ui/src/stores/metrics.js @@ -0,0 +1,46 @@ +import { defineStore } from 'pinia'; +import { ref } from 'vue'; +import * as api from '../utils/api.js'; + +export const useMetricsStore = defineStore('metrics', () => { + const fleet = ref(null); + const instance = ref(null); + const fleetError = ref(null); + const instanceError = ref(null); + + async function fetchFleetMetrics() { + try { + fleet.value = await api.get('/api/metrics/fleet'); + fleetError.value = null; + } catch (err) { + fleetError.value = err; + } + } + + async function fetchInstanceMetrics(id) { + try { + instance.value = await api.get(`/api/metrics/${id}`); + instanceError.value = null; + } catch (err) { + if (err.status === 404) { + instance.value = null; + } + instanceError.value = err; + } + } + + function clearInstance() { + instance.value = null; + instanceError.value = null; + } + + return { + fleet, + instance, + fleetError, + instanceError, + fetchFleetMetrics, + fetchInstanceMetrics, + clearInstance, + }; +}); diff --git a/admin/ui/src/stores/metrics.test.js b/admin/ui/src/stores/metrics.test.js new file mode 100644 index 0000000..77bbc9f --- /dev/null +++ b/admin/ui/src/stores/metrics.test.js @@ -0,0 +1,94 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { setActivePinia, createPinia } from 'pinia'; +import { useMetricsStore } from './metrics.js'; + +vi.mock('../utils/api.js', () => ({ + get: vi.fn(), +})); + +import * as api from '../utils/api.js'; + +beforeEach(() => { + setActivePinia(createPinia()); + vi.clearAllMocks(); +}); + +describe('useMetricsStore', () => { + describe('fetchFleetMetrics', () => { + it('stores fleet metrics on success', async () => { + const data = { total_rps: 100, fleet_error_rate: 0.02 }; + 
api.get.mockResolvedValue(data); + + const store = useMetricsStore(); + await store.fetchFleetMetrics(); + + expect(api.get).toHaveBeenCalledWith('/api/metrics/fleet'); + expect(store.fleet).toEqual(data); + expect(store.fleetError).toBe(null); + }); + + it('stores error on failure', async () => { + const err = new Error('network'); + api.get.mockRejectedValue(err); + + const store = useMetricsStore(); + await store.fetchFleetMetrics(); + + expect(store.fleetError).toBe(err); + }); + }); + + describe('fetchInstanceMetrics', () => { + it('stores instance metrics on success', async () => { + const data = { instance_id: 1, rps: 50 }; + api.get.mockResolvedValue(data); + + const store = useMetricsStore(); + await store.fetchInstanceMetrics(1); + + expect(api.get).toHaveBeenCalledWith('/api/metrics/1'); + expect(store.instance).toEqual(data); + expect(store.instanceError).toBe(null); + }); + + it('clears instance on 404', async () => { + const err = new Error('not found'); + err.status = 404; + api.get.mockRejectedValue(err); + + const store = useMetricsStore(); + store.instance = { old: true }; + await store.fetchInstanceMetrics(99); + + expect(store.instance).toBe(null); + expect(store.instanceError).toBe(err); + }); + + it('preserves instance data on non-404 error', async () => { + const err = new Error('server error'); + err.status = 500; + api.get.mockRejectedValue(err); + + const store = useMetricsStore(); + const existing = { instance_id: 1 }; + store.instance = existing; + await store.fetchInstanceMetrics(1); + + expect(store.instance).toStrictEqual(existing); + expect(store.instanceError).toBe(err); + }); + }); + + describe('clearInstance', () => { + it('resets instance state', () => { + const store = useMetricsStore(); + store.instance = { rps: 100 }; + store.instanceError = new Error('old'); + + store.clearInstance(); + + expect(store.instance).toBe(null); + expect(store.instanceError).toBe(null); + }); + }); +}); diff --git a/admin/ui/src/utils/api.js 
b/admin/ui/src/utils/api.js new file mode 100644 index 0000000..d7456b3 --- /dev/null +++ b/admin/ui/src/utils/api.js @@ -0,0 +1,72 @@ +class ApiError extends Error { + constructor(message, status, code) { + super(message); + this.name = 'ApiError'; + this.status = status; + this.code = code; + } +} + +export function getCsrfToken() { + const match = document.cookie.match(/(?:^|;\s*)csrf_token=([^;]*)/); + return match ? decodeURIComponent(match[1]) : ''; +} + +const writeMethods = new Set(['POST', 'PUT', 'DELETE', 'PATCH']); + +async function request(path, options = {}) { + const headers = { + 'Content-Type': 'application/json', + ...options.headers, + }; + + if (writeMethods.has(options.method)) { + const token = getCsrfToken(); + if (token) headers['X-CSRF-Token'] = token; + } + + const res = await fetch(path, { ...options, headers }); + + if (!res.ok) { + if (res.status === 401 && path !== '/api/login') { + const { useAuthStore } = await import('../stores/auth.js'); + const auth = useAuthStore(); + if (auth.ready) { + auth.user = null; + window.location.href = '/login'; + } + } + + let message = `Request failed (${res.status})`; + let code; + try { + const data = await res.json(); + if (data.error?.message) message = data.error.message; + if (data.error?.code) code = data.error.code; + } catch { + // response body not JSON — keep generic message + } + throw new ApiError(message, res.status, code); + } + + if (res.status === 204) return null; + return res.json(); +} + +export function get(path) { + return request(path); +} + +export function post(path, body) { + return request(path, { method: 'POST', body: JSON.stringify(body) }); +} + +export function put(path, body) { + return request(path, { method: 'PUT', body: JSON.stringify(body) }); +} + +export function del(path) { + return request(path, { method: 'DELETE' }); +} + +export { ApiError }; diff --git a/admin/ui/src/utils/api.test.js b/admin/ui/src/utils/api.test.js new file mode 100644 index 
0000000..d2d9c48 --- /dev/null +++ b/admin/ui/src/utils/api.test.js @@ -0,0 +1,208 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { get, post, put, del, getCsrfToken, ApiError } from './api.js'; + +describe('api client', () => { + beforeEach(() => { + vi.restoreAllMocks(); + Object.defineProperty(document, 'cookie', { + writable: true, + value: '', + }); + }); + + function mockFetch(status, body, { json = true } = {}) { + const res = { + ok: status >= 200 && status < 300, + status, + json: json + ? vi.fn().mockResolvedValue(body) + : vi.fn().mockRejectedValue(new Error('not json')), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(res); + return res; + } + + describe('getCsrfToken', () => { + it('returns empty string when no cookie', () => { + document.cookie = ''; + expect(getCsrfToken()).toBe(''); + }); + + it('extracts csrf_token from cookies', () => { + document.cookie = 'session=abc123; csrf_token=my-token-value'; + expect(getCsrfToken()).toBe('my-token-value'); + }); + + it('decodes URL-encoded token', () => { + document.cookie = 'csrf_token=token%20with%20spaces'; + expect(getCsrfToken()).toBe('token with spaces'); + }); + }); + + describe('get', () => { + it('returns parsed JSON on success', async () => { + mockFetch(200, [{ id: 1 }]); + const result = await get('/api/instances'); + expect(result).toEqual([{ id: 1 }]); + expect(globalThis.fetch).toHaveBeenCalledWith('/api/instances', { + headers: { 'Content-Type': 'application/json' }, + }); + }); + + it('does not send CSRF token on GET', async () => { + document.cookie = 'csrf_token=my-token'; + mockFetch(200, {}); + await get('/api/me'); + const [, opts] = globalThis.fetch.mock.calls[0]; + expect(opts.headers['X-CSRF-Token']).toBeUndefined(); + }); + + it('throws ApiError with server message on failure', async () => { + mockFetch(404, { + error: { + code: 'INSTANCE_NOT_FOUND', + message: 'No instance with ID 42', + }, + }); + const err = await 
get('/api/instances/42').catch((e) => e); + expect(err).toBeInstanceOf(ApiError); + expect(err.message).toBe('No instance with ID 42'); + expect(err.status).toBe(404); + expect(err.code).toBe('INSTANCE_NOT_FOUND'); + }); + + it('throws ApiError with generic message when response is not JSON', async () => { + mockFetch(500, null, { json: false }); + const err = await get('/api/instances').catch((e) => e); + expect(err).toBeInstanceOf(ApiError); + expect(err.message).toBe('Request failed (500)'); + expect(err.status).toBe(500); + }); + + it('redirects to login on 401 when session is established', async () => { + mockFetch(401, { + error: { code: 'UNAUTHORIZED', message: 'No valid session' }, + }); + const mockStore = { user: { id: 1 }, ready: true }; + vi.doMock('../stores/auth.js', () => ({ + useAuthStore: () => mockStore, + })); + delete window.location; + window.location = { href: '/' }; + const err = await get('/api/me').catch((e) => e); + expect(err).toBeInstanceOf(ApiError); + expect(err.status).toBe(401); + expect(mockStore.user).toBeNull(); + expect(window.location.href).toBe('/login'); + vi.doUnmock('../stores/auth.js'); + }); + + it('does not redirect on 401 during initial session check', async () => { + mockFetch(401, { + error: { code: 'UNAUTHORIZED', message: 'No valid session' }, + }); + const mockStore = { user: null, ready: false }; + vi.doMock('../stores/auth.js', () => ({ + useAuthStore: () => mockStore, + })); + delete window.location; + window.location = { href: '/' }; + const err = await get('/api/me').catch((e) => e); + expect(err).toBeInstanceOf(ApiError); + expect(err.status).toBe(401); + expect(window.location.href).toBe('/'); + vi.doUnmock('../stores/auth.js'); + }); + }); + + describe('post', () => { + it('sends JSON body and returns parsed response', async () => { + mockFetch(201, { id: 1, name: 'proxy-1' }); + const result = await post('/api/instances', { + name: 'proxy-1', + address: '10.0.0.1:9090', + }); + expect(result).toEqual({ id: 
1, name: 'proxy-1' }); + + const [url, opts] = globalThis.fetch.mock.calls[0]; + expect(url).toBe('/api/instances'); + expect(opts.method).toBe('POST'); + expect(JSON.parse(opts.body)).toEqual({ + name: 'proxy-1', + address: '10.0.0.1:9090', + }); + }); + + it('includes CSRF token on POST', async () => { + document.cookie = 'csrf_token=my-csrf-token'; + mockFetch(200, {}); + await post('/api/logout'); + const [, opts] = globalThis.fetch.mock.calls[0]; + expect(opts.headers['X-CSRF-Token']).toBe('my-csrf-token'); + }); + + it('throws ApiError with server message on conflict', async () => { + mockFetch(409, { + error: { + code: 'DUPLICATE_ADDRESS', + message: 'Address already registered', + }, + }); + const err = await post('/api/instances', { + name: 'x', + address: '1:2', + }).catch((e) => e); + expect(err).toBeInstanceOf(ApiError); + expect(err.message).toBe('Address already registered'); + expect(err.code).toBe('DUPLICATE_ADDRESS'); + }); + }); + + describe('put', () => { + it('sends JSON body with PUT method', async () => { + mockFetch(200, { id: 1, name: 'updated' }); + const result = await put('/api/instances/1', { name: 'updated' }); + expect(result).toEqual({ id: 1, name: 'updated' }); + + const [, opts] = globalThis.fetch.mock.calls[0]; + expect(opts.method).toBe('PUT'); + }); + + it('includes CSRF token on PUT', async () => { + document.cookie = 'csrf_token=put-token'; + mockFetch(204, null); + await put('/api/user/password', { + current_password: 'a', + new_password: 'b', + }); + const [, opts] = globalThis.fetch.mock.calls[0]; + expect(opts.headers['X-CSRF-Token']).toBe('put-token'); + }); + }); + + describe('del', () => { + it('returns null for 204 No Content', async () => { + const res = { + ok: true, + status: 204, + json: vi.fn(), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(res); + const result = await del('/api/instances/1'); + expect(result).toBeNull(); + expect(res.json).not.toHaveBeenCalled(); + }); + + it('sends DELETE method with 
CSRF token', async () => { + document.cookie = 'csrf_token=del-token'; + const res = { ok: true, status: 204, json: vi.fn() }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(res); + await del('/api/instances/1'); + + const [url, opts] = globalThis.fetch.mock.calls[0]; + expect(url).toBe('/api/instances/1'); + expect(opts.method).toBe('DELETE'); + expect(opts.headers['X-CSRF-Token']).toBe('del-token'); + }); + }); +}); diff --git a/admin/ui/src/utils/audit.js b/admin/ui/src/utils/audit.js new file mode 100644 index 0000000..026d9d9 --- /dev/null +++ b/admin/ui/src/utils/audit.js @@ -0,0 +1,104 @@ +// Action identifiers must match the backend constants in admin/api/audit_actions.go. +const ACTION_LABELS = { + 'instance.create': 'Instance created', + 'instance.update': 'Instance updated', + 'instance.delete': 'Instance deleted', + 'user.login': 'User logged in', + 'user.logout': 'User logged out', + 'user.password_change': 'Password changed', +}; + +const ACTION_OPTIONS = [ + { value: '', label: 'All actions' }, + { value: 'instance.create', label: 'Instance created' }, + { value: 'instance.update', label: 'Instance updated' }, + { value: 'instance.delete', label: 'Instance deleted' }, + { value: 'user.login', label: 'User logged in' }, + { value: 'user.logout', label: 'User logged out' }, + { value: 'user.password_change', label: 'Password changed' }, +]; + +export function getActionLabel(action) { + return ACTION_LABELS[action] || action; +} + +export function getActionOptions() { + return ACTION_OPTIONS; +} + +export function formatAuditTimestamp(isoString) { + if (!isoString) return ''; + const d = new Date(isoString); + if (isNaN(d.getTime())) return ''; + + const now = new Date(); + const diffMs = now - d; + const diffSecs = Math.floor(diffMs / 1000); + + if (diffSecs < 60) return 'just now'; + if (diffSecs < 3600) return `${Math.floor(diffSecs / 60)}m ago`; + + const isToday = + d.getDate() === now.getDate() && + d.getMonth() === now.getMonth() && + 
d.getFullYear() === now.getFullYear(); + + const time = d.toLocaleTimeString(undefined, { + hour: '2-digit', + minute: '2-digit', + }); + + if (isToday) return `Today ${time}`; + + const yesterday = new Date(now); + yesterday.setDate(yesterday.getDate() - 1); + const isYesterday = + d.getDate() === yesterday.getDate() && + d.getMonth() === yesterday.getMonth() && + d.getFullYear() === yesterday.getFullYear(); + + if (isYesterday) return `Yesterday ${time}`; + + return d.toLocaleDateString(undefined, { + month: 'short', + day: 'numeric', + year: d.getFullYear() !== now.getFullYear() ? 'numeric' : undefined, + hour: '2-digit', + minute: '2-digit', + }); +} + +// Converts a YYYY-MM-DD date string to an RFC 3339 UTC timestamp +// representing the start of that day in the user's local timezone. +export function startOfLocalDay(dateStr) { + if (!dateStr) return ''; + return new Date(dateStr + 'T00:00:00').toISOString(); +} + +// Converts a YYYY-MM-DD date string to an RFC 3339 UTC timestamp +// representing the end of that day in the user's local timezone. +export function endOfLocalDay(dateStr) { + if (!dateStr) return ''; + return new Date(dateStr + 'T23:59:59').toISOString(); +} + +export function buildAuditQueryString(filters) { + const params = new URLSearchParams(); + + if (filters.q) params.set('q', filters.q); + if (filters.action) params.set('action', filters.action); + if (filters.from) params.set('from', startOfLocalDay(filters.from)); + if (filters.to) params.set('to', endOfLocalDay(filters.to)); + if (filters.page && filters.page > 1) + params.set('page', String(filters.page)); + if (filters.perPage && filters.perPage !== 20) + params.set('per_page', String(filters.perPage)); + + const qs = params.toString(); + return qs ? 
`?${qs}` : ''; +} + +export function totalPages(total, perPage) { + if (total <= 0 || perPage <= 0) return 1; + return Math.ceil(total / perPage); +} diff --git a/admin/ui/src/utils/audit.test.js b/admin/ui/src/utils/audit.test.js new file mode 100644 index 0000000..69e666e --- /dev/null +++ b/admin/ui/src/utils/audit.test.js @@ -0,0 +1,198 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { + getActionLabel, + getActionOptions, + formatAuditTimestamp, + buildAuditQueryString, + totalPages, + startOfLocalDay, + endOfLocalDay, +} from './audit.js'; + +describe('getActionLabel', () => { + it('returns human-readable label for known actions', () => { + expect(getActionLabel('instance.create')).toBe('Instance created'); + expect(getActionLabel('user.login')).toBe('User logged in'); + expect(getActionLabel('user.password_change')).toBe('Password changed'); + }); + + it('returns raw action string for unknown actions', () => { + expect(getActionLabel('some.unknown')).toBe('some.unknown'); + }); +}); + +describe('getActionOptions', () => { + it('returns array with "All actions" as first option', () => { + const options = getActionOptions(); + expect(options[0]).toEqual({ value: '', label: 'All actions' }); + expect(options.length).toBeGreaterThan(1); + }); + + it('includes all known action types', () => { + const options = getActionOptions(); + const values = options.map((o) => o.value); + expect(values).toContain('instance.create'); + expect(values).toContain('instance.delete'); + expect(values).toContain('user.login'); + expect(values).toContain('user.logout'); + }); +}); + +describe('formatAuditTimestamp', () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date('2026-03-09T12:00:00Z')); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it('returns empty string for falsy input', () => { + expect(formatAuditTimestamp(null)).toBe(''); + expect(formatAuditTimestamp('')).toBe(''); + 
expect(formatAuditTimestamp(undefined)).toBe(''); + }); + + it('returns empty string for invalid date', () => { + expect(formatAuditTimestamp('not-a-date')).toBe(''); + }); + + it('returns "just now" for timestamps less than 60s ago', () => { + expect(formatAuditTimestamp('2026-03-09T11:59:30Z')).toBe('just now'); + }); + + it('returns minutes ago for timestamps less than 1h ago', () => { + expect(formatAuditTimestamp('2026-03-09T11:45:00Z')).toBe('15m ago'); + }); + + it('returns "Today" with time for older timestamps today', () => { + const result = formatAuditTimestamp('2026-03-09T08:30:00Z'); + expect(result).toMatch(/^Today /); + }); + + it('returns "Yesterday" with time for timestamps from yesterday', () => { + const result = formatAuditTimestamp('2026-03-08T14:00:00Z'); + expect(result).toMatch(/^Yesterday /); + }); + + it('returns formatted date for older timestamps', () => { + const result = formatAuditTimestamp('2026-03-01T10:00:00Z'); + expect(result).toBeTruthy(); + expect(result).not.toMatch(/^Today/); + expect(result).not.toMatch(/^Yesterday/); + }); +}); + +describe('buildAuditQueryString', () => { + it('returns empty string when no filters are set', () => { + expect(buildAuditQueryString({})).toBe(''); + }); + + it('includes search query', () => { + expect(buildAuditQueryString({ q: 'test' })).toBe('?q=test'); + }); + + it('includes action filter', () => { + expect(buildAuditQueryString({ action: 'user.login' })).toBe( + '?action=user.login', + ); + }); + + it('converts date-only from/to into RFC 3339 UTC via local timezone', () => { + const qs = buildAuditQueryString({ + from: '2026-03-01', + to: '2026-03-09', + }); + expect(qs).toContain('from='); + expect(qs).toContain('to='); + // The serialized values should be valid ISO timestamps, not date-only + const params = new URLSearchParams(qs.slice(1)); + expect(params.get('from')).toMatch( + /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z$/, + ); + expect(params.get('to')).toMatch( + 
/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z$/, + ); + // The UTC timestamps must represent local midnight and local end-of-day + expect(new Date(params.get('from')).getTime()).toBe( + new Date('2026-03-01T00:00:00').getTime(), + ); + expect(new Date(params.get('to')).getTime()).toBe( + new Date('2026-03-09T23:59:59').getTime(), + ); + }); + + it('includes page only when > 1', () => { + expect(buildAuditQueryString({ page: 1 })).toBe(''); + expect(buildAuditQueryString({ page: 3 })).toBe('?page=3'); + }); + + it('includes per_page only when not default', () => { + expect(buildAuditQueryString({ perPage: 20 })).toBe(''); + expect(buildAuditQueryString({ perPage: 50 })).toBe('?per_page=50'); + }); + + it('combines multiple filters', () => { + const qs = buildAuditQueryString({ + q: 'proxy', + action: 'instance.create', + page: 2, + }); + expect(qs).toContain('q=proxy'); + expect(qs).toContain('action=instance.create'); + expect(qs).toContain('page=2'); + }); +}); + +describe('totalPages', () => { + it('returns 1 for zero or negative total', () => { + expect(totalPages(0, 20)).toBe(1); + expect(totalPages(-5, 20)).toBe(1); + }); + + it('returns 1 for zero or negative perPage', () => { + expect(totalPages(100, 0)).toBe(1); + expect(totalPages(100, -1)).toBe(1); + }); + + it('computes correct page count', () => { + expect(totalPages(20, 20)).toBe(1); + expect(totalPages(21, 20)).toBe(2); + expect(totalPages(100, 20)).toBe(5); + expect(totalPages(1, 20)).toBe(1); + }); +}); + +describe('startOfLocalDay', () => { + it('returns empty string for falsy input', () => { + expect(startOfLocalDay('')).toBe(''); + expect(startOfLocalDay(null)).toBe(''); + expect(startOfLocalDay(undefined)).toBe(''); + }); + + it('returns an ISO string based on local midnight', () => { + const result = startOfLocalDay('2026-03-09'); + expect(result).toMatch(/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z$/); + // Parse back and verify it represents local midnight + const d = new Date(result); + const 
local = new Date('2026-03-09T00:00:00'); + expect(d.getTime()).toBe(local.getTime()); + }); +}); + +describe('endOfLocalDay', () => { + it('returns empty string for falsy input', () => { + expect(endOfLocalDay('')).toBe(''); + expect(endOfLocalDay(null)).toBe(''); + expect(endOfLocalDay(undefined)).toBe(''); + }); + + it('returns an ISO string based on local end of day', () => { + const result = endOfLocalDay('2026-03-09'); + expect(result).toMatch(/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z$/); + const d = new Date(result); + const local = new Date('2026-03-09T23:59:59'); + expect(d.getTime()).toBe(local.getTime()); + }); +}); diff --git a/admin/ui/src/utils/chart-setup.js b/admin/ui/src/utils/chart-setup.js new file mode 100644 index 0000000..aca5a59 --- /dev/null +++ b/admin/ui/src/utils/chart-setup.js @@ -0,0 +1,20 @@ +import { use } from 'echarts/core'; +import { CanvasRenderer } from 'echarts/renderers'; +import { LineChart } from 'echarts/charts'; +import { + GridComponent, + TooltipComponent, + LegendComponent, + DataZoomComponent, +} from 'echarts/components'; + +use([ + CanvasRenderer, + LineChart, + GridComponent, + TooltipComponent, + LegendComponent, + DataZoomComponent, +]); + +export { default as VChart } from 'vue-echarts'; diff --git a/admin/ui/src/utils/html.js b/admin/ui/src/utils/html.js new file mode 100644 index 0000000..6135f74 --- /dev/null +++ b/admin/ui/src/utils/html.js @@ -0,0 +1,8 @@ +export function escapeHtml(s) { + return s + .replace(/&/g, '&amp;') + .replace(/</g, '&lt;') + .replace(/>/g, '&gt;') + .replace(/"/g, '&quot;') + .replace(/'/g, '&#39;'); +} diff --git a/admin/ui/src/utils/instance.js b/admin/ui/src/utils/instance.js new file mode 100644 index 0000000..1edfa41 --- /dev/null +++ b/admin/ui/src/utils/instance.js @@ -0,0 +1,19 @@ +export function formatTime(ts) { + if (!ts) return ''; + const d = new Date(ts); + const secs = Math.floor((Date.now() - d.getTime()) / 1000); + if (secs < 60) return 'just now'; + if (secs < 3600) return `${Math.floor(secs / 60)}m ago`; 
+ if (secs < 86400) return `${Math.floor(secs / 3600)}h ago`; + return d.toLocaleDateString(); +} + +const STATUS_LABELS = { + healthy: 'Healthy', + unreachable: 'Unreachable', + unknown: 'Unknown', +}; + +export function getStatusLabel(status) { + return STATUS_LABELS[status] || STATUS_LABELS.unknown; +} diff --git a/admin/ui/src/utils/instance.test.js b/admin/ui/src/utils/instance.test.js new file mode 100644 index 0000000..ca4b8f8 --- /dev/null +++ b/admin/ui/src/utils/instance.test.js @@ -0,0 +1,56 @@ +import { describe, it, expect, vi, afterEach } from 'vitest'; +import { formatTime, getStatusLabel } from './instance.js'; + +describe('formatTime', () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('returns empty string for null/undefined', () => { + expect(formatTime(null)).toBe(''); + expect(formatTime(undefined)).toBe(''); + }); + + it('returns "just now" for timestamps under 60 seconds ago', () => { + const ts = new Date(Date.now() - 5000).toISOString(); + expect(formatTime(ts)).toBe('just now'); + }); + + it('returns minutes ago for timestamps under 1 hour', () => { + const ts = new Date(Date.now() - 5 * 60 * 1000).toISOString(); + expect(formatTime(ts)).toBe('5m ago'); + }); + + it('returns hours ago for timestamps under 24 hours', () => { + const ts = new Date(Date.now() - 3 * 3600 * 1000).toISOString(); + expect(formatTime(ts)).toBe('3h ago'); + }); + + it('returns locale date string for timestamps over 24 hours', () => { + const d = new Date(Date.now() - 48 * 3600 * 1000); + expect(formatTime(d.toISOString())).toBe(d.toLocaleDateString()); + }); + + it('floors minutes correctly', () => { + const ts = new Date(Date.now() - 90_000).toISOString(); + expect(formatTime(ts)).toBe('1m ago'); + }); + + it('floors hours correctly', () => { + const ts = new Date(Date.now() - 5400 * 1000).toISOString(); + expect(formatTime(ts)).toBe('1h ago'); + }); +}); + +describe('getStatusLabel', () => { + it('returns correct label for each status', () => { + 
expect(getStatusLabel('healthy')).toBe('Healthy'); + expect(getStatusLabel('unreachable')).toBe('Unreachable'); + expect(getStatusLabel('unknown')).toBe('Unknown'); + }); + + it('falls back to "Unknown" for unrecognized status', () => { + expect(getStatusLabel('bogus')).toBe('Unknown'); + expect(getStatusLabel(undefined)).toBe('Unknown'); + }); +}); diff --git a/admin/ui/src/utils/metrics.js b/admin/ui/src/utils/metrics.js new file mode 100644 index 0000000..7077e78 --- /dev/null +++ b/admin/ui/src/utils/metrics.js @@ -0,0 +1,64 @@ +export const VENDOR_COLORS = [ + '#3b82f6', // blue + '#059669', // emerald + '#8b5cf6', // purple + '#f97316', // orange + '#06b6d4', // cyan + '#ec4899', // pink + '#f59e0b', // amber + '#64748b', // slate +]; + +export const LATENCY_COLORS = { + p50: '#3b82f6', // blue + p95: '#f59e0b', // amber + p99: '#dc2626', // red +}; + +export function formatRps(value) { + if (value == null) return '-'; + if (value >= 10000) return `${(value / 1000).toFixed(1)}k`; + if (value >= 1000) return `${(value / 1000).toFixed(2)}k`; + if (value >= 100) return Math.round(value).toString(); + if (value >= 10) return value.toFixed(1); + return value.toFixed(2); +} + +export function formatLatency(ms) { + if (ms == null) return '-'; + if (ms >= 10000) return `${(ms / 1000).toFixed(1)}s`; + if (ms >= 1000) return `${(ms / 1000).toFixed(2)}s`; + if (ms >= 100) return `${Math.round(ms)}ms`; + if (ms >= 10) return `${ms.toFixed(1)}ms`; + return `${ms.toFixed(2)}ms`; +} + +export function formatErrorRate(rate) { + if (rate == null) return '-'; + const pct = rate * 100; + if (pct >= 10) return `${pct.toFixed(1)}%`; + if (pct >= 1) return `${pct.toFixed(2)}%`; + if (pct >= 0.01) return `${pct.toFixed(3)}%`; + if (pct === 0) return '0%'; + return `${pct.toFixed(3)}%`; +} + +export function formatCount(value) { + if (value == null) return '-'; + return Math.round(value).toLocaleString(); +} + +export function trendDirection(value) { + if (value == null) return 
null; + if (value > 0) return 'up'; + if (value < 0) return 'down'; + return 'flat'; +} + +export function assignVendorColors(vendorIds) { + const map = {}; + vendorIds.forEach((id, i) => { + map[id] = VENDOR_COLORS[i % VENDOR_COLORS.length]; + }); + return map; +} diff --git a/admin/ui/src/utils/metrics.test.js b/admin/ui/src/utils/metrics.test.js new file mode 100644 index 0000000..b83cc22 --- /dev/null +++ b/admin/ui/src/utils/metrics.test.js @@ -0,0 +1,142 @@ +import { describe, it, expect } from 'vitest'; +import { + formatRps, + formatLatency, + formatErrorRate, + formatCount, + trendDirection, + assignVendorColors, + VENDOR_COLORS, +} from './metrics.js'; + +describe('formatRps', () => { + it('returns dash for null/undefined', () => { + expect(formatRps(null)).toBe('-'); + expect(formatRps(undefined)).toBe('-'); + }); + + it('formats large values with k suffix', () => { + expect(formatRps(10000)).toBe('10.0k'); + expect(formatRps(15432)).toBe('15.4k'); + }); + + it('formats thousands with two decimal k', () => { + expect(formatRps(1000)).toBe('1.00k'); + expect(formatRps(1234)).toBe('1.23k'); + }); + + it('formats hundreds as integers', () => { + expect(formatRps(100)).toBe('100'); + expect(formatRps(456)).toBe('456'); + }); + + it('formats tens with one decimal', () => { + expect(formatRps(10)).toBe('10.0'); + expect(formatRps(42.7)).toBe('42.7'); + }); + + it('formats small values with two decimals', () => { + expect(formatRps(0.5)).toBe('0.50'); + expect(formatRps(9.99)).toBe('9.99'); + }); +}); + +describe('formatLatency', () => { + it('returns dash for null/undefined', () => { + expect(formatLatency(null)).toBe('-'); + expect(formatLatency(undefined)).toBe('-'); + }); + + it('formats large values as seconds', () => { + expect(formatLatency(10000)).toBe('10.0s'); + expect(formatLatency(1500)).toBe('1.50s'); + }); + + it('formats hundreds as integer ms', () => { + expect(formatLatency(245)).toBe('245ms'); + }); + + it('formats tens with one decimal ms', 
() => { + expect(formatLatency(42.7)).toBe('42.7ms'); + }); + + it('formats small values with two decimal ms', () => { + expect(formatLatency(3.14)).toBe('3.14ms'); + }); +}); + +describe('formatErrorRate', () => { + it('returns dash for null/undefined', () => { + expect(formatErrorRate(null)).toBe('-'); + expect(formatErrorRate(undefined)).toBe('-'); + }); + + it('formats zero as 0%', () => { + expect(formatErrorRate(0)).toBe('0%'); + }); + + it('formats high rates with one decimal', () => { + expect(formatErrorRate(0.15)).toBe('15.0%'); + }); + + it('formats moderate rates with two decimals', () => { + expect(formatErrorRate(0.023)).toBe('2.30%'); + }); + + it('formats small rates with three decimals', () => { + expect(formatErrorRate(0.0005)).toBe('0.050%'); + }); +}); + +describe('formatCount', () => { + it('returns dash for null/undefined', () => { + expect(formatCount(null)).toBe('-'); + expect(formatCount(undefined)).toBe('-'); + }); + + it('rounds and formats with locale separators', () => { + expect(formatCount(42)).toBe('42'); + expect(formatCount(3.7)).toBe('4'); + }); +}); + +describe('trendDirection', () => { + it('returns null for null/undefined', () => { + expect(trendDirection(null)).toBe(null); + expect(trendDirection(undefined)).toBe(null); + }); + + it('returns up for positive values', () => { + expect(trendDirection(5.2)).toBe('up'); + }); + + it('returns down for negative values', () => { + expect(trendDirection(-3.1)).toBe('down'); + }); + + it('returns flat for zero', () => { + expect(trendDirection(0)).toBe('flat'); + }); +}); + +describe('assignVendorColors', () => { + it('assigns unique colors to vendors', () => { + const map = assignVendorColors(['a', 'b', 'c']); + expect(map.a).toBe(VENDOR_COLORS[0]); + expect(map.b).toBe(VENDOR_COLORS[1]); + expect(map.c).toBe(VENDOR_COLORS[2]); + }); + + it('wraps around when more vendors than colors', () => { + const ids = Array.from( + { length: VENDOR_COLORS.length + 1 }, + (_, i) => `v${i}`, + ); + 
const map = assignVendorColors(ids); + expect(map[`v${VENDOR_COLORS.length}`]).toBe(VENDOR_COLORS[0]); + }); + + it('returns empty map for empty input', () => { + expect(assignVendorColors([])).toEqual({}); + }); +}); diff --git a/admin/ui/src/utils/test-utils.js b/admin/ui/src/utils/test-utils.js new file mode 100644 index 0000000..ead8075 --- /dev/null +++ b/admin/ui/src/utils/test-utils.js @@ -0,0 +1,13 @@ +import { createApp } from 'vue'; + +export function withSetup(composable) { + let result; + const app = createApp({ + setup() { + result = composable(); + return () => {}; + }, + }); + app.mount(document.createElement('div')); + return { result, app }; +} diff --git a/admin/ui/src/utils/validation.js b/admin/ui/src/utils/validation.js new file mode 100644 index 0000000..e6cd435 --- /dev/null +++ b/admin/ui/src/utils/validation.js @@ -0,0 +1,31 @@ +export function validateInstanceForm(name, address) { + return { + name: name.trim() ? '' : 'Name is required', + address: address.trim() ? 
'' : 'Address is required', + }; +} + +const MIN_PASSWORD_LENGTH = 12; +const MAX_PASSWORD_LENGTH = 72; + +export function validatePasswordChange( + currentPassword, + newPassword, + confirmPassword, +) { + const errors = {}; + if (!currentPassword) errors.currentPassword = 'Current password is required'; + if (!newPassword) { + errors.newPassword = 'New password is required'; + } else if (newPassword.length < MIN_PASSWORD_LENGTH) { + errors.newPassword = `Password must be at least ${MIN_PASSWORD_LENGTH} characters`; + } else if (newPassword.length > MAX_PASSWORD_LENGTH) { + errors.newPassword = `Password must be at most ${MAX_PASSWORD_LENGTH} characters`; + } + if (!confirmPassword) { + errors.confirmPassword = 'Please confirm your new password'; + } else if (newPassword && confirmPassword !== newPassword) { + errors.confirmPassword = 'Passwords do not match'; + } + return errors; +} diff --git a/admin/ui/src/utils/validation.test.js b/admin/ui/src/utils/validation.test.js new file mode 100644 index 0000000..d58ed42 --- /dev/null +++ b/admin/ui/src/utils/validation.test.js @@ -0,0 +1,93 @@ +import { describe, it, expect } from 'vitest'; +import { validateInstanceForm, validatePasswordChange } from './validation.js'; + +describe('validateInstanceForm', () => { + it('returns no errors for valid inputs', () => { + const errors = validateInstanceForm('proxy-1', '10.0.0.1:9090'); + expect(errors.name).toBe(''); + expect(errors.address).toBe(''); + }); + + it('returns name error when name is empty', () => { + const errors = validateInstanceForm('', '10.0.0.1:9090'); + expect(errors.name).toBe('Name is required'); + expect(errors.address).toBe(''); + }); + + it('returns address error when address is empty', () => { + const errors = validateInstanceForm('proxy-1', ''); + expect(errors.address).toBe('Address is required'); + expect(errors.name).toBe(''); + }); + + it('returns both errors when both are empty', () => { + const errors = validateInstanceForm('', ''); + 
expect(errors.name).toBe('Name is required'); + expect(errors.address).toBe('Address is required'); + }); + + it('treats whitespace-only as empty', () => { + const errors = validateInstanceForm(' ', ' \t '); + expect(errors.name).toBe('Name is required'); + expect(errors.address).toBe('Address is required'); + }); + + it('accepts values with leading/trailing whitespace', () => { + const errors = validateInstanceForm(' proxy-1 ', ' 10.0.0.1:9090 '); + expect(errors.name).toBe(''); + expect(errors.address).toBe(''); + }); +}); + +describe('validatePasswordChange', () => { + it('requires all fields', () => { + const errors = validatePasswordChange('', '', ''); + expect(errors.currentPassword).toBe('Current password is required'); + expect(errors.newPassword).toBe('New password is required'); + expect(errors.confirmPassword).toBe('Please confirm your new password'); + }); + + it('rejects passwords shorter than 12 characters', () => { + const errors = validatePasswordChange('currentpass1', 'short', 'short'); + expect(errors.newPassword).toBe('Password must be at least 12 characters'); + }); + + it('rejects passwords longer than 72 characters', () => { + const long = 'a'.repeat(73); + const errors = validatePasswordChange('currentpass1', long, long); + expect(errors.newPassword).toBe('Password must be at most 72 characters'); + }); + + it('rejects mismatched passwords', () => { + const errors = validatePasswordChange( + 'currentpass1', + 'validpassword1', + 'differentpass1', + ); + expect(errors.confirmPassword).toBe('Passwords do not match'); + }); + + it('returns empty object for valid input', () => { + const errors = validatePasswordChange( + 'currentpass1', + 'newpassword12', + 'newpassword12', + ); + expect(Object.keys(errors)).toHaveLength(0); + }); + + it('accepts exactly 12 character password', () => { + const errors = validatePasswordChange( + 'currentpass1', + 'exactly12chr', + 'exactly12chr', + ); + expect(Object.keys(errors)).toHaveLength(0); + }); + + 
it('accepts exactly 72 character password', () => { + const pw = 'a'.repeat(72); + const errors = validatePasswordChange('currentpass1', pw, pw); + expect(Object.keys(errors)).toHaveLength(0); + }); +}); diff --git a/admin/ui/src/views/AuditLogView.vue b/admin/ui/src/views/AuditLogView.vue new file mode 100644 index 0000000..f206427 --- /dev/null +++ b/admin/ui/src/views/AuditLogView.vue @@ -0,0 +1,616 @@ + + + + + diff --git a/admin/ui/src/views/DashboardView.vue b/admin/ui/src/views/DashboardView.vue new file mode 100644 index 0000000..457972f --- /dev/null +++ b/admin/ui/src/views/DashboardView.vue @@ -0,0 +1,520 @@ + + + + + diff --git a/admin/ui/src/views/InstanceDetailView.vue b/admin/ui/src/views/InstanceDetailView.vue new file mode 100644 index 0000000..3ee67c8 --- /dev/null +++ b/admin/ui/src/views/InstanceDetailView.vue @@ -0,0 +1,272 @@ + + + + + diff --git a/admin/ui/src/views/LoginView.vue b/admin/ui/src/views/LoginView.vue new file mode 100644 index 0000000..e2509e7 --- /dev/null +++ b/admin/ui/src/views/LoginView.vue @@ -0,0 +1,137 @@ + + + + + diff --git a/admin/ui/src/views/SettingsView.vue b/admin/ui/src/views/SettingsView.vue new file mode 100644 index 0000000..a1e91aa --- /dev/null +++ b/admin/ui/src/views/SettingsView.vue @@ -0,0 +1,185 @@ + + + + + diff --git a/admin/ui/vite.config.js b/admin/ui/vite.config.js new file mode 100644 index 0000000..4c52ae7 --- /dev/null +++ b/admin/ui/vite.config.js @@ -0,0 +1,28 @@ +import { resolve } from "node:path"; +import { fileURLToPath } from "node:url"; +import { defineConfig } from "vite"; +import vue from "@vitejs/plugin-vue"; + +const __dirname = fileURLToPath(new URL(".", import.meta.url)); + +export default defineConfig({ + cacheDir: resolve(__dirname, "node_modules/.vite"), + plugins: [vue()], + test: { + environment: "jsdom", + exclude: ["e2e/**", "node_modules/**"], + }, + server: { + port: 5173, + proxy: { + "/api": { + target: "http://127.0.0.1:8080", + changeOrigin: true, + }, + }, + }, + 
build: { + outDir: "dist", + emptyOutDir: true, + }, +}); diff --git a/docs/guides/admin-portal.md b/docs/guides/admin-portal.md new file mode 100644 index 0000000..31778ad --- /dev/null +++ b/docs/guides/admin-portal.md @@ -0,0 +1,175 @@ +# Admin Portal Guide + +How to build, configure, and run the Chaperone Admin Portal (`chaperone-admin`) for fleet monitoring. + +## Prerequisites + +| Requirement | Version | Purpose | +|-------------|---------|---------| +| **Go** | 1.26+ | Building the binary | +| **Node.js** | 24 (CI-tested) | Building the Vue SPA | +| **pnpm** | 10 (CI-tested) | Frontend package manager | + +`admin/ui/package.json` has no `engines` field, so other recent Node and pnpm versions will likely work locally — Node 24 and pnpm 10 are what CI runs against. + +You can build, configure, and start the portal without a running proxy. A running Chaperone proxy is only needed once you reach step 3 of [First Run](#first-run), where you register and monitor instances. + +## Build + +```bash +make build-admin +``` + +This produces a single `chaperone-admin` binary at `./bin/chaperone-admin` with the Vue SPA embedded. No separate web server or static file serving needed. + +The examples below assume `chaperone-admin` is on your `PATH`. If not, invoke it as `./bin/chaperone-admin` or add `./bin` to your `PATH`. + +## Run for development + +The dev backend (built with the `dev` build tag via `make run-admin`) serves the SPA from `admin/ui/dist` on disk. Populate that directory first, otherwise the binary exits with `UI dist directory not found`: + +```bash +cd admin/ui && pnpm build +``` + +Once `dist/` exists, pick one of two dev modes: + +- **Backend only** — run `make run-admin`. The Go server reads the SPA from disk. No hot reload; rebuild the SPA with `pnpm build` to pick up frontend changes. +- **Backend + Vite hot module replacement (HMR)** — run `make run-admin` in one terminal and `cd admin/ui && pnpm dev` in another. 
Open the Vite URL (default `http://localhost:5173`). Vite proxies API calls to the Go backend on `:8080` and reloads SPA changes instantly. + +## Configuration + +Create a `chaperone-admin.yaml` file (or pass `--config /path/to/config.yaml`): + +```yaml +server: + addr: "127.0.0.1:8080" + secure_cookies: false # Set to true when serving behind HTTPS + +database: + path: "./chaperone-admin.db" + +scraper: + interval: "10s" + timeout: "5s" + +session: + max_age: "24h" + idle_timeout: "2h" + +audit: + retention_days: 90 + +log: + level: "info" # debug, info, warn, error + format: "json" # json, text +``` + +The values above are the defaults; the portal starts with zero config for local testing. + +> **Note:** `database.path` is resolved relative to the current working directory when no absolute path is given. Run `create-user`, `reset-password`, and `serve` from the same directory (or pass an absolute path / `--config`), otherwise each invocation will read or create a different SQLite file and you'll get a "user not found" failure at login. + +### Environment variable overrides + +Every config key can be overridden via environment variables using the `CHAPERONE_ADMIN_SECTION_KEY` convention: + +| Config Key | Environment Variable | +|-----------|---------------------| +| `server.addr` | `CHAPERONE_ADMIN_SERVER_ADDR` | +| `server.secure_cookies` | `CHAPERONE_ADMIN_SERVER_SECURE_COOKIES` | +| `database.path` | `CHAPERONE_ADMIN_DATABASE_PATH` | +| `scraper.interval` | `CHAPERONE_ADMIN_SCRAPER_INTERVAL` | +| `scraper.timeout` | `CHAPERONE_ADMIN_SCRAPER_TIMEOUT` | +| `session.max_age` | `CHAPERONE_ADMIN_SESSION_MAX_AGE` | +| `session.idle_timeout` | `CHAPERONE_ADMIN_SESSION_IDLE_TIMEOUT` | +| `audit.retention_days` | `CHAPERONE_ADMIN_AUDIT_RETENTION_DAYS` | +| `log.level` | `CHAPERONE_ADMIN_LOG_LEVEL` | +| `log.format` | `CHAPERONE_ADMIN_LOG_FORMAT` | + +Environment variables take precedence over the config file. + +## First Run + +### 1. 
Create an admin user + +The portal requires authentication. No users exist on first start, so create one via CLI: + +```bash +chaperone-admin create-user --username admin +``` + +The command prompts for a password and then asks you to confirm it. Constraints: + +- Input is hidden as you type. +- Minimum length is 12 characters. +- A real TTY is required — the prompt cannot be piped via stdin or here-strings. + +> **Note:** The portal returns 401 on all API routes until at least one user exists. + +### 2. Start the server + +```bash +chaperone-admin serve +# or simply: +chaperone-admin +``` + +The `serve` command is the default when no subcommand is given. Open `http://localhost:8080` in your browser and log in with the credentials you created. + +### 3. Confirm network reachability + +The portal polls each proxy's admin port (`/_ops/health`, `/_ops/version`, `GET /metrics`) every 10 seconds. Before registering instances, make sure the admin port is reachable from the portal host. + +| Topology | Proxy Admin Port Config | When to Use | +|----------|------------------------|-------------| +| **Single-host** | Default (`127.0.0.1:9090`) | Portal and proxies on the same machine | +| **Multi-host** | Set `admin_addr` to a reachable interface (e.g., `0.0.0.0:9090`) | Proxies on separate hosts/containers | + +> **Warning:** The admin port exposes health, version, and Prometheus metrics. Keep it within a trusted network (VPC, Kubernetes cluster network, firewall-restricted subnet). Never expose it to the public internet. + +**Kubernetes**: Use `admin_addr: "0.0.0.0:9090"` to make the admin port reachable within the cluster. Do not create a `LoadBalancer` or `NodePort` Service for the admin port. + +### 4. Register proxy instances + +Log in and click "Add Your First Instance" on the welcome screen. 
Enter: + +- **Name**: A human-readable label (e.g., `proxy-prod-01`) +- **Address**: The proxy's admin `host:port` (e.g., `10.0.0.1:9090`) + +Use "Test Connection" to verify the portal can reach the proxy before saving. If the test fails, check: + +- The proxy is running and its admin server is started +- The admin port is reachable from the portal host (see step 3 above) +- No firewall rules blocking the connection + +## CLI Commands + +**Global flag:** `--config ` works on every command and selects the config file. The `serve` command also accepts `--version` to print the version and exit. + +| Command | Description | +|---------|-------------| +| `chaperone-admin serve [flags]` | Start the portal server (default) | +| `chaperone-admin create-user --username ` | Create a new admin user | +| `chaperone-admin reset-password --username ` | Reset a user's password and invalidate all their sessions | + +## Manage Sessions + +- To adjust session lifetime, set `session.max_age` (absolute TTL, default 24h) and `session.idle_timeout` (inactivity limit, default 2h) in the config file. +- To force a user to re-authenticate, run `chaperone-admin reset-password --username ` — this invalidates all their sessions. +- To end your own session, click "Logout" in the sidebar. The session is invalidated server-side immediately. + +## Review the Audit Log + +All portal actions (instance add/edit/remove, login, logout, password changes) are recorded in the audit log. + +- To view the log, click "Audit Log" in the sidebar. +- To find specific events, use the full-text search bar or filter by action type and date range. +- To change retention, set `audit.retention_days` in the config file (default: 90 days, set to `0` to keep forever). +- To export audit data, query the SQLite database file at the path configured in `database.path`. + +## Monitor Metrics and Health + +- To view per-instance metrics, open the dashboard. 
It displays RPS, latency percentiles (p50, p95, p99), error rate, active connections, and panic count for each proxy, computed from each proxy's `/metrics` endpoint polled every 10 seconds. +- To interpret health badges, read them as: **unknown** (before first poll), **healthy** (last poll succeeded), or **unreachable** (3 consecutive failures). A single successful poll restores an unreachable instance to healthy. +- To wait through the post-restart placeholder, give the portal at least two scrape cycles (~20 seconds) after a restart — charts show "Collecting data points..." until two snapshots exist to compute rates from. +- To plan around history retention, note that metrics are kept in memory only. The portal retains 360 scrape snapshots per instance (`DefaultCapacity` in `admin/metrics/metrics.go`), which at 10s intervals is exactly 1 hour of history. A restart clears all metrics. diff --git a/go.work b/go.work index 4fb25d9..f052787 100644 --- a/go.work +++ b/go.work @@ -4,4 +4,5 @@ use ( . ./plugins/contrib ./sdk + ./admin ) diff --git a/test/mock-chaperone/mock-chaperone.js b/test/mock-chaperone/mock-chaperone.js new file mode 100644 index 0000000..e9ac557 --- /dev/null +++ b/test/mock-chaperone/mock-chaperone.js @@ -0,0 +1,429 @@ +#!/usr/bin/env node +/** + * Copyright 2026 CloudBlue LLC + * SPDX-License-Identifier: Apache-2.0 + */ + + +'use strict'; + +const http = require('node:http'); + +// ─── Configuration ────────────────────────────────────────────────────────── +// Edit this section to customize the mock fleet. 
+// Alternatively, pass CLI args: node mock-chaperone.js [count] [startPort] + +const CONFIG = { + instances: [ + { port: 19091, name: 'proxy-us-east-1', profile: 'healthy' }, + { port: 19092, name: 'proxy-eu-west-1', profile: 'healthy' }, + { port: 19093, name: 'proxy-ap-south-1', profile: 'degraded' }, + ], + vendors: ['acme-corp', 'globex-inc', 'initech-llc'], + version: '0.8.2', + tickIntervalMs: 1000, +}; + +// ─── Profile Definitions ──────────────────────────────────────────────────── +// Each profile defines traffic characteristics per tick (1 second). + +const PROFILES = { + healthy: { + rpsRange: [20, 60], + errorRate: 0.02, + latencyMean: 0.15, + latencyStddev: 0.08, + activeConnRange: [5, 25], + panicRate: 0.0001, + available: true, + }, + degraded: { + rpsRange: [10, 30], + errorRate: 0.15, + latencyMean: 0.6, + latencyStddev: 0.3, + activeConnRange: [15, 50], + panicRate: 0.005, + available: true, + }, + flapping: { + rpsRange: [20, 60], + errorRate: 0.02, + latencyMean: 0.15, + latencyStddev: 0.08, + activeConnRange: [5, 25], + panicRate: 0.0001, + available: true, + flapIntervalMs: 30000, + }, +}; + +// ─── Histogram Buckets ────────────────────────────────────────────────────── +// Matches chaperone's APILatencyBuckets exactly (internal/telemetry/metrics.go). 
+ +const BUCKETS = [0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1, 2, 5, 10]; + +// ─── Helpers ────────────────────────────────────────────────────────────── + +function randInt(min, max) { + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +function gaussianRandom(mean, stddev) { + const u1 = 1 - Math.random(); // shift [0,1) to (0,1] so Math.log(u1) is never -Infinity + const u2 = Math.random(); + const z = Math.sqrt(-2 * Math.log(u1)) * Math.cos(2 * Math.PI * u2); + return mean + z * stddev; +} + +function weightedSplit(total, weights) { + const result = {}; + let remaining = total; + const entries = Object.entries(weights); + for (let i = 0; i < entries.length - 1; i++) { + const [key, weight] = entries[i]; + const count = Math.round(total * weight); + result[key] = count; + remaining -= count; + } + result[entries[entries.length - 1][0]] = Math.max(0, remaining); + return result; +} + +function addHistogramObservation(histogram, value) { + histogram.sum += value; + histogram.count++; + for (let i = 0; i < BUCKETS.length; i++) { + if (value <= BUCKETS[i]) { + histogram.buckets[i]++; + return; + } + } + // Falls into +Inf only (counted in .count but not in any named bucket) +} + +// ─── Instance State ───────────────────────────────────────────────────────── + +class InstanceState { + constructor(instanceConfig, vendors) { + this.instanceConfig = instanceConfig; + this.profile = PROFILES[instanceConfig.profile]; + this.startTime = new Date().toISOString(); + this.vendors = vendors; + this.available = this.profile.available; + + // Counters: { vendor: { '2xx': { GET: n, ...
}, '4xx': {...}, '5xx': {...} } } + this.requests = {}; + // Histograms: { vendor: { buckets: [...], sum: n, count: n } } + this.requestDuration = {}; + this.upstreamDuration = {}; + this.activeConnections = 0; + this.panicsTotal = 0; + + // V0.5 — cache stats + this.cacheHits = 0; + this.cacheMisses = 0; + this.cacheEvictions = 0; + + for (const vendor of vendors) { + this.requests[vendor] = {}; + for (const cls of ['2xx', '3xx', '4xx', '5xx']) { + this.requests[vendor][cls] = { GET: 0, POST: 0, PUT: 0, DELETE: 0 }; + } + this.requestDuration[vendor] = { + buckets: new Array(BUCKETS.length).fill(0), sum: 0, count: 0, + }; + this.upstreamDuration[vendor] = { + buckets: new Array(BUCKETS.length).fill(0), sum: 0, count: 0, + }; + } + + // Flapping: toggle availability on interval + if (instanceConfig.profile === 'flapping') { + setInterval(() => { this.available = !this.available; }, + this.profile.flapIntervalMs || 30000); + } + } + + tick() { + if (!this.available) return; + + const p = this.profile; + const rps = randInt(p.rpsRange[0], p.rpsRange[1]); + + for (const vendor of this.vendors) { + const vendorRps = Math.max(1, Math.round(rps / this.vendors.length + randInt(-3, 3))); + const methods = weightedSplit(vendorRps, { GET: 0.6, POST: 0.2, PUT: 0.15, DELETE: 0.05 }); + + for (const [method, count] of Object.entries(methods)) { + const errors = Math.round(count * p.errorRate); + const clientErrors = Math.round(errors * 0.7); + const serverErrors = errors - clientErrors; + + this.requests[vendor]['2xx'][method] += count - errors; + this.requests[vendor]['4xx'][method] += clientErrors; + this.requests[vendor]['5xx'][method] += serverErrors; + + for (let i = 0; i < count; i++) { + const latency = Math.max(0.001, gaussianRandom(p.latencyMean, p.latencyStddev)); + addHistogramObservation(this.requestDuration[vendor], latency); + addHistogramObservation(this.upstreamDuration[vendor], latency * (0.7 + Math.random() * 0.2)); + } + } + + // Cache: ~80% hit rate for 
healthy, ~50% for degraded + const hitRate = p.errorRate < 0.1 ? 0.8 : 0.5; + const cacheOps = Math.round(vendorRps * 0.6); + this.cacheHits += Math.round(cacheOps * hitRate); + this.cacheMisses += cacheOps - Math.round(cacheOps * hitRate); + if (Math.random() < 0.01) this.cacheEvictions += randInt(1, 5); + } + + this.activeConnections = randInt(p.activeConnRange[0], p.activeConnRange[1]); + if (Math.random() < p.panicRate) this.panicsTotal++; + } + + toPrometheus() { + const lines = []; + + // chaperone_requests_total + lines.push('# HELP chaperone_requests_total Total number of requests processed'); + lines.push('# TYPE chaperone_requests_total counter'); + for (const vendor of this.vendors) { + for (const [cls, methods] of Object.entries(this.requests[vendor])) { + for (const [method, count] of Object.entries(methods)) { + if (count > 0) { + lines.push(`chaperone_requests_total{vendor_id="${vendor}",status_class="${cls}",method="${method}"} ${count}`); + } + } + } + } + + // chaperone_request_duration_seconds + lines.push(''); + lines.push('# HELP chaperone_request_duration_seconds Total request duration including plugin and upstream'); + lines.push('# TYPE chaperone_request_duration_seconds histogram'); + for (const vendor of this.vendors) { + const h = this.requestDuration[vendor]; + let cumulative = 0; + for (let i = 0; i < BUCKETS.length; i++) { + cumulative += h.buckets[i]; + lines.push(`chaperone_request_duration_seconds_bucket{vendor_id="${vendor}",le="${BUCKETS[i]}"} ${cumulative}`); + } + lines.push(`chaperone_request_duration_seconds_bucket{vendor_id="${vendor}",le="+Inf"} ${h.count}`); + lines.push(`chaperone_request_duration_seconds_sum{vendor_id="${vendor}"} ${h.sum.toFixed(6)}`); + lines.push(`chaperone_request_duration_seconds_count{vendor_id="${vendor}"} ${h.count}`); + } + + // chaperone_upstream_duration_seconds + lines.push(''); + lines.push('# HELP chaperone_upstream_duration_seconds Time spent waiting for upstream response'); + lines.push('# 
TYPE chaperone_upstream_duration_seconds histogram'); + for (const vendor of this.vendors) { + const h = this.upstreamDuration[vendor]; + let cumulative = 0; + for (let i = 0; i < BUCKETS.length; i++) { + cumulative += h.buckets[i]; + lines.push(`chaperone_upstream_duration_seconds_bucket{vendor_id="${vendor}",le="${BUCKETS[i]}"} ${cumulative}`); + } + lines.push(`chaperone_upstream_duration_seconds_bucket{vendor_id="${vendor}",le="+Inf"} ${h.count}`); + lines.push(`chaperone_upstream_duration_seconds_sum{vendor_id="${vendor}"} ${h.sum.toFixed(6)}`); + lines.push(`chaperone_upstream_duration_seconds_count{vendor_id="${vendor}"} ${h.count}`); + } + + // chaperone_active_connections + lines.push(''); + lines.push('# HELP chaperone_active_connections Number of active connections'); + lines.push('# TYPE chaperone_active_connections gauge'); + lines.push(`chaperone_active_connections ${this.activeConnections}`); + + // chaperone_panics_total + lines.push(''); + lines.push('# HELP chaperone_panics_total Total number of recovered panics'); + lines.push('# TYPE chaperone_panics_total counter'); + lines.push(`chaperone_panics_total ${this.panicsTotal}`); + + return lines.join('\n') + '\n'; + } + + getCacheStats() { + const total = this.cacheHits + this.cacheMisses; + return { + entries: Math.min(Math.round(total * 0.1), 10000), + hits: this.cacheHits, + misses: this.cacheMisses, + hit_ratio: total > 0 ? 
+(this.cacheHits / total).toFixed(4) : 0, + evictions: this.cacheEvictions, + }; + } + + getTlsStatus() { + const now = new Date(); + const notBefore = new Date(now); + notBefore.setMonth(notBefore.getMonth() - 6); + const notAfter = new Date(now); + notAfter.setMonth(notAfter.getMonth() + 3); + const daysUntilExpiry = Math.round((notAfter - now) / (1000 * 60 * 60 * 24)); + + return { + issuer: 'CN=Chaperone Internal CA,O=CloudBlue,C=US', + subject: `CN=${this.instanceConfig.name}.chaperone.local`, + not_before: notBefore.toISOString(), + not_after: notAfter.toISOString(), + days_until_expiry: daysUntilExpiry, + serial: 'AB:CD:EF:01:23:45:67:89', + }; + } + + getConfig() { + return { + server: { + addr: `:8443`, + admin_addr: `:${this.instanceConfig.port}`, + }, + upstream: { + allowed_hosts: ['*.vendor-api.com', 'api.acme-corp.com', 'api.globex-inc.com'], + timeout: '30s', + }, + tls: { + cert_file: '[REDACTED]', + key_file: '[REDACTED]', + ca_file: '[REDACTED]', + min_version: 'TLS1.3', + }, + cache: { ttl: '5m', max_entries: 10000 }, + headers: { prefix: 'X-Connect' }, + observability: { metrics_enabled: true, profiling_enabled: false }, + }; + } + + testAllowlist(testUrl) { + const allowedPatterns = ['*.vendor-api.com', 'api.acme-corp.com', 'api.globex-inc.com']; + let hostname = ''; + try { + hostname = new URL(testUrl).hostname; + } catch { + return { allowed: false, matched_host: '', matched_pattern: '', explanation: 'Invalid URL' }; + } + for (const pattern of allowedPatterns) { + if (pattern.startsWith('*.')) { + const suffix = pattern.slice(1); // ".vendor-api.com" + if (hostname.endsWith(suffix) || hostname === pattern.slice(2)) { + return { allowed: true, matched_host: hostname, matched_pattern: pattern, explanation: `Wildcard match: ${hostname} matches ${pattern}` }; + } + } else if (hostname === pattern) { + return { allowed: true, matched_host: hostname, matched_pattern: pattern, explanation: `Exact match: ${hostname}` }; + } + } + return { allowed: 
false, matched_host: hostname, matched_pattern: '', explanation: `No matching allow-list entry for ${hostname}` }; + } +} + +// ─── HTTP Server ──────────────────────────────────────────────────────────── + +function createServer(instanceConfig, state) { + return http.createServer((req, res) => { + if (!state.available) { + res.destroy(); + return; + } + + const url = new URL(req.url, `http://localhost:${instanceConfig.port}`); + + if (req.method === 'GET' && url.pathname === '/_ops/health') { + json(res, 200, { status: 'alive' }); + } else if (req.method === 'GET' && url.pathname === '/_ops/version') { + json(res, 200, { version: CONFIG.version, start_time: state.startTime }); + } else if (req.method === 'GET' && url.pathname === '/metrics') { + res.writeHead(200, { 'Content-Type': 'text/plain; version=0.0.4; charset=utf-8' }); + res.end(state.toPrometheus()); + } else if (req.method === 'GET' && url.pathname === '/_ops/config') { + json(res, 200, state.getConfig()); + } else if (req.method === 'GET' && url.pathname === '/_ops/cache/stats') { + json(res, 200, state.getCacheStats()); + } else if (req.method === 'GET' && url.pathname === '/_ops/tls/status') { + json(res, 200, state.getTlsStatus()); + } else if (req.method === 'GET' && url.pathname === '/_ops/allowlist/test') { + json(res, 200, state.testAllowlist(url.searchParams.get('url') || '')); + } else { + json(res, 404, { error: 'not found' }); + } + }); +} + +function json(res, status, body) { + res.writeHead(status, { 'Content-Type': 'application/json' }); + res.end(JSON.stringify(body)); +} + +// ─── CLI ──────────────────────────────────────────────────────────────────── + +function parseArgs() { + const args = process.argv.slice(2); + + if (args.includes('--help') || args.includes('-h')) { + console.log(`Usage: node mock-chaperone.js [options] + +Options: + [count] Number of instances (default: uses CONFIG) + [count] [startPort] Number of instances + starting port (default: 19091) + --help, -h Show this 
help + +Examples: + node mock-chaperone.js # 3 instances from CONFIG + node mock-chaperone.js 5 # 5 healthy instances on 19091-19095 + node mock-chaperone.js 5 19100 # 5 healthy instances on 19100-19104 + +Edit CONFIG in the script for full customization (profiles, vendors, etc.).`); + process.exit(0); + } + + if (args.length >= 1 && !isNaN(args[0])) { + const count = parseInt(args[0], 10); + const startPort = args.length >= 2 && !isNaN(args[1]) ? parseInt(args[1], 10) : 19091; + const names = ['us-east-1', 'eu-west-1', 'ap-south-1', 'us-west-2', 'eu-central-1', + 'ap-northeast-1', 'sa-east-1', 'af-south-1', 'me-south-1', 'ca-central-1']; + CONFIG.instances = []; + for (let i = 0; i < count; i++) { + const region = names[i % names.length]; + CONFIG.instances.push({ + port: startPort + i, + name: `proxy-${region}`, + profile: i === count - 1 && count > 1 ? 'degraded' : 'healthy', + }); + } + } +} + +// ─── Main ─────────────────────────────────────────────────────────────────── + +function main() { + parseArgs(); + + for (const inst of CONFIG.instances) { + const state = new InstanceState(inst, CONFIG.vendors); + const server = createServer(inst, state); + server.listen(inst.port, () => { + console.log(` [${inst.name}] :${inst.port} (${inst.profile})`); + }); + + // Evolve metrics every tick + setInterval(() => state.tick(), CONFIG.tickIntervalMs); + } + + console.log(`\nMock Chaperone fleet — ${CONFIG.instances.length} instances`); + console.log(`Vendors: ${CONFIG.vendors.join(', ')}`); + console.log(`Metrics tick: every ${CONFIG.tickIntervalMs}ms\n`); + + console.log('Endpoints per instance:'); + console.log(' GET /_ops/health Health check'); + console.log(' GET /_ops/version Version + start_time'); + console.log(' GET /metrics Prometheus metrics'); + console.log(' GET /_ops/config Running config (V0.5)'); + console.log(' GET /_ops/cache/stats Cache statistics (V0.5)'); + console.log(' GET /_ops/tls/status TLS cert info (V0.5)'); + console.log(' GET 
/_ops/allowlist/test?url=... URL test (V0.5)'); + console.log('\nPress Ctrl+C to stop.\n'); +} + +main();