From 99bf0a7d1406aea5f9a1f637fd1ac59030d1b5a7 Mon Sep 17 00:00:00 2001
From: Miguel Angel Ajo Pelayo
Date: Sat, 13 Dec 2025 22:42:53 +0000
Subject: [PATCH 01/10] microshift-bootc: create a bootc-based MicroShift image

Creates a bootc-based image built on MicroShift for evaluation and small labs.
---
 deploy/microshift-bootc/Containerfile      |   30 +
 deploy/microshift-bootc/Makefile           |   93 +
 deploy/microshift-bootc/README.md          |  420 +++
 deploy/microshift-bootc/config-svc/app.py  | 2302 +++++++++++++++++
 .../config-svc/config-svc.service          |   21 +
 .../config-svc/update-banner.service       |   19 +
 .../config-svc/update-banner.sh            |   15 +
 deploy/microshift-bootc/config.toml        |   38 +
 deploy/microshift-bootc/kustomization.yaml |   11 +
 deploy/microshift-bootc/output/.gitkeep    |    0
 deploy/microshift-bootc/run-microshift.sh  |  179 ++
 11 files changed, 3128 insertions(+)
 create mode 100644 deploy/microshift-bootc/Containerfile
 create mode 100644 deploy/microshift-bootc/Makefile
 create mode 100644 deploy/microshift-bootc/README.md
 create mode 100644 deploy/microshift-bootc/config-svc/app.py
 create mode 100644 deploy/microshift-bootc/config-svc/config-svc.service
 create mode 100644 deploy/microshift-bootc/config-svc/update-banner.service
 create mode 100644 deploy/microshift-bootc/config-svc/update-banner.sh
 create mode 100644 deploy/microshift-bootc/config.toml
 create mode 100644 deploy/microshift-bootc/kustomization.yaml
 create mode 100644 deploy/microshift-bootc/output/.gitkeep
 create mode 100755 deploy/microshift-bootc/run-microshift.sh

diff --git a/deploy/microshift-bootc/Containerfile b/deploy/microshift-bootc/Containerfile
new file mode 100644
index 00000000..f242ee69
--- /dev/null
+++ b/deploy/microshift-bootc/Containerfile
@@ -0,0 +1,30 @@
+FROM ghcr.io/microshift-io/microshift:release-4.20-4.20.0-okd-scos.9
+
+# Install dependencies for config-svc
+RUN dnf install -y epel-release && \
+    dnf install -y python3 iproute python3-flask python3-pip && \
+    pip3 install python-pam && \
+    dnf clean all
+
+# Install MicroShift manifests
+RUN mkdir -p /etc/microshift/manifests.d/002-jumpstarter
+COPY deploy/microshift-bootc/kustomization.yaml /etc/microshift/manifests.d/002-jumpstarter/kustomization.yaml
+COPY deploy/operator/dist/install.yaml /etc/microshift/manifests.d/002-jumpstarter/install-operator.yaml
+
+# Configure firewalld to open required ports
+# Use firewall-offline-cmd since firewalld is not running during build
+RUN firewall-offline-cmd --add-service=http && \
+    firewall-offline-cmd --add-service=https && \
+    firewall-offline-cmd --add-port=8880/tcp
+
+# Set root password
+RUN echo "root:jumpstarter" | chpasswd
+
+# Install config-svc systemd service
+COPY deploy/microshift-bootc/config-svc/app.py /usr/local/bin/config-svc
+RUN chmod +x /usr/local/bin/config-svc
+COPY deploy/microshift-bootc/config-svc/update-banner.sh /usr/local/bin/update-banner.sh
+RUN chmod +x /usr/local/bin/update-banner.sh
+COPY deploy/microshift-bootc/config-svc/config-svc.service /etc/systemd/system/config-svc.service
+COPY deploy/microshift-bootc/config-svc/update-banner.service /etc/systemd/system/update-banner.service
+RUN systemctl enable config-svc.service update-banner.service
\ No newline at end of file
diff --git a/deploy/microshift-bootc/Makefile b/deploy/microshift-bootc/Makefile
new file mode 100644
index 00000000..604a370e
--- /dev/null
+++ b/deploy/microshift-bootc/Makefile
@@ -0,0 +1,93 @@
+.PHONY: help build bootc-build push bootc-push bootc-run bootc-stop bootc-sh bootc-rm
+
+# Default image tags
+BOOTC_IMG ?= 
quay.io/jumpstarter-dev/microshift/bootc:latest + + +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Build + +build: bootc-build ## Build bootc image (default target) + +bootc-build: ## Build the bootc image with MicroShift + @echo "Building bootc image: $(BOOTC_IMG): building as root to be on the container storage from root" + sudo podman build -t $(BOOTC_IMG) -f Containerfile ../.. + +output/qcow2/disk.qcow2: ## Build a bootable QCOW2 image from the bootc image + @echo "Building QCOW2 image from: $(BOOTC_IMG)" + @echo "Running bootc-image-builder..." + @mkdir -p output + sudo podman run \ + --rm \ + -it \ + --privileged \ + --pull=newer \ + --security-opt label=type:unconfined_t \ + -v ./config.toml:/config.toml:ro \ + -v ./output:/output \ + -v /var/lib/containers/storage:/var/lib/containers/storage \ + quay.io/centos-bootc/bootc-image-builder:latest \ + --type qcow2 \ + -v \ + $(BOOTC_IMG) + @echo "QCOW2 image built successfully in ./output/" + +build-image: bootc-build ## Build the bootc based qcow2 image + @echo "Building image: output/qcow2/disk.qcow2" + @echo "Cleaning up any existing LVM resources to avoid conflicts..." + -sudo vgs --noheadings -o vg_name,vg_uuid | grep myvg1 | while read vg uuid; do sudo vgremove -f --select vg_uuid=$$uuid 2>/dev/null || true; done + -sudo losetup -D 2>/dev/null || true + sudo rm -f output/qcow2/disk.qcow2 + make output/qcow2/disk.qcow2 + @echo "Image built successfully in ./output/" + +##@ Push + +push: bootc-push ## Push bootc image to registry + +bootc-push: ## Push the bootc image to registry + @echo "Pushing bootc image: $(BOOTC_IMG)" + sudo podman push $(BOOTC_IMG) + +##@ Development + +build-all: bootc-build ## Build bootc image + +push-all: bootc-push ## Push bootc image to registry + +bootc-run: ## Run MicroShift in a bootc container + @echo "Running MicroShift container with image: $(BOOTC_IMG)" + @BOOTC_IMG=$(BOOTC_IMG) sudo -E ./run-microshift.sh + +bootc-stop: ## Stop the running MicroShift container + @echo "Stopping MicroShift container..." + -sudo podman stop jumpstarter-microshift-okd + +bootc-rm: bootc-stop ## Remove the MicroShift container + @echo "Removing MicroShift container..." + -sudo podman rm -f jumpstarter-microshift-okd + @echo "Cleaning up LVM resources..." + -sudo vgremove -f myvg1 2>/dev/null || true + -sudo losetup -d $$(sudo losetup -j /var/lib/microshift-okd/lvmdisk.image | cut -d: -f1) 2>/dev/null || true + @echo "LVM cleanup complete" + +bootc-sh: ## Open a shell in the running MicroShift container + @echo "Opening shell in MicroShift container..." + sudo podman exec -it jumpstarter-microshift-okd /bin/bash -l + +bootc-reload-app: ## Reload the config service app without rebuilding (dev mode) + @echo "Reloading config-svc app..." + sudo podman cp config-svc/app.py jumpstarter-microshift-okd:/usr/local/bin/config-svc + sudo podman exec jumpstarter-microshift-okd systemctl restart config-svc + @echo "Config service reloaded successfully!" + +clean: ## Clean up local images and build artifacts + @echo "Removing local images..." + -sudo podman rmi $(BOOTC_IMG) + @echo "Removing QCOW2 output..." + -sudo rm -rf output/qcow2/disk.qcow2 + @echo "Removing LVM disk image..." 
+ -sudo rm -f /var/lib/microshift-okd/lvmdisk.image + diff --git a/deploy/microshift-bootc/README.md b/deploy/microshift-bootc/README.md new file mode 100644 index 00000000..b35d9234 --- /dev/null +++ b/deploy/microshift-bootc/README.md @@ -0,0 +1,420 @@ +# MicroShift Bootc Deployment + +This directory contains the configuration and scripts to build a bootable container (bootc) image with MicroShift and the Jumpstarter operator pre-installed. + +> **⚠️ Community Edition Disclaimer** +> +> This MicroShift-based deployment is a **community-supported edition** intended for development, testing, and evaluation scenarios. It is **not officially supported** for production use, although it can be OK for small labs. +> +> **For production deployments**, we strongly recommend using the official Jumpstarter Controller deployment on Kubernetes or OpenShift clusters with proper high availability, security, and support. See the [official installation documentation](https://jumpstarter.dev/main/getting-started/installation/service/index.html) for production deployment guides. + +## Overview + +This community edition deployment provides a lightweight, all-in-one solution ideal for: +- **Edge devices** with limited resources +- **Development and testing** environments +- **Proof-of-concept** deployments +- **Local experimentation** with Jumpstarter + +**Features:** +- **MicroShift 4.20 (OKD)** - Lightweight Kubernetes distribution +- **Jumpstarter Operator** - Pre-installed and ready to use +- **TopoLVM CSI** - Dynamic storage provisioning using LVM +- **Configuration Web UI** - Easy setup and management at port 8880 +- **Pod Monitoring** - Real-time pod status dashboard + +## Prerequisites + +- **Fedora/RHEL-based system** (tested on Fedora 42) +- **Podman** installed and configured +- **Root/sudo access** required for privileged operations +- **At least 4GB RAM** and 20GB disk space recommended + +## Quick Start + +### 1. Build the Bootc Image + +```bash +make bootc-build +``` + +This builds a container image with MicroShift and all dependencies. + +### 2. Run as Container (Development/Testing) + +```bash +make bootc-run +``` + +This will: +- Create a 1GB LVM disk image at `/var/lib/microshift-okd/lvmdisk.image` +- Start MicroShift in a privileged container +- Set up LVM volume groups inside the container for TopoLVM +- Wait for MicroShift to be ready + +**Output example:** +``` +MicroShift is running in a bootc container +Hostname: jumpstarter.10.0.2.2.nip.io +Container: jumpstarter-microshift-okd +LVM disk: /var/lib/microshift-okd/lvmdisk.image +VG name: myvg1 +Ports: HTTP:80, HTTPS:443, Config Service:8880 +``` + +### 3. 
Access the Services + +#### Configuration Web UI +- URL: `http://localhost:8880` +- Login: `root` / `jumpstarter` (default - you'll be required to change it) +- Features: + - Configure hostname and base domain + - Set controller image version + - Change root password (required on first use) + - Download kubeconfig + - Monitor pod status + +#### MicroShift API +- URL: `https://jumpstarter..nip.io:6443` +- Download kubeconfig from the web UI or extract from container + +#### Pod Monitoring Dashboard +- URL: `http://localhost:8880/pods` +- Auto-refreshes every 5 seconds +- Shows all pods across all namespaces + +## Container Management + +### View Running Pods + +```bash +sudo podman exec -it jumpstarter-microshift-okd oc get pods -A +``` + +### Open Shell in Container + +```bash +make bootc-sh +``` + +### Stop Container + +```bash +make bootc-stop +``` + +### Remove Container + +```bash +make bootc-rm +``` + +This will: +- Stop the container +- Remove the container +- Clean up LVM volume groups (myvg1) +- Detach loop devices + +**Note:** The LVM disk image (`/var/lib/microshift-okd/lvmdisk.image`) is preserved. To remove it completely, use `make clean`. + +### Complete Rebuild + +```bash +make bootc-rm bootc-build bootc-run +``` + +This stops, removes, rebuilds, and restarts the container with the latest changes. + +## Creating a Bootable QCOW2 Image + +For production deployments, you can create a bootable QCOW2 disk image that can be: +- Installed on bare metal +- Used in virtual machines (KVM/QEMU, OpenStack, etc.) +- Deployed to edge devices + +### Build QCOW2 Image + +```bash +make build-image +``` + +This will: +1. Clean up any existing LVM resources to avoid conflicts +2. Build the bootc container image (if not already built) +3. Use `bootc-image-builder` to create a bootable QCOW2 image +4. Output the image to `./output/qcow2/disk.qcow2` + +**Note:** This process takes several minutes and requires significant disk space (20GB+). + +**Important:** If you're running the container (`make bootc-run`) and want to build the image, stop the container first with `make bootc-rm` to avoid LVM conflicts. + +### Configuration + +The QCOW2 image is configured via `config.toml`: +- **LVM partitioning:** Creates `myvg1` volume group with 20GB minimum +- **Root filesystem:** XFS on LVM (10GB minimum) +- **Default password:** `root:jumpstarter` (change via web UI on first boot) + +### Using the QCOW2 Image + +#### In a Virtual Machine (KVM/QEMU) + +```bash +qemu-system-x86_64 \ + -m 4096 \ + -smp 2 \ + -drive file=output/qcow2/disk.qcow2,format=qcow2 \ + -net nic -net user,hostfwd=tcp::8880-:8880,hostfwd=tcp::443-:443 +``` + +#### Convert to Other Formats + +```bash +# Convert to raw disk image +qemu-img convert -f qcow2 -O raw output/qcow2/disk.qcow2 output/disk.raw + +# Convert to VirtualBox VDI +qemu-img convert -f qcow2 -O vdi output/qcow2/disk.qcow2 output/disk.vdi +``` + +## Architecture + +### Components + +``` +┌─────────────────────────────────────────────┐ +│ Bootc Container / Image │ +├─────────────────────────────────────────────┤ +│ • Fedora CoreOS 9 base │ +│ • MicroShift 4.20 (OKD) │ +│ • Jumpstarter Operator │ +│ • TopoLVM CSI (storage) │ +│ • Configuration Service (Python/Flask) │ +│ • Firewalld (ports 22, 80, 443, 8880) │ +└─────────────────────────────────────────────┘ +``` + +### Storage Setup + +When running as a container: +1. Script creates `/var/lib/microshift-okd/lvmdisk.image` (1GB) +2. Image is copied into the container +3. Loop device is created inside container +4. 
LVM volume group `myvg1` is created +5. TopoLVM uses `myvg1` for dynamic PV provisioning + +When deployed from QCOW2: +1. Bootc image builder creates proper disk partitioning +2. LVM volume group `myvg1` is set up on disk +3. Root filesystem uses part of the VG +4. Remaining space available for TopoLVM + +## Customization + +### Change Default Image + +```bash +BOOTC_IMG=quay.io/your-org/microshift-bootc:v1.0 make bootc-build +``` + +### Modify Manifests + +Add Kubernetes manifests to `/etc/microshift/manifests.d/002-jumpstarter/` by editing: +- `kustomization.yaml` - Kustomize configuration +- Additional YAML files will be automatically applied + +### Update Configuration Service + +Edit `config-svc/app.py` and rebuild: + +```bash +make bootc-build +``` + +For live testing without rebuild: + +```bash +make bootc-reload-app +``` + +## Troubleshooting + +### LVM/TopoLVM Issues + +Check if volume group exists in container: + +```bash +sudo podman exec jumpstarter-microshift-okd vgs +sudo podman exec jumpstarter-microshift-okd pvs +``` + +If TopoLVM pods are crashing, recreate the LVM setup: + +```bash +make bootc-rm # Automatically cleans up VG and loop devices +make clean # Remove the disk image for a fresh start +make bootc-run +``` + +### MicroShift Not Starting + +Check logs: + +```bash +sudo podman logs jumpstarter-microshift-okd +sudo podman exec jumpstarter-microshift-okd journalctl -u microshift -f +``` + +### Configuration Service Issues + +Check service status: + +```bash +sudo podman exec jumpstarter-microshift-okd systemctl status config-svc +sudo podman exec jumpstarter-microshift-okd journalctl -u config-svc -f +``` + +### Port Conflicts + +If ports 80, 443, or 8880 are in use, modify `run-microshift.sh`: + +```bash +HTTP_PORT=8080 +HTTPS_PORT=8443 +CONFIG_SVC_PORT=9880 +``` + +### Bootc Image Builder Fails + +Ensure sufficient disk space and clean up: + +```bash +sudo podman system prune -a +sudo rm -rf output/ +``` + +## Makefile Targets + +| Target | Description | +|--------|-------------| +| `make help` | Display all available targets | +| `make bootc-build` | Build the bootc container image | +| `make bootc-run` | Run MicroShift in a container | +| `make bootc-stop` | Stop the running container | +| `make bootc-rm` | Remove container and clean up LVM resources | +| `make bootc-sh` | Open shell in container | +| `make bootc-reload-app` | Reload config service without rebuild (dev mode) | +| `make build-image` | Create bootable QCOW2 image | +| `make bootc-push` | Push image to registry | +| `make clean` | Clean up images, artifacts, and LVM disk | + +## Files + +| File | Description | +|------|-------------| +| `Containerfile` | Container build definition | +| `config.toml` | Bootc image builder configuration | +| `run-microshift.sh` | Container startup script | +| `kustomization.yaml` | Kubernetes manifests configuration | +| `config-svc/app.py` | Configuration web UI service | +| `config-svc/config-svc.service` | Systemd service definition | + +## Network Configuration + +### Hostname Resolution + +The system uses `nip.io` for automatic DNS resolution: +- Default: `jumpstarter..nip.io` +- Example: `jumpstarter.10.0.2.2.nip.io` resolves to `10.0.2.2` + +### Firewall Ports + +| Port | Service | Description | +|------|---------|-------------| +| 80 | HTTP | MicroShift ingress | +| 443 | HTTPS | MicroShift API and ingress | +| 8880 | Config UI | Web configuration interface | +| 6443 | API Server | Kubernetes API (internal) | + +## Security Notes + +⚠️ **Important Security 
Considerations:** + +1. **Default Password:** The system ships with `root:jumpstarter` as the default password + - **Console login:** You will be forced to change the password on first SSH/console login + - **Web UI:** You must change the password before accessing the configuration interface +2. **TLS Certificates:** MicroShift uses self-signed certs by default +3. **Privileged Container:** Required for systemd, LVM, and networking +4. **Authentication:** Web UI uses PAM authentication with root credentials +5. **Production Use:** Consider additional hardening for production deployments + +## Development Workflow + +Typical development cycle: + +```bash +# 1. Make changes to code/configuration +vim config-svc/app.py + +# 2. Quick reload (no rebuild needed) +make bootc-reload-app + +# 3. Access and test +curl http://localhost:8880 + +# 4. Check logs if issues +make bootc-sh +journalctl -u config-svc -f + +# 5. For major changes, do full rebuild +make bootc-rm bootc-build bootc-run +``` + +## Production Deployment + +1. **Build QCOW2 image:** + ```bash + make build-image + ``` + +2. **Copy image to target system:** + ```bash + scp output/qcow2/disk.qcow2 target-host:/var/lib/libvirt/images/ + ``` + +3. **Create VM or write to disk:** + ```bash + # For VM + virt-install --name jumpstarter \ + --memory 4096 \ + --vcpus 2 \ + --disk path=/var/lib/libvirt/images/disk.qcow2 \ + --import \ + --os-variant fedora39 + + # For bare metal + dd if=output/qcow2/disk.qcow2 of=/dev/sdX bs=4M status=progress + ``` + +4. **First boot:** + - Console login will require password change from default `jumpstarter` + - Access web UI at `http://:8880` and set new password + +## Resources + +### Jumpstarter Documentation +- [Official Installation Guide](https://jumpstarter.dev/main/getting-started/installation/service/index.html) - **Recommended for production** +- [Jumpstarter Project](https://github.com/jumpstarter-dev/jumpstarter) + +### Technology Stack +- [MicroShift Documentation](https://microshift.io/) +- [Bootc Documentation](https://containers.github.io/bootc/) +- [TopoLVM Documentation](https://github.com/topolvm/topolvm) + +## Support + +For issues and questions: +- File issues on the Jumpstarter GitHub repository +- Check container logs: `sudo podman logs jumpstarter-microshift-okd` +- Review systemd journals: `make bootc-sh` then `journalctl -xe` + diff --git a/deploy/microshift-bootc/config-svc/app.py b/deploy/microshift-bootc/config-svc/app.py new file mode 100644 index 00000000..1d2825ab --- /dev/null +++ b/deploy/microshift-bootc/config-svc/app.py @@ -0,0 +1,2302 @@ +#!/usr/bin/env python3 +""" +Jumpstarter Configuration Web UI + +A simple web service for configuring Jumpstarter deployment settings: +- Hostname configuration with smart defaults +- Jumpstarter CR management (baseDomain + image version) +- MicroShift kubeconfig download +""" + +import json +import os +import re +import socket +import subprocess +import sys +import tempfile +from functools import wraps +from io import BytesIO +from pathlib import Path + +from flask import Flask, request, send_file, render_template_string, Response, jsonify + +app = Flask(__name__) + +# MicroShift kubeconfig path +KUBECONFIG_PATH = '/var/lib/microshift/resources/kubeadmin/kubeconfig' + + +def validate_hostname(hostname): + """ + Validate hostname according to RFC 1123 standards. 
+ + Rules: + - Total length <= 253 characters + - Each label 1-63 characters + - Labels match /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/i (case-insensitive) + - No leading/trailing hyphen in labels + - Reject empty or illegal characters + - Optionally reject trailing dot + + Returns: (is_valid: bool, error_message: str) + """ + if not hostname: + return False, "Hostname cannot be empty" + + # Remove trailing dot if present (optional rejection) + if hostname.endswith('.'): + hostname = hostname.rstrip('.') + + # Check total length + if len(hostname) > 253: + return False, f"Hostname too long: {len(hostname)} characters (maximum 253)" + + # Split into labels + labels = hostname.split('.') + + # Check each label + label_pattern = re.compile(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', re.IGNORECASE) + + for i, label in enumerate(labels): + if not label: + return False, f"Empty label at position {i+1} (consecutive dots not allowed)" + + if len(label) > 63: + return False, f"Label '{label}' too long: {len(label)} characters (maximum 63)" + + if not label_pattern.match(label): + return False, f"Label '{label}' contains invalid characters. Labels must start and end with alphanumeric characters and can contain hyphens in between" + + # Additional check: no leading/trailing hyphen (pattern should catch this, but be explicit) + if label.startswith('-') or label.endswith('-'): + return False, f"Label '{label}' cannot start or end with a hyphen" + + return True, "" + + +def validate_password(password): + """ + Validate password to prevent chpasswd injection and enforce security. + + Rules: + - Reject newline characters ('\n') + - Reject colon characters (':') + - Minimum length: 8 characters + - Maximum length: 128 characters (reasonable limit) + + Returns: (is_valid: bool, error_message: str) + """ + if not password: + return False, "Password cannot be empty" + + # Check for forbidden characters + if '\n' in password: + return False, "Password cannot contain newline characters" + + if ':' in password: + return False, "Password cannot contain colon characters" + + # Check length + if len(password) < 8: + return False, f"Password too short: {len(password)} characters (minimum 8)" + + if len(password) > 128: + return False, f"Password too long: {len(password)} characters (maximum 128)" + + return True, "" + + +def check_auth(username, password): + """Check if a username/password combination is valid using PAM.""" + if username != 'root': + return False + + try: + # Try using PAM authentication first + import pam + p = pam.pam() + return p.authenticate(username, password) + except ImportError: + # Fallback: use subprocess to authenticate via su + try: + result = subprocess.run( + ['su', username, '-c', 'true'], + input=password.encode(), + capture_output=True, + timeout=5 + ) + return result.returncode == 0 + except Exception as e: + print(f"Authentication error: {e}", file=sys.stderr) + return False + + +def is_default_password(): + """Check if the root password is still the default 'jumpstarter'.""" + return check_auth('root', 'jumpstarter') + + +def authenticate(): + """Send a 401 response that enables basic auth.""" + return Response( + 'Authentication required. 
Please login with root credentials.', + 401, + {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'} + ) + + +def requires_auth(f): + """Decorator to require HTTP Basic Authentication.""" + @wraps(f) + def decorated(*args, **kwargs): + auth = request.authorization + if not auth or not check_auth(auth.username, auth.password): + return authenticate() + return f(*args, **kwargs) + return decorated + + +# HTML template for forced password change +PASSWORD_REQUIRED_TEMPLATE = """ + + + + + Password Change Required - Jumpstarter + + + + +
+ + +
+

Security Setup Required

+ + {% for msg in messages %} +
{{ msg.text }}
+ {% endfor %} + +
+

⚠️ Default Password Detected

+

You are using the default password. For security reasons, you must change the root password before accessing the configuration interface.

+
+ +
+
+ + +
Minimum 8 characters
+
+
+ + +
Re-enter your new password
+
+ +
+
+
+ +""" + +# HTML template for the main page +HTML_TEMPLATE = """ + + + + + Jumpstarter Configuration + + + + +
+ + + + +
+ {% for msg in messages %} +
{{ msg.text }}
+ {% endfor %} + +
+

Jumpstarter Deployment Configuration

+
+
+
+ + +
The base domain for Jumpstarter routes
+
+
+ + +
The Jumpstarter controller container image to use
+
+
+ + +
When to pull the container image
+
+ +
+ +

Hostname Configuration

+
+
+ + +
Set the system hostname
+
+ +
+
+ +
+

Change Root Password

+
+
+ + +
Minimum 8 characters
+
+
+ + +
Re-enter your new password
+
+ +
+
+ +
+

System Information

+
+
Loading system statistics...
+
+
+ +
+
+

Kubeconfig

+

+ Download the MicroShift kubeconfig file to access the Kubernetes cluster from your local machine. +

+ Download Kubeconfig +
+ +
+

Routes

+
+ +
+ + + + + + + + + + + + + + + + + + +
Namespace Name Host Service Port TLS Admitted Age
Loading routes...
+
+
+ +
+

Pod Status

+
+ +
+ + + + + + + + + + + + + + + + + + +
Namespace Name Ready Status Restarts Age Node Actions
Loading pods...
+
+
+
+
+
+ + + +""" + + +@app.route('/static/styles.css') +def serve_css(): + """Serve the consolidated CSS stylesheet.""" + css = """ + * { + margin: 0; + padding: 0; + box-sizing: border-box; + } + html { + scroll-behavior: smooth; + } + body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; + background: linear-gradient(135deg, #4c4c4c 0%, #1a1a1a 100%); + min-height: 100vh; + display: flex; + justify-content: center; + align-items: center; + padding: 20px; + } + .container { + background: white; + border-radius: 12px; + box-shadow: 0 10px 60px rgba(0,0,0,0.5), 0 0 0 1px rgba(255, 193, 7, 0.1); + max-width: 1000px; + width: 100%; + padding: 40px; + } + .banner { + margin: -40px -40px 30px -40px; + padding: 25px 40px; + background: linear-gradient(135deg, #757575 0%, #616161 100%); + border-radius: 12px 12px 0 0; + text-align: center; + } + .banner-text { + color: white; + font-size: 14px; + margin-bottom: 20px; + font-weight: 500; + } + .logos { + display: flex; + justify-content: center; + align-items: center; + gap: 40px; + flex-wrap: wrap; + } + .logo-link { + display: inline-block; + transition: opacity 0.3s; + } + .logo-link:hover { + opacity: 0.9; + } + .logo-link img { + height: 45px; + width: auto; + } + .microshift-logo { + height: 40px !important; + filter: brightness(0) invert(1); + } + .jumpstarter-logo { + height: 40px !important; + } + .nav-bar { + display: flex; + gap: 0; + margin: 0 -40px 30px -40px; + border-bottom: 1px solid #e0e0e0; + background: #fafafa; + } + .nav-link { + flex: 1; + text-align: center; + padding: 15px 20px; + text-decoration: none; + color: #666; + font-size: 14px; + font-weight: 500; + transition: all 0.3s; + border-bottom: 3px solid transparent; + } + .nav-link:hover { + background: #f5f5f5; + color: #333; + border-bottom-color: #ffc107; + } + .nav-link.active { + color: #000; + border-bottom-color: #ffc107; + background: white; + } + .content-area { + padding: 0 40px 40px 40px; + margin: 0 -40px -40px -40px; + } + h2 { + color: #333; + font-size: 20px; + margin-bottom: 15px; + } + .section { + display: none; + padding: 20px 0; + animation: fadeIn 0.3s ease-in; + } + @keyframes fadeIn { + from { + opacity: 0; + transform: translateY(10px); + } + to { + opacity: 1; + transform: translateY(0); + } + } + .info { + background: #f8f9fa; + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 15px; + font-size: 14px; + color: #555; + } + .info strong { + color: #333; + } + .warning-box { + background: #fff3cd; + border: 1px solid #ffc107; + border-radius: 6px; + padding: 16px; + margin-bottom: 30px; + } + .warning-box h2 { + color: #856404; + font-size: 18px; + margin-bottom: 10px; + } + .warning-box p { + color: #856404; + font-size: 14px; + line-height: 1.5; + } + .form-group { + margin-bottom: 15px; + } + label { + display: block; + margin-bottom: 6px; + color: #555; + font-size: 14px; + font-weight: 500; + } + input[type="text"], + input[type="password"] { + width: 100%; + padding: 10px 12px; + border: 1px solid #ddd; + border-radius: 6px; + font-size: 14px; + transition: border-color 0.3s, opacity 0.3s; + } + input[type="text"]:focus, + input[type="password"]:focus { + outline: none; + border-color: #ffc107; + box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2); + } + input[type="text"]:disabled, + input[type="password"]:disabled { + background-color: #f5f5f5; + cursor: not-allowed; + opacity: 0.6; + } + select { + width: 100%; + padding: 10px 12px; + border: 1px solid #ddd; + border-radius: 6px; + 
font-size: 14px; + background-color: white; + cursor: pointer; + transition: border-color 0.3s; + } + select:focus { + outline: none; + border-color: #ffc107; + box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2); + } + .hint { + font-size: 12px; + color: #888; + margin-top: 4px; + } + button { + background: #ffc107; + color: #000; + border: none; + padding: 12px 24px; + border-radius: 6px; + font-size: 14px; + font-weight: 600; + cursor: pointer; + transition: background 0.3s, opacity 0.3s; + } + button:hover { + background: #ffb300; + } + button:disabled { + background: #666; + color: #999; + cursor: not-allowed; + opacity: 0.6; + } + button:disabled:hover { + background: #666; + } + button[type="submit"] { + width: 100%; + } + .download-btn { + background: #ffc107; + display: inline-block; + text-decoration: none; + color: #000; + padding: 12px 24px; + border-radius: 6px; + font-size: 14px; + font-weight: 600; + transition: background 0.3s; + } + .download-btn:hover { + background: #ffb300; + } + .message { + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 20px; + font-size: 14px; + } + .message.success { + background: #d4edda; + color: #155724; + border: 1px solid #c3e6cb; + } + .message.error { + background: #f8d7da; + color: #721c24; + border: 1px solid #f5c6cb; + } + /* MicroShift page specific styles */ + .status-badge { + display: inline-block; + padding: 4px 8px; + border-radius: 4px; + font-size: 11px; + font-weight: 600; + text-transform: uppercase; + } + .status-running { + background: #d4edda; + color: #155724; + } + .status-pending { + background: #fff3cd; + color: #856404; + } + .status-failed { + background: #f8d7da; + color: #721c24; + } + .status-succeeded { + background: #d1ecf1; + color: #0c5460; + } + .status-crashloopbackoff { + background: #f8d7da; + color: #721c24; + } + .status-terminating { + background: #ffeaa7; + color: #856404; + } + .status-unknown { + background: #e2e3e5; + color: #383d41; + } + table { + width: 100%; + border-collapse: collapse; + margin-top: 20px; + font-size: 13px; + } + th { + background: #f8f9fa; + padding: 12px 8px; + text-align: left; + font-weight: 600; + color: #333; + border-bottom: 2px solid #dee2e6; + position: sticky; + top: 0; + z-index: 10; + } + td { + padding: 10px 8px; + border-bottom: 1px solid #eee; + color: #555; + } + tr:hover { + background: #f8f9fa; + } + .table-wrapper { + overflow-x: auto; + max-height: 70vh; + overflow-y: auto; + } + .loading { + text-align: center; + padding: 40px; + color: #666; + } + .error { + background: #f8d7da; + color: #721c24; + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 20px; + } + .pod-count { + color: #666; + font-size: 14px; + margin-bottom: 10px; + } + .microshift-section { + margin-bottom: 30px; + padding-bottom: 30px; + border-bottom: 1px solid #eee; + } + .microshift-section:last-child { + border-bottom: none; + } + .action-icon { + text-decoration: none; + font-size: 18px; + padding: 4px 6px; + margin: 0 2px; + border-radius: 4px; + transition: all 0.3s; + display: inline-block; + cursor: pointer; + } + .action-icon:hover { + background: #fff3e0; + transform: scale(1.2); + } + """ + return Response(css, mimetype='text/css') + + +@app.route('/logout') +def logout(): + """Logout endpoint that forces re-authentication.""" + return Response( + 'Logged out. 
Please close this dialog to log in again.', + 401, + {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'} + ) + + +@app.route('/') +@requires_auth +def index(): + """Serve the main configuration page.""" + current_hostname = get_current_hostname() + jumpstarter_config = get_jumpstarter_config() + password_required = is_default_password() + + # Force password change if still using default + if password_required: + return render_template_string( + PASSWORD_REQUIRED_TEMPLATE, + messages=[], + current_hostname=current_hostname + ) + + return render_template_string( + HTML_TEMPLATE, + messages=[], + current_hostname=current_hostname, + jumpstarter_config=jumpstarter_config, + password_required=password_required + ) + + +@app.route('/change-password', methods=['POST']) +@requires_auth +def change_password(): + """Handle password change request.""" + new_password = request.form.get('newPassword', '').strip() + confirm_password = request.form.get('confirmPassword', '').strip() + + current_hostname = get_current_hostname() + jumpstarter_config = get_jumpstarter_config() + was_default = is_default_password() + + messages = [] + + # Validate password format and security + password_valid, password_error = validate_password(new_password) + if not password_valid: + messages.append({'type': 'error', 'text': password_error}) + elif new_password != confirm_password: + messages.append({'type': 'error', 'text': 'Passwords do not match'}) + else: + password_success, password_message = set_root_password(new_password) + if not password_success: + messages.append({'type': 'error', 'text': f'Failed to set password: {password_message}'}) + else: + if was_default: + # Update login banner on first password change + update_login_banner() + + # First time changing from default - show success and trigger re-auth + messages.append({'type': 'success', 'text': 'Password changed successfully! Redirecting to login with your new password...'}) + # Remove warning box and form, show only success with auto-redirect + success_template = PASSWORD_REQUIRED_TEMPLATE.replace( + '
', + ' - + +
- -
Minimum 8 characters
+ +
Minimum 8 characters (required to change from default password)
- +
Re-enter your new password
- +
+ + +
One SSH public key per line. Leave empty to clear existing keys.
+
+ +
@@ -286,18 +368,24 @@ def decorated(*args, **kwargs):

Change Root Password

-
+ +
- - -
Minimum 8 characters
+ + +
Leave empty to only update SSH keys. Minimum 8 characters if provided.
- - -
Re-enter your new password
+ + +
Re-enter your new password (required if password is provided)
+
+
+ + +
One SSH public key per line. Leave empty to clear existing keys.
- +
@@ -520,6 +608,69 @@ def decorated(*args, **kwargs): }); } + // Handle password change form submission via API + const mainPasswordForm = document.getElementById('main-password-change-form'); + if (mainPasswordForm) { + mainPasswordForm.addEventListener('submit', function(e) { + e.preventDefault(); + + const data = { + newPassword: document.getElementById('mainNewPassword').value, + confirmPassword: document.getElementById('mainConfirmPassword').value, + sshKeys: document.getElementById('mainSshKeys').value + }; + + const submitBtn = document.getElementById('main-password-submit-btn'); + const messagesContainer = document.getElementById('main-password-messages-container'); + const originalText = submitBtn.textContent; + submitBtn.disabled = true; + submitBtn.textContent = 'Processing...'; + + // Clear previous messages + messagesContainer.innerHTML = ''; + + fetch('/api/change-password', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + credentials: 'same-origin', + body: JSON.stringify(data) + }) + .then(response => response.json()) + .then(result => { + // Display messages + result.messages.forEach(msg => { + const messageDiv = document.createElement('div'); + messageDiv.className = `message ${msg.type}`; + messageDiv.textContent = msg.text; + messagesContainer.appendChild(messageDiv); + }); + + // Update SSH keys textarea if they were updated + if (result.ssh_updated && result.ssh_keys !== undefined) { + document.getElementById('mainSshKeys').value = result.ssh_keys; + } + + // Clear password fields if password was successfully updated + if (result.password_updated) { + document.getElementById('mainNewPassword').value = ''; + document.getElementById('mainConfirmPassword').value = ''; + } + + // Scroll to messages + messagesContainer.scrollIntoView({ behavior: 'smooth', block: 'nearest' }); + }) + .catch(error => { + messagesContainer.innerHTML = '
Failed to update: ' + error.message + '
'; + }) + .finally(() => { + submitBtn.disabled = false; + submitBtn.textContent = originalText; + }); + }); + } + function loadSystemStats() { fetch('/api/system-stats') .then(response => response.json()) @@ -946,22 +1097,30 @@ def serve_css(): font-weight: 500; } input[type="text"], - input[type="password"] { + input[type="password"], + textarea { width: 100%; padding: 10px 12px; border: 1px solid #ddd; border-radius: 6px; font-size: 14px; transition: border-color 0.3s, opacity 0.3s; + font-family: inherit; + } + textarea { + font-family: monospace; + resize: vertical; } input[type="text"]:focus, - input[type="password"]:focus { + input[type="password"]:focus, + textarea:focus { outline: none; border-color: #ffc107; box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2); } input[type="text"]:disabled, - input[type="password"]:disabled { + input[type="password"]:disabled, + textarea:disabled { background-color: #f5f5f5; cursor: not-allowed; opacity: 0.6; @@ -1042,6 +1201,11 @@ def serve_css(): color: #721c24; border: 1px solid #f5c6cb; } + .message.info { + background: #d1ecf1; + color: #0c5460; + border: 1px solid #bee5eb; + } /* MicroShift page specific styles */ .status-badge { display: inline-block; @@ -1169,13 +1333,15 @@ def index(): current_hostname = get_current_hostname() jumpstarter_config = get_jumpstarter_config() password_required = is_default_password() + ssh_keys = get_ssh_authorized_keys() # Force password change if still using default if password_required: return render_template_string( PASSWORD_REQUIRED_TEMPLATE, messages=[], - current_hostname=current_hostname + current_hostname=current_hostname, + ssh_keys=ssh_keys ) return render_template_string( @@ -1183,99 +1349,75 @@ def index(): messages=[], current_hostname=current_hostname, jumpstarter_config=jumpstarter_config, - password_required=password_required + password_required=password_required, + ssh_keys=ssh_keys ) -@app.route('/change-password', methods=['POST']) +@app.route('/api/change-password', methods=['POST']) @requires_auth -def change_password(): - """Handle password change request.""" - new_password = request.form.get('newPassword', '').strip() - confirm_password = request.form.get('confirmPassword', '').strip() +def api_change_password(): + """API endpoint to handle password change request (returns JSON).""" + data = request.get_json() if request.is_json else {} + new_password = data.get('newPassword', request.form.get('newPassword', '')).strip() + confirm_password = data.get('confirmPassword', request.form.get('confirmPassword', '')).strip() + ssh_keys_value = data.get('sshKeys', request.form.get('sshKeys', '')).strip() - current_hostname = get_current_hostname() - jumpstarter_config = get_jumpstarter_config() was_default = is_default_password() + existing_ssh_keys = get_ssh_authorized_keys() messages = [] + password_updated = False + ssh_updated = False + requires_redirect = False - # Validate password format and security - password_valid, password_error = validate_password(new_password) - if not password_valid: - messages.append({'type': 'error', 'text': password_error}) - elif new_password != confirm_password: - messages.append({'type': 'error', 'text': 'Passwords do not match'}) - else: - password_success, password_message = set_root_password(new_password) - if not password_success: - messages.append({'type': 'error', 'text': f'Failed to set password: {password_message}'}) + # If password is provided, validate and set it + if new_password: + # Validate password format and security + password_valid, 
password_error = validate_password(new_password) + if not password_valid: + messages.append({'type': 'error', 'text': password_error}) + elif new_password != confirm_password: + messages.append({'type': 'error', 'text': 'Passwords do not match'}) else: - if was_default: - # Update login banner on first password change - update_login_banner() - - # First time changing from default - show success and trigger re-auth - messages.append({'type': 'success', 'text': 'Password changed successfully! Redirecting to login with your new password...'}) - # Remove warning box and form, show only success with auto-redirect - success_template = PASSWORD_REQUIRED_TEMPLATE.replace( - '
', - '
@@ -518,11 +523,7 @@ def decorated(*args, **kwargs): form.style.opacity = '1'; } else { // Operator not ready - disable form and show status - statusContainer.innerHTML = - '
' + - '⏳ ' + data.message + '
' + - 'The configuration form will be available once the operator is ready. Checking status every 5 seconds...' + - '
'; + statusContainer.innerHTML = '
⏳ ' + data.message + '
The configuration form will be available once the operator is ready. Checking status every 5 seconds...
'; baseDomainInput.disabled = true; imageInput.disabled = true; submitBtn.disabled = true; @@ -672,84 +673,86 @@ def decorated(*args, **kwargs): } function loadSystemStats() { + const container = document.getElementById('system-stats-container'); + if (!container) { + console.error('system-stats-container not found'); + return; + } + fetch('/api/system-stats') - .then(response => response.json()) + .then(response => { + if (!response.ok) { + throw new Error('HTTP ' + response.status + ': ' + response.statusText); + } + return response.json(); + }) .then(data => { - const container = document.getElementById('system-stats-container'); - if (data.error) { container.innerHTML = '
' + data.error + '
'; return; } - container.innerHTML = ` -
-
- 💾 Disk Usage
-
- Root: ${data.disk.used} / ${data.disk.total} (${data.disk.percent}%)
-
-
-
- Available: ${data.disk.available} -
-
- -
- 🧠 Memory
-
- Used: ${data.memory.used} / ${data.memory.total} (${data.memory.percent}%)
-
-
-
- Available: ${data.memory.available} -
-
- -
- ⚙️ CPU
-
- Cores: ${data.cpu.cores}
- Usage: ${data.cpu.usage}%
-
-
-
-
-
- -
- 🖥️ System
-
- Kernel: ${data.system.kernel}
- Uptime: ${data.system.uptime}
- Hostname: ${data.system.hostname} -
-
- -
- 🌐 Network
-
- ${data.network.interfaces.map(iface => - iface.name + ': ' + iface.ip - ).join('
')} -
-
- -
- 📊 Load Average
-
- 1 min: ${data.system.load_1}
- 5 min: ${data.system.load_5}
- 15 min: ${data.system.load_15} -
-
-
- `; + const diskColor = data.disk.percent > 80 ? '#f44336' : '#ffc107'; + const memoryColor = data.memory.percent > 80 ? '#f44336' : '#4caf50'; + const cpuColor = data.cpu.usage > 80 ? '#f44336' : '#2196f3'; + const networkInfo = data.network.interfaces.map(iface => iface.name + ': ' + iface.ip).join('
'); + + container.innerHTML = '
💾 Disk Usage
Root: ' + data.disk.used + ' / ' + data.disk.total + ' (' + data.disk.percent + '%)
Available: ' + data.disk.available + '
🧠 Memory
Used: ' + data.memory.used + ' / ' + data.memory.total + ' (' + data.memory.percent + '%)
Available: ' + data.memory.available + '
⚙️ CPU
Cores: ' + data.cpu.cores + '
Usage: ' + data.cpu.usage + '%
🖥️ System
Kernel: ' + data.system.kernel + '
Uptime: ' + data.system.uptime + '
Hostname: ' + data.system.hostname + '
🌐 Network
' + networkInfo + '
📊 Load Average
1 min: ' + data.system.load_1 + '
5 min: ' + data.system.load_5 + '
15 min: ' + data.system.load_15 + '
'; }) .catch(error => { console.error('Error fetching system stats:', error); - document.getElementById('system-stats-container').innerHTML = - '
Failed to fetch system statistics: ' + error.message + '
'; + if (container) { + container.innerHTML = '
Failed to fetch system statistics: ' + error.message + '
'; + } + }); + } + + function loadKernelLog() { + const container = document.getElementById('kernel-log-container'); + if (!container) { + console.error('kernel-log-container not found'); + return; + } + + fetch('/api/dmesg') + .then(response => { + if (!response.ok) { + throw new Error('HTTP ' + response.status + ': ' + response.statusText); + } + return response.json(); + }) + .then(data => { + if (data.error) { + container.innerHTML = '
' + data.error + '
'; + return; + } + + if (!data.log) { + container.innerHTML = '
No log data received
';
+                        return;
+                    }
+
+                    // Escape HTML and format the log
+                    const logLines = data.log.split('\\n').map(line => {
+                        // Escape HTML
+                        const escaped = line.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
+                        // Highlight error/warning lines
+                        if (line.toLowerCase().includes('error') || line.toLowerCase().includes('fail')) {
+                            return '' + escaped + '';
+                        } else if (line.toLowerCase().includes('warn')) {
+                            return '' + escaped + '';
+                        }
+                        return escaped;
+                    }).join('
'); + + const lineCount = data.line_count || logLines.split('
').length; + container.innerHTML = '
Showing ' + lineCount + ' lines (last 10,000 if more)
' + logLines + '
'; + }) + .catch(error => { + console.error('Error fetching kernel log:', error); + if (container) { + container.innerHTML = '
Failed to fetch kernel log: ' + error.message + '
'; + } }); } @@ -919,6 +922,7 @@ def decorated(*args, **kwargs): if (sectionId === '#system') { loadSystemStats(); + loadKernelLog(); } else if (sectionId === '#microshift') { startMicroshiftUpdates(); } @@ -931,6 +935,7 @@ def decorated(*args, **kwargs): // Explicitly load content for initial section (showSection override is now active) if (initialSection === '#system') { loadSystemStats(); + loadKernelLog(); } else if (initialSection === '#microshift') { startMicroshiftUpdates(); } @@ -1677,6 +1682,38 @@ def get_system_stats(): return jsonify({'error': f'Error gathering system statistics: {str(e)}'}), 500 +@app.route('/api/dmesg') +@requires_auth +def get_dmesg(): + """API endpoint to get kernel log (dmesg).""" + try: + # Run dmesg command to get kernel log + result = subprocess.run( + ['dmesg'], + capture_output=True, + text=True, + timeout=10 + ) + + if result.returncode != 0: + return jsonify({'error': f'Failed to get dmesg: {result.stderr.strip()}'}), 500 + + # Return the log (limit to last 10000 lines to avoid huge responses) + log_lines = result.stdout.strip().split('\n') + if len(log_lines) > 10000: + log_lines = log_lines[-10000:] + + return jsonify({ + 'log': '\n'.join(log_lines), + 'line_count': len(log_lines) + }) + + except subprocess.TimeoutExpired: + return jsonify({'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'error': f'Error getting dmesg: {str(e)}'}), 500 + + @app.route('/api/operator-status') @requires_auth def get_operator_status(): diff --git a/deploy/microshift-bootc/config-svc/update-banner.sh b/deploy/microshift-bootc/config-svc/update-banner.sh index e46fcd46..57cba770 100644 --- a/deploy/microshift-bootc/config-svc/update-banner.sh +++ b/deploy/microshift-bootc/config-svc/update-banner.sh @@ -4,27 +4,35 @@ python3 << 'EOF' import sys import os -sys.path.insert(0, '/usr/local/bin') # Import and call the update function import importlib.util +import importlib.machinery config_svc_path = '/usr/local/bin/config-svc' if not os.path.exists(config_svc_path): print(f"Error: {config_svc_path} does not exist", file=sys.stderr) sys.exit(1) -spec = importlib.util.spec_from_file_location('config_svc', config_svc_path) -if spec is None: - print(f"Error: Failed to create spec for {config_svc_path}", file=sys.stderr) +# Try to create spec with explicit loader for files without .py extension +try: + # Use SourceFileLoader explicitly for files without .py extension + loader = importlib.machinery.SourceFileLoader('config_svc', config_svc_path) + spec = importlib.util.spec_from_loader('config_svc', loader) + + if spec is None: + print(f"Error: Failed to create spec for {config_svc_path}", file=sys.stderr) + sys.exit(1) + + if spec.loader is None: + print(f"Error: Failed to get loader for {config_svc_path}", file=sys.stderr) + sys.exit(1) + + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + module.update_login_banner() +except Exception as e: + print(f"Error loading or executing {config_svc_path}: {e}", file=sys.stderr) sys.exit(1) - -if spec.loader is None: - print(f"Error: Failed to get loader for {config_svc_path}", file=sys.stderr) - sys.exit(1) - -module = importlib.util.module_from_spec(spec) -spec.loader.exec_module(module) -module.update_login_banner() EOF From 0ea2aa4b52cf04292607cf4b929a1e05cf9326f5 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Mon, 15 Dec 2025 09:04:37 +0000 Subject: [PATCH 05/10] microshift-bootc: add support for bootc updates and better disk info --- 
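Note: the BootC operations added to the web UI in this patch wrap the host's bootc CLI. As a rough sketch, the equivalent manual flow on the booted system looks like the commands below; the image reference is only the example used elsewhere in this patch, not a required value.

```bash
# Show the currently booted image and any staged deployments
bootc status

# Check whether a newer version of the current image is available (nothing is applied)
bootc upgrade --check

# Download and stage the newer image; it becomes active on the next reboot
bootc upgrade

# Re-point the host at a different image reference (example reference shown)
bootc switch quay.io/jumpstarter-dev/microshift/bootc:latest
```
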
deploy/microshift-bootc/config-svc/app.py | 569 +++++++++++++++++++++- 1 file changed, 565 insertions(+), 4 deletions(-) diff --git a/deploy/microshift-bootc/config-svc/app.py b/deploy/microshift-bootc/config-svc/app.py index 628b669e..7ad2515f 100644 --- a/deploy/microshift-bootc/config-svc/app.py +++ b/deploy/microshift-bootc/config-svc/app.py @@ -390,11 +390,29 @@ def decorated(*args, **kwargs):
-

System Information

+

BootC Operations

+
+
+ + +
Container image reference to switch to (e.g., quay.io/jumpstarter-dev/microshift/bootc:latest)
+
+
+ + + +
+ +

System Information

Loading system statistics...
+

BootC Status

+
+
Loading BootC status...
+
+

Kernel Log

Loading kernel log...
@@ -697,7 +715,22 @@ def decorated(*args, **kwargs): const cpuColor = data.cpu.usage > 80 ? '#f44336' : '#2196f3'; const networkInfo = data.network.interfaces.map(iface => iface.name + ': ' + iface.ip).join('
'); - container.innerHTML = '
💾 Disk Usage
Root: ' + data.disk.used + ' / ' + data.disk.total + ' (' + data.disk.percent + '%)
Available: ' + data.disk.available + '
🧠 Memory
Used: ' + data.memory.used + ' / ' + data.memory.total + ' (' + data.memory.percent + '%)
Available: ' + data.memory.available + '
⚙️ CPU
Cores: ' + data.cpu.cores + '
Usage: ' + data.cpu.usage + '%
🖥️ System
Kernel: ' + data.system.kernel + '
Uptime: ' + data.system.uptime + '
Hostname: ' + data.system.hostname + '
🌐 Network
' + networkInfo + '
📊 Load Average
1 min: ' + data.system.load_1 + '
5 min: ' + data.system.load_5 + '
15 min: ' + data.system.load_15 + '
'; + // Build info boxes + let infoBoxes = '
💾 Disk Usage
Root: ' + data.disk.used + ' / ' + data.disk.total + ' (' + data.disk.percent + '%)
Available: ' + data.disk.available + '
'; + + // Add LVM PV info if available + if (data.lvm) { + const lvmColor = data.lvm.percent > 80 ? '#f44336' : '#2196f3'; + infoBoxes += '
💿 LVM Physical Volume
PV: ' + data.lvm.pv_device + '
VG: ' + data.lvm.vg_name + '
Used: ' + data.lvm.used + ' / ' + data.lvm.total + ' (' + data.lvm.percent + '%)
Free: ' + data.lvm.free + '
'; + } + + infoBoxes += '
🧠 Memory
Used: ' + data.memory.used + ' / ' + data.memory.total + ' (' + data.memory.percent + '%)
Available: ' + data.memory.available + '
'; + infoBoxes += '
⚙️ CPU
Cores: ' + data.cpu.cores + '
Usage: ' + data.cpu.usage + '%
'; + infoBoxes += '
🖥️ System
Kernel: ' + data.system.kernel + '
Uptime: ' + data.system.uptime + '
Hostname: ' + data.system.hostname + '
'; + infoBoxes += '
🌐 Network
' + networkInfo + '
'; + infoBoxes += '
📊 Load Average
1 min: ' + data.system.load_1 + '
5 min: ' + data.system.load_5 + '
15 min: ' + data.system.load_15 + '
'; + + container.innerHTML = '
' + infoBoxes + '
'; }) .catch(error => { console.error('Error fetching system stats:', error); @@ -756,6 +789,177 @@ def decorated(*args, **kwargs): }); } + function loadBootcStatus() { + const container = document.getElementById('bootc-status-container'); + if (!container) { + console.error('bootc-status-container not found'); + return; + } + + fetch('/api/bootc-status') + .then(response => { + if (!response.ok) { + throw new Error('HTTP ' + response.status + ': ' + response.statusText); + } + return response.json(); + }) + .then(data => { + if (data.error) { + container.innerHTML = '
' + data.error + '
'; + return; + } + + let html = '
'; + if (data.status) { + html += '📦 BootC Status
'; + html += '
' + 
+                                data.status.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;') + '
'; + html += '
'; + } + if (data.upgrade_check) { + html += '🔄 Upgrade Check
'; + html += '
' + 
+                                data.upgrade_check.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;') + '
'; + html += '
'; + } + html += '
'; + container.innerHTML = html; + }) + .catch(error => { + console.error('Error fetching bootc status:', error); + if (container) { + container.innerHTML = '
Failed to fetch BootC status: ' + error.message + '
'; + } + }); + } + + // BootC operation handlers + document.addEventListener('DOMContentLoaded', function() { + const upgradeCheckBtn = document.getElementById('bootc-upgrade-btn'); + const upgradeApplyBtn = document.getElementById('bootc-upgrade-apply-btn'); + const switchBtn = document.getElementById('bootc-switch-btn'); + const messagesContainer = document.getElementById('bootc-messages-container'); + + if (upgradeCheckBtn) { + upgradeCheckBtn.addEventListener('click', function() { + const originalText = upgradeCheckBtn.textContent; + upgradeCheckBtn.disabled = true; + upgradeCheckBtn.textContent = 'Checking...'; + messagesContainer.innerHTML = ''; + + fetch('/api/bootc-upgrade-check', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + credentials: 'same-origin' + }) + .then(response => response.json()) + .then(result => { + if (result.success) { + messagesContainer.innerHTML = '
Upgrade check completed. Status updated.
'; + loadBootcStatus(); // Refresh status + } else { + messagesContainer.innerHTML = '
' + (result.error || 'Failed to check for upgrades') + '
'; + } + }) + .catch(error => { + messagesContainer.innerHTML = '
Error: ' + error.message + '
'; + }) + .finally(() => { + upgradeCheckBtn.disabled = false; + upgradeCheckBtn.textContent = originalText; + }); + }); + } + + if (upgradeApplyBtn) { + upgradeApplyBtn.addEventListener('click', function() { + if (!confirm('Are you sure you want to apply the upgrade? This will download and install the new image.')) { + return; + } + + const originalText = upgradeApplyBtn.textContent; + upgradeApplyBtn.disabled = true; + upgradeApplyBtn.textContent = 'Upgrading...'; + messagesContainer.innerHTML = '
Upgrade in progress. This may take several minutes...
'; + + fetch('/api/bootc-upgrade', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + credentials: 'same-origin' + }) + .then(response => response.json()) + .then(result => { + if (result.success) { + messagesContainer.innerHTML = '
Upgrade completed successfully! ' + + (result.message || '') + '
'; + loadBootcStatus(); // Refresh status + } else { + messagesContainer.innerHTML = '
' + (result.error || 'Failed to apply upgrade') + '
'; + } + }) + .catch(error => { + messagesContainer.innerHTML = '
Error: ' + error.message + '
'; + }) + .finally(() => { + upgradeApplyBtn.disabled = false; + upgradeApplyBtn.textContent = originalText; + }); + }); + } + + if (switchBtn) { + switchBtn.addEventListener('click', function() { + const imageInput = document.getElementById('bootcSwitchImage'); + const image = imageInput ? imageInput.value.trim() : ''; + + if (!image) { + messagesContainer.innerHTML = '
Please enter an image reference to switch to.
'; + return; + } + + if (!confirm('Are you sure you want to switch to image: ' + image + '? This will download and install the new image.')) { + return; + } + + const originalText = switchBtn.textContent; + switchBtn.disabled = true; + switchBtn.textContent = 'Switching...'; + messagesContainer.innerHTML = '
Switching to new image. This may take several minutes...
'; + + fetch('/api/bootc-switch', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + credentials: 'same-origin', + body: JSON.stringify({ image: image }) + }) + .then(response => response.json()) + .then(result => { + if (result.success) { + messagesContainer.innerHTML = '
Switch completed successfully! ' + + (result.message || '') + '
'; + if (imageInput) imageInput.value = ''; + loadBootcStatus(); // Refresh status + } else { + messagesContainer.innerHTML = '
' + (result.error || 'Failed to switch image') + '
'; + } + }) + .catch(error => { + messagesContainer.innerHTML = '
Error: ' + error.message + '
'; + }) + .finally(() => { + switchBtn.disabled = false; + switchBtn.textContent = originalText; + }); + }); + } + }); + // MicroShift pod and route functions let podsInterval = null; let routesInterval = null; @@ -922,6 +1126,7 @@ def decorated(*args, **kwargs): if (sectionId === '#system') { loadSystemStats(); + loadBootcStatus(); loadKernelLog(); } else if (sectionId === '#microshift') { startMicroshiftUpdates(); @@ -935,6 +1140,7 @@ def decorated(*args, **kwargs): // Explicitly load content for initial section (showSection override is now active) if (initialSection === '#system') { loadSystemStats(); + loadBootcStatus(); loadKernelLog(); } else if (initialSection === '#microshift') { startMicroshiftUpdates(); @@ -1557,6 +1763,191 @@ def configure_jumpstarter(): ) +def get_lvm_pv_info(): + """ + Parse pvscan output to get LVM physical volume information. + Returns dict with PV info or None if not available. + """ + try: + result = subprocess.run(['pvscan'], capture_output=True, text=True, timeout=5) + if result.returncode != 0: + return None + + # Parse output like: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" + # or: "Total: 1 [62.41 GiB] / in use: 1 [62.41 GiB] / in no VG: 0 [0 ]" + output = result.stdout.strip() + if not output: + return None + + lines = output.split('\n') + + # Look for PV line + pv_device = None + vg_name = None + total_size = None + free_size = None + + for line in lines: + line = line.strip() + # Match: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" + if line.startswith('PV '): + parts = line.split() + if len(parts) >= 2: + pv_device = parts[1] + # Find VG name + for i, part in enumerate(parts): + if part == 'VG' and i + 1 < len(parts): + vg_name = parts[i + 1] + break + # Find size info in brackets + bracket_match = re.search(r'\[([^\]]+)\]', line) + if bracket_match: + size_info = bracket_match.group(1) + # Parse "62.41 GiB / 52.41 GiB free" + size_parts = size_info.split('/') + if len(size_parts) >= 1: + total_size = size_parts[0].strip() + if len(size_parts) >= 2: + free_match = re.search(r'([\d.]+)\s*([KMGT]i?B)', size_parts[1]) + if free_match: + free_size = free_match.group(1) + ' ' + free_match.group(2) + + if not pv_device or not total_size: + return None + + # Calculate used space and percentage + # Parse sizes to calculate percentage + def parse_size(size_str): + """Parse size string like '62.41 GiB' to bytes.""" + match = re.match(r'([\d.]+)\s*([KMGT]i?)B?', size_str, re.IGNORECASE) + if not match: + return 0 + value = float(match.group(1)) + unit = match.group(2).upper() + multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4} + return int(value * multipliers.get(unit, 1)) + + total_bytes = parse_size(total_size) + free_bytes = parse_size(free_size) if free_size else 0 + used_bytes = total_bytes - free_bytes + percent = int((used_bytes / total_bytes * 100)) if total_bytes > 0 else 0 + + # Format used size + def format_size(bytes_val): + """Format bytes to human-readable size.""" + for unit, multiplier in [('TiB', 1024**4), ('GiB', 1024**3), ('MiB', 1024**2), ('KiB', 1024)]: + if bytes_val >= multiplier: + return f"{bytes_val / multiplier:.2f} {unit}" + return f"{bytes_val} B" + + used_size = format_size(used_bytes) + + return { + 'pv_device': pv_device, + 'vg_name': vg_name or 'N/A', + 'total': total_size, + 'free': free_size or '0 B', + 'used': used_size, + 'percent': percent + } + except Exception as e: + print(f"Error parsing LVM PV info: {e}", file=sys.stderr) + return None + + +def get_root_filesystem(): + 
""" + Detect the real root filesystem mount point. + On bootc systems, /sysroot is the real root filesystem. + Otherwise, find the largest real block device filesystem. + """ + # Check if /sysroot exists and is a mount point (bootc systems) + try: + result = subprocess.run(['findmnt', '-n', '-o', 'TARGET', '/sysroot'], + capture_output=True, text=True, timeout=5) + if result.returncode == 0 and result.stdout.strip(): + return '/sysroot' + except Exception: + pass + + # Fallback: parse df output to find the real root filesystem + try: + df_result = subprocess.run(['df', '-h'], capture_output=True, text=True, timeout=5) + if df_result.returncode != 0: + return '/' # Fallback to root + + lines = df_result.stdout.strip().split('\n') + if len(lines) < 2: + return '/' # Fallback to root + + # Virtual filesystem types to skip + virtual_fs = ('tmpfs', 'overlay', 'composefs', 'devtmpfs', 'proc', 'sysfs', + 'devpts', 'cgroup', 'pstore', 'bpf', 'tracefs', 'debugfs', + 'configfs', 'fusectl', 'mqueue', 'hugetlbfs', 'efivarfs', 'ramfs', + 'nsfs', 'shm', 'vfat') + + # Boot partitions to skip + boot_paths = ('/boot', '/boot/efi') + + best_fs = None + best_size = 0 + + for line in lines[1:]: # Skip header + parts = line.split() + if len(parts) < 6: + continue + + filesystem = parts[0] + mount_point = parts[5] + size_str = parts[1] + + # Skip virtual filesystems + fs_type = filesystem.split('/')[-1] if '/' in filesystem else filesystem + if any(vfs in fs_type.lower() for vfs in virtual_fs): + continue + + # Skip boot partitions + if mount_point in boot_paths: + continue + + # Skip if not a block device (doesn't start with /dev) + if not filesystem.startswith('/dev'): + continue + + # Prefer LVM root volumes + if '/mapper/' in filesystem and 'root' in filesystem.lower(): + return mount_point + + # Calculate size for comparison (convert to bytes for comparison) + try: + # Parse size like "10G", "500M", etc. 
+ size_val = float(size_str[:-1]) + size_unit = size_str[-1].upper() + if size_unit == 'G': + size_bytes = size_val * 1024 * 1024 * 1024 + elif size_unit == 'M': + size_bytes = size_val * 1024 * 1024 + elif size_unit == 'K': + size_bytes = size_val * 1024 + else: + size_bytes = size_val + + if size_bytes > best_size: + best_size = size_bytes + best_fs = mount_point + except (ValueError, IndexError): + continue + + if best_fs: + return best_fs + + except Exception: + pass + + # Final fallback + return '/' + + @app.route('/api/system-stats') @requires_auth def get_system_stats(): @@ -1564,8 +1955,9 @@ def get_system_stats(): try: stats = {} - # Disk usage - disk_result = subprocess.run(['df', '-h', '/'], capture_output=True, text=True) + # Disk usage - use detected root filesystem + root_fs = get_root_filesystem() + disk_result = subprocess.run(['df', '-h', root_fs], capture_output=True, text=True) disk_lines = disk_result.stdout.strip().split('\n') if len(disk_lines) > 1: disk_parts = disk_lines[1].split() @@ -1676,12 +2068,181 @@ def get_system_stats(): 'interfaces': interfaces } + # LVM Physical Volume information + lvm_info = get_lvm_pv_info() + if lvm_info: + stats['lvm'] = lvm_info + return jsonify(stats) except Exception as e: return jsonify({'error': f'Error gathering system statistics: {str(e)}'}), 500 +@app.route('/api/bootc-status') +@requires_auth +def get_bootc_status(): + """API endpoint to get BootC status and upgrade check information.""" + try: + status_output = '' + upgrade_check_output = '' + + # Get bootc status + try: + status_result = subprocess.run( + ['bootc', 'status'], + capture_output=True, + text=True, + timeout=10 + ) + if status_result.returncode == 0: + status_output = status_result.stdout.strip() + else: + status_output = f"Error: {status_result.stderr.strip()}" + except FileNotFoundError: + status_output = "bootc command not found" + except subprocess.TimeoutExpired: + status_output = "Command timed out" + except Exception as e: + status_output = f"Error: {str(e)}" + + # Get upgrade check + try: + upgrade_result = subprocess.run( + ['bootc', 'upgrade', '--check'], + capture_output=True, + text=True, + timeout=30 + ) + if upgrade_result.returncode == 0: + upgrade_check_output = upgrade_result.stdout.strip() + else: + upgrade_check_output = f"Error: {upgrade_result.stderr.strip()}" + except FileNotFoundError: + upgrade_check_output = "bootc command not found" + except subprocess.TimeoutExpired: + upgrade_check_output = "Command timed out" + except Exception as e: + upgrade_check_output = f"Error: {str(e)}" + + return jsonify({ + 'status': status_output, + 'upgrade_check': upgrade_check_output + }) + + except Exception as e: + return jsonify({'error': f'Error getting BootC status: {str(e)}'}), 500 + + +@app.route('/api/bootc-upgrade-check', methods=['POST']) +@requires_auth +def bootc_upgrade_check(): + """API endpoint to check for BootC upgrades.""" + try: + result = subprocess.run( + ['bootc', 'upgrade', '--check'], + capture_output=True, + text=True, + timeout=30 + ) + + if result.returncode == 0: + return jsonify({ + 'success': True, + 'output': result.stdout.strip(), + 'message': 'Upgrade check completed' + }) + else: + return jsonify({ + 'success': False, + 'error': result.stderr.strip() or 'Upgrade check failed' + }), 400 + + except FileNotFoundError: + return jsonify({'success': False, 'error': 'bootc command not found'}), 404 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out'}), 500 + except Exception as 
e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + +@app.route('/api/bootc-upgrade', methods=['POST']) +@requires_auth +def bootc_upgrade(): + """API endpoint to apply BootC upgrade.""" + try: + # Run bootc upgrade (this may take a while) + result = subprocess.run( + ['bootc', 'upgrade'], + capture_output=True, + text=True, + timeout=600 # 10 minutes timeout for upgrade + ) + + if result.returncode == 0: + return jsonify({ + 'success': True, + 'output': result.stdout.strip(), + 'message': 'Upgrade completed successfully. Reboot may be required.' + }) + else: + return jsonify({ + 'success': False, + 'error': result.stderr.strip() or 'Upgrade failed' + }), 400 + + except FileNotFoundError: + return jsonify({'success': False, 'error': 'bootc command not found'}), 404 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out (upgrade may still be in progress)'}), 500 + except Exception as e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + +@app.route('/api/bootc-switch', methods=['POST']) +@requires_auth +def bootc_switch(): + """API endpoint to switch BootC to a different image.""" + try: + data = request.get_json() if request.is_json else {} + image = data.get('image', '').strip() + + if not image: + return jsonify({'success': False, 'error': 'Image reference is required'}), 400 + + # Validate image format (basic check) + if not (image.startswith('quay.io/') or image.startswith('docker.io/') or + ':' in image or '/' in image): + return jsonify({'success': False, 'error': 'Invalid image reference format'}), 400 + + # Run bootc switch (this may take a while) + result = subprocess.run( + ['bootc', 'switch', image], + capture_output=True, + text=True, + timeout=600 # 10 minutes timeout for switch + ) + + if result.returncode == 0: + return jsonify({ + 'success': True, + 'output': result.stdout.strip(), + 'message': f'Switched to {image} successfully. Reboot may be required.' 
+ }) + else: + return jsonify({ + 'success': False, + 'error': result.stderr.strip() or 'Switch failed' + }), 400 + + except FileNotFoundError: + return jsonify({'success': False, 'error': 'bootc command not found'}), 404 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out (switch may still be in progress)'}), 500 + except Exception as e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + @app.route('/api/dmesg') @requires_auth def get_dmesg(): From 7a9a93c95b60d89230b6b9f973bce6ff110c2d33 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Tue, 16 Dec 2025 21:05:01 +0000 Subject: [PATCH 06/10] microshif-bootc: switch to cs10 and add multi-arch builds --- deploy/microshift-bootc/Containerfile | 3 +-- deploy/microshift-bootc/Makefile | 35 +++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/deploy/microshift-bootc/Containerfile b/deploy/microshift-bootc/Containerfile index f242ee69..7226c784 100644 --- a/deploy/microshift-bootc/Containerfile +++ b/deploy/microshift-bootc/Containerfile @@ -1,5 +1,4 @@ -FROM ghcr.io/microshift-io/microshift:release-4.20-4.20.0-okd-scos.9 - +FROM ghcr.io/microshift-io/microshift:4.21.0_gbc8e20c07_4.21.0_okd_scos.ec.14 # Install dependencies for config-svc RUN dnf install -y epel-release && \ dnf install -y python3 iproute python3-flask python3-pip && \ diff --git a/deploy/microshift-bootc/Makefile b/deploy/microshift-bootc/Makefile index cc09935b..4db7157c 100644 --- a/deploy/microshift-bootc/Makefile +++ b/deploy/microshift-bootc/Makefile @@ -1,4 +1,4 @@ -.PHONY: help build bootc-build push bootc-push bootc-run bootc-stop bootc-sh bootc-rm build-image build-iso +.PHONY: help build bootc-build bootc-build-multi push bootc-push bootc-push-multi bootc-run bootc-stop bootc-sh bootc-rm build-image build-iso build-all build-all-multi push-all push-all-multi # Default image tags BOOTC_IMG ?= quay.io/jumpstarter-dev/microshift/bootc:latest @@ -15,8 +15,28 @@ bootc-build: ## Build the bootc image with MicroShift @echo "Building bootc image: $(BOOTC_IMG): building as root to be on the container storage from root" sudo podman build -t $(BOOTC_IMG) -f Containerfile ../.. +bootc-build-multi: ## Build the bootc image for multiple architectures (amd64, arm64) + @echo "Building multiarch bootc image: $(BOOTC_IMG)" + @echo "This will build for linux/amd64 and linux/arm64" + @# Remove existing manifest if it exists + -podman manifest rm $(BOOTC_IMG) 2>/dev/null || true + @# Create a new manifest + podman manifest create $(BOOTC_IMG) + @# Build for amd64 + @echo "Building for linux/amd64..." + podman build --platform linux/amd64 -t $(BOOTC_IMG)-amd64 -f Containerfile ../.. + @# Build for arm64 + @echo "Building for linux/arm64..." + podman build --platform linux/arm64 -t $(BOOTC_IMG)-arm64 -f Containerfile ../.. + @# Add both images to the manifest + podman manifest add $(BOOTC_IMG) $(BOOTC_IMG)-amd64 + podman manifest add $(BOOTC_IMG) $(BOOTC_IMG)-arm64 + @echo "Multiarch manifest created successfully!" + @echo "To inspect: podman manifest inspect $(BOOTC_IMG)" + @echo "To push: make bootc-push-multi" + output/qcow2/disk.qcow2: ## Build a bootable QCOW2 image from the bootc image - @echo "Building QCOW2 image from: $(BOOTC_IMG)" + @echo "Building QCOW2 image from: $(BOOTC_IMG)"a @echo "Running bootc-image-builder..." 
@mkdir -p output sudo podman run \ @@ -79,12 +99,23 @@ bootc-push: ## Push the bootc image to registry @echo "Pushing bootc image: $(BOOTC_IMG)" sudo podman push $(BOOTC_IMG) +bootc-push-multi: ## Push the multiarch manifest to registry + @echo "Pushing multiarch manifest: $(BOOTC_IMG)" + @echo "This will push the manifest list with amd64 and arm64 images" + podman manifest push $(BOOTC_IMG) $(BOOTC_IMG) + @echo "Multiarch manifest pushed successfully!" + @echo "Images available for linux/amd64 and linux/arm64" + ##@ Development build-all: bootc-build ## Build bootc image +build-all-multi: bootc-build-multi ## Build multiarch bootc image + push-all: bootc-push ## Push bootc image to registry +push-all-multi: bootc-push-multi ## Push multiarch bootc image to registry + bootc-run: ## Run MicroShift in a bootc container @echo "Running MicroShift container with image: $(BOOTC_IMG)" @BOOTC_IMG=$(BOOTC_IMG) sudo -E ./run-microshift.sh From d26b469f4e88886f1047c8227f98017b0bf6c0e0 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Thu, 18 Dec 2025 20:04:29 +0000 Subject: [PATCH 07/10] microshift: use python3-pam and build image --- .github/workflows/build.yaml | 3 +++ deploy/microshift-bootc/Containerfile | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index bf3509f8..47bae709 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -29,6 +29,9 @@ jobs: - image_name: jumpstarter-dev/jumpstarter-operator-bundle dockerfile: deploy/operator/bundle.Dockerfile context: deploy/operator + - image_name: quay.io/jumpstarter-dev/microshift/bootc:latest + dockerfile: deploy/microshift-bootc/Containerfile + context: . steps: - name: Checkout repository uses: actions/checkout@v4 diff --git a/deploy/microshift-bootc/Containerfile b/deploy/microshift-bootc/Containerfile index 7226c784..2521cc53 100644 --- a/deploy/microshift-bootc/Containerfile +++ b/deploy/microshift-bootc/Containerfile @@ -1,8 +1,7 @@ FROM ghcr.io/microshift-io/microshift:4.21.0_gbc8e20c07_4.21.0_okd_scos.ec.14 # Install dependencies for config-svc RUN dnf install -y epel-release && \ - dnf install -y python3 iproute python3-flask python3-pip && \ - pip3 install python-pam && \ + dnf install -y python3 iproute python3-flask python3-pip kernel-tools python3-pam && \ dnf clean all # Install MicroShift manifests From 11394391ed972d74b708a73af687b023ad823eed Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Thu, 18 Dec 2025 20:04:58 +0000 Subject: [PATCH 08/10] microshift: add kickstart.ks for installing from netboot --- deploy/microshift-bootc/kickstart.ks | 71 ++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 deploy/microshift-bootc/kickstart.ks diff --git a/deploy/microshift-bootc/kickstart.ks b/deploy/microshift-bootc/kickstart.ks new file mode 100644 index 00000000..8b886234 --- /dev/null +++ b/deploy/microshift-bootc/kickstart.ks @@ -0,0 +1,71 @@ +lang en_US.UTF-8 +keyboard us +timezone UTC +text +reboot + +# Partition the disk with hardware-specific boot and swap partitions, adding an +# LVM volume that contains a 10GB+ system root. The remainder of the volume will +# be used by the CSI driver for storing data. 
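+# The layout produced by the directives below can be verified on the installed
+# system with, for example: lsblk -o NAME,SIZE,TYPE,MOUNTPOINT and vgs/lvs myvg1.
+# Space left unallocated in myvg1 is what the CSI storage driver consumes when it
+# provisions persistent volumes.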
+zerombr +clearpart --all --initlabel + +# Create boot and swap partitions as required by the current hardware platform +reqpart --add-boot + +# Add an LVM volume group and allocate a system root logical volume +part pv.01 --grow +volgroup myvg1 pv.01 +logvol / --vgname=myvg1 --fstype=xfs --size=65536 --name=root + +# Lock root user account +#rootpw --lock + +rootpw --plaintext jumpstarter + +# Configure network to use DHCP and activate on boot +network --bootproto=dhcp --device=link --activate --onboot=on + +%pre-install --log=/dev/console --erroronfail + +# Create a container registry authentication file +#mkdir -p /etc/ostree/ +#cat > /etc/ostree/auth.json <<'EOF' +#${AUTH_CONFIG} +#EOF + +# Delete an empty file or set permissions +#if [ "$(wc -w < /etc/ostree/auth.json)" -eq 0 ] ; then +# rm -f /etc/ostree/auth.json +#else +# chmod 600 /etc/ostree/auth.json +#fi + +## Create a container registry configuration file +#mkdir -p /etc/containers/registries.conf.d/ +#cat > /etc/containers/registries.conf.d/999-microshift-registry.conf <<'EOF' +#${REGISTRY_CONFIG} +#EOF + +# Delete an empty file or set permissions +#if [ "$(wc -w < /etc/containers/registries.conf.d/999-microshift-registry.conf)" -eq 0 ] ; then +# rm -f /etc/containers/registries.conf.d/999-microshift-registry.conf +#else +# chmod 644 /etc/containers/registries.conf.d/999-microshift-registry.conf +#fi + +%end + +# Pull a bootc image from a remote registry +ostreecontainer --url quay.io/jumpstarter-dev/microshift/bootc:latest + +%post --log=/dev/console --erroronfail + + +# Create an OpenShift pull secret file +#cat > /etc/crio/openshift-pull-secret <<'EOF' +#${PULL_SECRET} +#EOF +#chmod 600 /etc/crio/openshift-pull-secret + +%end From b0b091177429fb5e4f4fa0e2ce0742387d5b736b Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Thu, 18 Dec 2025 20:08:50 +0000 Subject: [PATCH 09/10] microshift: remove hostname changes --- deploy/microshift-bootc/Containerfile | 6 +- deploy/microshift-bootc/config-svc/app.py | 70 ------------------- .../config-svc/update-banner.sh | 8 +-- 3 files changed, 8 insertions(+), 76 deletions(-) diff --git a/deploy/microshift-bootc/Containerfile b/deploy/microshift-bootc/Containerfile index 2521cc53..cfb660f4 100644 --- a/deploy/microshift-bootc/Containerfile +++ b/deploy/microshift-bootc/Containerfile @@ -18,6 +18,9 @@ RUN firewall-offline-cmd --add-service=http && \ # Set root password RUN echo "root:jumpstarter" | chpasswd +# Set hostname, we need something stable for microshift +RUN echo "js-community" > /etc/hostname + # Install config-svc systemd service COPY deploy/microshift-bootc/config-svc/app.py /usr/local/bin/config-svc RUN chmod +x /usr/local/bin/config-svc @@ -25,4 +28,5 @@ COPY deploy/microshift-bootc/config-svc/update-banner.sh /usr/local/bin/update-b RUN chmod +x /usr/local/bin/update-banner.sh COPY deploy/microshift-bootc/config-svc/config-svc.service /etc/systemd/system/config-svc.service COPY deploy/microshift-bootc/config-svc/update-banner.service /etc/systemd/system/update-banner.service -RUN systemctl enable config-svc.service update-banner.service \ No newline at end of file +RUN systemctl enable config-svc.service update-banner.service + diff --git a/deploy/microshift-bootc/config-svc/app.py b/deploy/microshift-bootc/config-svc/app.py index 7ad2515f..f00fae60 100644 --- a/deploy/microshift-bootc/config-svc/app.py +++ b/deploy/microshift-bootc/config-svc/app.py @@ -354,16 +354,6 @@ def decorated(*args, **kwargs):
-        [removed HTML markup not preserved in this excerpt: a "Hostname Configuration" section with the help text "Set the system hostname"]
@@ -1631,47 +1621,6 @@ def api_change_password(): }) -@app.route('/configure-hostname', methods=['POST']) -@requires_auth -def configure_hostname(): - """Handle hostname configuration request.""" - hostname = request.form.get('hostname', '').strip() - - current_hostname = get_current_hostname() - jumpstarter_config = get_jumpstarter_config() - password_required = is_default_password() - - messages = [] - - if not hostname: - messages.append({'type': 'error', 'text': 'Hostname is required'}) - else: - # Validate hostname format - hostname_valid, hostname_error = validate_hostname(hostname) - if not hostname_valid: - messages.append({'type': 'error', 'text': f'Invalid hostname: {hostname_error}'}) - else: - hostname_success, hostname_message = set_hostname(hostname) - if not hostname_success: - messages.append({'type': 'error', 'text': f'Failed to update hostname: {hostname_message}'}) - else: - current_hostname = hostname - messages.append({'type': 'success', 'text': f'Hostname updated successfully to: {hostname}'}) - - # Update login banner with the new hostname - banner_success, banner_message = update_login_banner() - if not banner_success: - print(f"Warning: Failed to update login banner: {banner_message}", file=sys.stderr) - - return render_template_string( - HTML_TEMPLATE, - messages=messages, - current_hostname=current_hostname, - jumpstarter_config=jumpstarter_config, - password_required=password_required - ) - - @app.route('/api/configure-jumpstarter', methods=['POST']) @requires_auth def api_configure_jumpstarter(): @@ -2834,25 +2783,6 @@ def get_jumpstarter_config(): return defaults -def set_hostname(hostname): - """Set the system hostname using hostnamectl.""" - try: - subprocess.run( - ['hostnamectl', 'set-hostname', hostname], - capture_output=True, - text=True, - check=True - ) - return True, "Success" - except subprocess.CalledProcessError as e: - error_msg = e.stderr.strip() if e.stderr else str(e) - print(f"Error setting hostname: {error_msg}", file=sys.stderr) - return False, error_msg - except Exception as e: - print(f"Error setting hostname: {e}", file=sys.stderr) - return False, str(e) - - def set_root_password(password): """Set the root user password using chpasswd.""" try: diff --git a/deploy/microshift-bootc/config-svc/update-banner.sh b/deploy/microshift-bootc/config-svc/update-banner.sh index 57cba770..b9aa7edf 100644 --- a/deploy/microshift-bootc/config-svc/update-banner.sh +++ b/deploy/microshift-bootc/config-svc/update-banner.sh @@ -1,14 +1,12 @@ -#!/bin/bash +#!/bin/env python3 # Update login banner with Jumpstarter web UI URL -python3 << 'EOF' import sys import os - -# Import and call the update function import importlib.util import importlib.machinery +def update_login_banner(): config_svc_path = '/usr/local/bin/config-svc' if not os.path.exists(config_svc_path): print(f"Error: {config_svc_path} does not exist", file=sys.stderr) @@ -34,5 +32,5 @@ try: except Exception as e: print(f"Error loading or executing {config_svc_path}: {e}", file=sys.stderr) sys.exit(1) -EOF + From 7ef45e7069509a9a15be34a7f9bc895758002569 Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Pelayo Date: Thu, 18 Dec 2025 22:13:35 +0000 Subject: [PATCH 10/10] microshif: reorganize config-svc in modules and templates --- deploy/microshift-bootc/Containerfile | 19 +- deploy/microshift-bootc/Makefile | 9 +- deploy/microshift-bootc/config-svc/README.md | 177 + .../microshift-bootc/config-svc/__init__.py | 4 + deploy/microshift-bootc/config-svc/api.py | 869 +++++ 
deploy/microshift-bootc/config-svc/app.py | 3012 +---------------- deploy/microshift-bootc/config-svc/auth.py | 139 + .../config-svc/config-svc.service | 2 +- .../config-svc/pyproject.toml | 75 + deploy/microshift-bootc/config-svc/routes.py | 116 + deploy/microshift-bootc/config-svc/system.py | 569 ++++ .../config-svc/templates/index.html | 857 +++++ .../templates/password_required.html | 134 + .../config-svc/templates/styles.css | 370 ++ .../config-svc/update-banner.sh | 36 +- 15 files changed, 3361 insertions(+), 3027 deletions(-) create mode 100644 deploy/microshift-bootc/config-svc/README.md create mode 100644 deploy/microshift-bootc/config-svc/__init__.py create mode 100644 deploy/microshift-bootc/config-svc/api.py create mode 100644 deploy/microshift-bootc/config-svc/auth.py create mode 100644 deploy/microshift-bootc/config-svc/pyproject.toml create mode 100644 deploy/microshift-bootc/config-svc/routes.py create mode 100644 deploy/microshift-bootc/config-svc/system.py create mode 100644 deploy/microshift-bootc/config-svc/templates/index.html create mode 100644 deploy/microshift-bootc/config-svc/templates/password_required.html create mode 100644 deploy/microshift-bootc/config-svc/templates/styles.css diff --git a/deploy/microshift-bootc/Containerfile b/deploy/microshift-bootc/Containerfile index cfb660f4..9b716f3e 100644 --- a/deploy/microshift-bootc/Containerfile +++ b/deploy/microshift-bootc/Containerfile @@ -21,11 +21,24 @@ RUN echo "root:jumpstarter" | chpasswd # Set hostname, we need something stable for microshift RUN echo "js-community" > /etc/hostname -# Install config-svc systemd service -COPY deploy/microshift-bootc/config-svc/app.py /usr/local/bin/config-svc -RUN chmod +x /usr/local/bin/config-svc +# Install config-svc application +RUN mkdir -p /usr/local/lib/config-svc + +# Copy Python modules and templates +COPY deploy/microshift-bootc/config-svc/*.py /usr/local/lib/config-svc/ +COPY deploy/microshift-bootc/config-svc/templates/ /usr/local/lib/config-svc/templates/ + +# Create wrapper script to run the application +RUN echo '#!/bin/bash' > /usr/local/bin/config-svc && \ + echo 'cd /usr/local/lib/config-svc' >> /usr/local/bin/config-svc && \ + echo 'exec /usr/bin/python3 app.py "$@"' >> /usr/local/bin/config-svc && \ + chmod +x /usr/local/bin/config-svc + +# Install banner update script COPY deploy/microshift-bootc/config-svc/update-banner.sh /usr/local/bin/update-banner.sh RUN chmod +x /usr/local/bin/update-banner.sh + +# Install systemd services COPY deploy/microshift-bootc/config-svc/config-svc.service /etc/systemd/system/config-svc.service COPY deploy/microshift-bootc/config-svc/update-banner.service /etc/systemd/system/update-banner.service RUN systemctl enable config-svc.service update-banner.service diff --git a/deploy/microshift-bootc/Makefile b/deploy/microshift-bootc/Makefile index 4db7157c..927e1945 100644 --- a/deploy/microshift-bootc/Makefile +++ b/deploy/microshift-bootc/Makefile @@ -138,7 +138,14 @@ bootc-sh: ## Open a shell in the running MicroShift container bootc-reload-app: ## Reload the config service app without rebuilding (dev mode) @echo "Reloading config-svc app..." - sudo podman cp config-svc/app.py jumpstarter-microshift-okd:/usr/local/bin/config-svc + @echo "Copying Python modules..." + @for file in config-svc/*.py; do \ + sudo podman cp $$file jumpstarter-microshift-okd:/usr/local/lib/config-svc/; \ + done + @echo "Copying templates..." 
+ @for file in config-svc/templates/*; do \ + sudo podman cp $$file jumpstarter-microshift-okd:/usr/local/lib/config-svc/templates/; \ + done sudo podman exec jumpstarter-microshift-okd systemctl restart config-svc @echo "Config service reloaded successfully!" diff --git a/deploy/microshift-bootc/config-svc/README.md b/deploy/microshift-bootc/config-svc/README.md new file mode 100644 index 00000000..e3ea32bd --- /dev/null +++ b/deploy/microshift-bootc/config-svc/README.md @@ -0,0 +1,177 @@ +# Jumpstarter Configuration Service + +A modular web service for configuring Jumpstarter deployment settings on MicroShift. + +## Features + +- Hostname configuration with smart defaults +- Jumpstarter CR management (baseDomain + image version) +- MicroShift kubeconfig download +- System monitoring and status +- Pod and route management +- BootC operations support + +## Project Structure + +``` +config-svc/ +├── __init__.py # Package initialization +├── app.py # Main application entry point +├── auth.py # Authentication and validation logic +├── system.py # System utility functions +├── api.py # API route handlers +├── routes.py # Main UI route handlers +├── templates/ # HTML and CSS templates +│ ├── index.html # Main page template +│ ├── password_required.html # Password change page +│ └── styles.css # Application styles +├── pyproject.toml # Project configuration and dependencies +├── config-svc.service # Systemd service file +├── update-banner.service # Banner update service +└── update-banner.sh # Banner update script +``` + +## Module Organization + +### `auth.py` +Authentication and validation utilities: +- `validate_hostname()` - RFC 1123 hostname validation +- `validate_password()` - Password security validation +- `check_auth()` - PAM-based authentication +- `requires_auth()` - Flask authentication decorator +- `is_default_password()` - Default password check + +### `system.py` +System utility functions: +- `get_current_hostname()` - Get system hostname +- `get_jumpstarter_config()` - Retrieve Jumpstarter CR configuration +- `set_root_password()` - Set root user password +- `get/set_ssh_authorized_keys()` - Manage SSH keys +- `update_login_banner()` - Update system login banner +- `apply_jumpstarter_cr()` - Apply Jumpstarter Custom Resource +- `get_lvm_pv_info()` - Get LVM physical volume info +- `get_root_filesystem()` - Detect root filesystem +- `calculate_age()` - Calculate Kubernetes resource age +- `get_default_route_ip()` - Get default route IP address + +### `api.py` +API route handlers: +- `/api/change-password` - Password and SSH key management +- `/api/configure-jumpstarter` - Jumpstarter CR configuration +- `/api/system-stats` - System statistics +- `/api/bootc-status` - BootC status information +- `/api/bootc-upgrade-check` - Check for BootC upgrades +- `/api/bootc-upgrade` - Apply BootC upgrade +- `/api/bootc-switch` - Switch BootC image +- `/api/dmesg` - Kernel log +- `/api/operator-status` - Jumpstarter operator status +- `/api/pods` - Pod listing +- `/api/routes` - Route listing +- `/api/pods//` - Delete pod +- `/logs//` - Stream pod logs +- `/kubeconfig` - Download kubeconfig + +### `routes.py` +Main UI route handlers: +- `/` - Main configuration page +- `/static/styles.css` - CSS stylesheet +- `/logout` - Logout endpoint +- `/configure-jumpstarter` - Legacy form submission handler + +### `app.py` +Main application entry point that: +- Creates Flask application +- Registers all routes +- Updates login banner +- Starts the web server + +## Installation + +### Using pip + 
+```bash +cd config-svc +pip install -e . +``` + +### Using pyproject.toml + +The application is configured using `pyproject.toml` with: +- Project metadata +- Dependencies (Flask 2.3+) +- Optional dependencies (python-pam for PAM auth) +- Build system configuration +- Tool configurations (black, isort, pylint, mypy) + +## Running + +### Direct execution + +```bash +python3 app.py +``` + +### Using systemd + +```bash +systemctl enable --now config-svc.service +``` + +### Environment Variables + +- `PORT` - Server port (default: 8080) + +## Dependencies + +### Required +- Python 3.9+ +- Flask 2.3+ + +### Optional +- python-pam 2.0+ (for PAM authentication, falls back to subprocess) + +### System Commands +The application requires the following system commands: +- `oc` - OpenShift CLI +- `bootc` - BootC CLI +- `pvscan` - LVM commands +- `df`, `free`, `top`, `uptime` - System monitoring +- Standard Linux utilities + +## Development + +### Code Style + +The project uses: +- Black for code formatting (line length: 120) +- isort for import sorting +- pylint for linting +- mypy for type checking + +### Running Linters + +```bash +black app.py auth.py system.py api.py routes.py +isort app.py auth.py system.py api.py routes.py +pylint app.py auth.py system.py api.py routes.py +mypy app.py auth.py system.py api.py routes.py +``` + +## Security + +- HTTP Basic Authentication required for all endpoints +- Password validation (min 8 chars, no special characters) +- SSH key management with proper permissions +- Hostname validation per RFC 1123 +- Default password change enforcement + +## License + +Apache License 2.0 + +## Links + +- Homepage: https://jumpstarter.dev +- Documentation: https://docs.jumpstarter.dev +- Repository: https://github.com/jumpstarter-dev/jumpstarter-controller + diff --git a/deploy/microshift-bootc/config-svc/__init__.py b/deploy/microshift-bootc/config-svc/__init__.py new file mode 100644 index 00000000..63d1deaf --- /dev/null +++ b/deploy/microshift-bootc/config-svc/__init__.py @@ -0,0 +1,4 @@ +"""Jumpstarter Configuration Web UI package.""" + +__version__ = "1.0.0" + diff --git a/deploy/microshift-bootc/config-svc/api.py b/deploy/microshift-bootc/config-svc/api.py new file mode 100644 index 00000000..80bfc463 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/api.py @@ -0,0 +1,869 @@ +"""API route handlers for Jumpstarter Configuration UI.""" + +import json +import os +import re +import subprocess +import sys +from io import BytesIO +from pathlib import Path + +from flask import jsonify, request, Response, send_file + +from auth import requires_auth, is_default_password, validate_hostname, validate_password +from system import ( + get_current_hostname, get_jumpstarter_config, set_root_password, + get_ssh_authorized_keys, set_ssh_authorized_keys, update_login_banner, + apply_jumpstarter_cr, calculate_age, get_default_route_ip, + get_lvm_pv_info, get_root_filesystem, KUBECONFIG_PATH +) + + +def register_api_routes(app): + """Register all API routes with the Flask app.""" + + @app.route('/api/change-password', methods=['POST']) + @requires_auth + def api_change_password(): + """API endpoint to handle password change request (returns JSON).""" + data = request.get_json() if request.is_json else {} + new_password = data.get('newPassword', request.form.get('newPassword', '')).strip() + confirm_password = data.get('confirmPassword', request.form.get('confirmPassword', '')).strip() + ssh_keys_value = data.get('sshKeys', request.form.get('sshKeys', '')).strip() + + was_default = 
is_default_password() + existing_ssh_keys = get_ssh_authorized_keys() + + messages = [] + password_updated = False + ssh_updated = False + requires_redirect = False + + # If password is provided, validate and set it + if new_password: + # Validate password format and security + password_valid, password_error = validate_password(new_password) + if not password_valid: + messages.append({'type': 'error', 'text': password_error}) + elif new_password != confirm_password: + messages.append({'type': 'error', 'text': 'Passwords do not match'}) + else: + password_success, password_message = set_root_password(new_password) + if not password_success: + messages.append({'type': 'error', 'text': f'Failed to set password: {password_message}'}) + else: + password_updated = True + messages.append({'type': 'success', 'text': 'Password changed successfully!'}) + if was_default: + # Update login banner on first password change + update_login_banner() + requires_redirect = True + elif was_default: + # If we're on the default password screen and no password provided, require it + messages.append({'type': 'error', 'text': 'Password is required to change from default password'}) + + # Process SSH keys (always process if form was submitted) + ssh_success, ssh_message = set_ssh_authorized_keys(ssh_keys_value) + if ssh_success: + ssh_updated = True + if ssh_keys_value: + messages.append({'type': 'success', 'text': ssh_message}) + else: + # Only show message if keys were cleared and there were keys before + if existing_ssh_keys: + messages.append({'type': 'success', 'text': ssh_message}) + else: + messages.append({'type': 'error', 'text': f'Failed to set SSH keys: {ssh_message}'}) + + has_errors = any(msg.get('type') == 'error' for msg in messages) + success = not has_errors and (password_updated or ssh_updated) + + return jsonify({ + 'success': success, + 'messages': messages, + 'password_updated': password_updated, + 'ssh_updated': ssh_updated, + 'requires_redirect': requires_redirect, + 'ssh_keys': get_ssh_authorized_keys() if ssh_updated else existing_ssh_keys + }) + + @app.route('/api/configure-jumpstarter', methods=['POST']) + @requires_auth + def api_configure_jumpstarter(): + """API endpoint to handle Jumpstarter CR configuration request (returns JSON).""" + data = request.get_json() if request.is_json else {} + base_domain = data.get('baseDomain', request.form.get('baseDomain', '')).strip() + image = data.get('image', request.form.get('image', '')).strip() + image_pull_policy = data.get('imagePullPolicy', request.form.get('imagePullPolicy', 'IfNotPresent')).strip() + + messages = [] + success = False + + if not base_domain: + messages.append({'type': 'error', 'text': 'Base domain is required'}) + else: + # Validate base domain format (same as hostname validation) + domain_valid, domain_error = validate_hostname(base_domain) + if not domain_valid: + messages.append({'type': 'error', 'text': f'Invalid base domain: {domain_error}'}) + elif not image: + messages.append({'type': 'error', 'text': 'Controller image is required'}) + else: + # Apply the Jumpstarter CR + cr_success, cr_message = apply_jumpstarter_cr(base_domain, image, image_pull_policy) + + if cr_success: + msg = f'Jumpstarter configuration applied successfully! 
Base Domain: {base_domain}, Image: {image}' + messages.append({'type': 'success', 'text': msg}) + success = True + else: + messages.append({'type': 'error', 'text': f'Failed to apply Jumpstarter CR: {cr_message}'}) + + return jsonify({ + 'success': success, + 'messages': messages, + 'config': { + 'base_domain': base_domain, + 'image': image, + 'image_pull_policy': image_pull_policy + } if success else None + }) + + @app.route('/api/system-stats') + @requires_auth + def get_system_stats(): + """API endpoint to get system statistics.""" + try: + stats = {} + + # Disk usage - use detected root filesystem + root_fs = get_root_filesystem() + disk_result = subprocess.run(['df', '-h', root_fs], capture_output=True, text=True) + disk_lines = disk_result.stdout.strip().split('\n') + if len(disk_lines) > 1: + disk_parts = disk_lines[1].split() + stats['disk'] = { + 'total': disk_parts[1], + 'used': disk_parts[2], + 'available': disk_parts[3], + 'percent': int(disk_parts[4].rstrip('%')) + } + else: + stats['disk'] = {'total': 'N/A', 'used': 'N/A', 'available': 'N/A', 'percent': 0} + + # Memory usage + mem_result = subprocess.run(['free', '-h'], capture_output=True, text=True) + mem_lines = mem_result.stdout.strip().split('\n') + if len(mem_lines) > 1: + mem_parts = mem_lines[1].split() + # Parse percentage + mem_total_result = subprocess.run(['free'], capture_output=True, text=True) + mem_total_lines = mem_total_result.stdout.strip().split('\n')[1].split() + mem_percent = int((int(mem_total_lines[2]) / int(mem_total_lines[1])) * 100) + + stats['memory'] = { + 'total': mem_parts[1], + 'used': mem_parts[2], + 'available': mem_parts[6] if len(mem_parts) > 6 else mem_parts[3], + 'percent': mem_percent + } + else: + stats['memory'] = {'total': 'N/A', 'used': 'N/A', 'available': 'N/A', 'percent': 0} + + # CPU info + cpu_count_result = subprocess.run(['nproc'], capture_output=True, text=True) + cpu_cores = int(cpu_count_result.stdout.strip()) if cpu_count_result.returncode == 0 else 0 + + # CPU usage - get from top + top_result = subprocess.run(['top', '-bn1'], capture_output=True, text=True) + cpu_usage = 0 + for line in top_result.stdout.split('\n'): + if 'Cpu(s)' in line or '%Cpu' in line: + # Parse line like "%Cpu(s): 2.0 us, 1.0 sy, 0.0 ni, 97.0 id,..." 
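+                    # e.g. for "97.0 id" the idle figure is 97.0, so reported usage = 100 - 97.0 = 3.0%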
+ parts = line.split(',') + for part in parts: + if 'id' in part: + idle = float(part.split()[0]) + cpu_usage = round(100 - idle, 1) + break + break + + stats['cpu'] = { + 'cores': cpu_cores, + 'usage': cpu_usage + } + + # System info + kernel_result = subprocess.run(['uname', '-r'], capture_output=True, text=True) + kernel = kernel_result.stdout.strip() + + hostname = get_current_hostname() + + # Uptime + uptime_result = subprocess.run(['uptime', '-p'], capture_output=True, text=True) + uptime = uptime_result.stdout.strip().replace('up ', '') + + # Load average + loadavg_result = subprocess.run(['cat', '/proc/loadavg'], capture_output=True, text=True) + loadavg_parts = loadavg_result.stdout.strip().split() + + stats['system'] = { + 'kernel': kernel, + 'hostname': hostname, + 'uptime': uptime, + 'load_1': loadavg_parts[0] if len(loadavg_parts) > 0 else '0', + 'load_5': loadavg_parts[1] if len(loadavg_parts) > 1 else '0', + 'load_15': loadavg_parts[2] if len(loadavg_parts) > 2 else '0' + } + + # Network interfaces + ip_result = subprocess.run(['ip', '-4', 'addr', 'show'], capture_output=True, text=True) + interfaces = [] + current_iface = None + # Prefixes to skip (container/virtual interfaces) + skip_prefixes = ('veth', 'docker', 'br-', 'cni', 'flannel', 'cali') + + for line in ip_result.stdout.split('\n'): + line = line.strip() + if line and line[0].isdigit() and ':' in line: + # Interface line + parts = line.split(':') + if len(parts) >= 2: + iface_name = parts[1].strip().split('@')[0] + # Skip virtual/container interfaces + if not iface_name.startswith(skip_prefixes): + current_iface = iface_name + else: + current_iface = None + elif 'inet ' in line and current_iface: + # IP line + ip_addr = line.split()[1].split('/')[0] + if ip_addr != '127.0.0.1': # Skip localhost + interfaces.append({ + 'name': current_iface, + 'ip': ip_addr + }) + current_iface = None + + stats['network'] = { + 'interfaces': interfaces + } + + # LVM Physical Volume information + lvm_info = get_lvm_pv_info() + if lvm_info: + stats['lvm'] = lvm_info + + return jsonify(stats) + + except Exception as e: + return jsonify({'error': f'Error gathering system statistics: {str(e)}'}), 500 + + @app.route('/api/bootc-status') + @requires_auth + def get_bootc_status(): + """API endpoint to get BootC status and upgrade check information.""" + try: + status_output = '' + upgrade_check_output = '' + + # Get bootc status + try: + status_result = subprocess.run( + ['bootc', 'status'], + capture_output=True, + text=True, + timeout=10 + ) + if status_result.returncode == 0: + status_output = status_result.stdout.strip() + else: + status_output = f"Error: {status_result.stderr.strip()}" + except FileNotFoundError: + status_output = "bootc command not found" + except subprocess.TimeoutExpired: + status_output = "Command timed out" + except Exception as e: + status_output = f"Error: {str(e)}" + + # Get upgrade check + try: + upgrade_result = subprocess.run( + ['bootc', 'upgrade', '--check'], + capture_output=True, + text=True, + timeout=30 + ) + if upgrade_result.returncode == 0: + upgrade_check_output = upgrade_result.stdout.strip() + else: + upgrade_check_output = f"Error: {upgrade_result.stderr.strip()}" + except FileNotFoundError: + upgrade_check_output = "bootc command not found" + except subprocess.TimeoutExpired: + upgrade_check_output = "Command timed out" + except Exception as e: + upgrade_check_output = f"Error: {str(e)}" + + return jsonify({ + 'status': status_output, + 'upgrade_check': upgrade_check_output + }) + + except 
Exception as e: + return jsonify({'error': f'Error getting BootC status: {str(e)}'}), 500 + + @app.route('/api/bootc-upgrade-check', methods=['POST']) + @requires_auth + def bootc_upgrade_check(): + """API endpoint to check for BootC upgrades.""" + try: + result = subprocess.run( + ['bootc', 'upgrade', '--check'], + capture_output=True, + text=True, + timeout=30 + ) + + if result.returncode == 0: + return jsonify({ + 'success': True, + 'output': result.stdout.strip(), + 'message': 'Upgrade check completed' + }) + else: + return jsonify({ + 'success': False, + 'error': result.stderr.strip() or 'Upgrade check failed' + }), 400 + + except FileNotFoundError: + return jsonify({'success': False, 'error': 'bootc command not found'}), 404 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + @app.route('/api/bootc-upgrade', methods=['POST']) + @requires_auth + def bootc_upgrade(): + """API endpoint to apply BootC upgrade.""" + try: + # Run bootc upgrade (this may take a while) + result = subprocess.run( + ['bootc', 'upgrade'], + capture_output=True, + text=True, + timeout=600 # 10 minutes timeout for upgrade + ) + + if result.returncode == 0: + return jsonify({ + 'success': True, + 'output': result.stdout.strip(), + 'message': 'Upgrade completed successfully. Reboot may be required.' + }) + else: + return jsonify({ + 'success': False, + 'error': result.stderr.strip() or 'Upgrade failed' + }), 400 + + except FileNotFoundError: + return jsonify({'success': False, 'error': 'bootc command not found'}), 404 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out (upgrade may still be in progress)'}), 500 + except Exception as e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + @app.route('/api/bootc-switch', methods=['POST']) + @requires_auth + def bootc_switch(): + """API endpoint to switch BootC to a different image.""" + try: + data = request.get_json() if request.is_json else {} + image = data.get('image', '').strip() + + if not image: + return jsonify({'success': False, 'error': 'Image reference is required'}), 400 + + # Validate image format (basic check) + if not (image.startswith('quay.io/') or image.startswith('docker.io/') or + ':' in image or '/' in image): + return jsonify({'success': False, 'error': 'Invalid image reference format'}), 400 + + # Run bootc switch (this may take a while) + result = subprocess.run( + ['bootc', 'switch', image], + capture_output=True, + text=True, + timeout=600 # 10 minutes timeout for switch + ) + + if result.returncode == 0: + return jsonify({ + 'success': True, + 'output': result.stdout.strip(), + 'message': f'Switched to {image} successfully. Reboot may be required.' 
+ }) + else: + return jsonify({ + 'success': False, + 'error': result.stderr.strip() or 'Switch failed' + }), 400 + + except FileNotFoundError: + return jsonify({'success': False, 'error': 'bootc command not found'}), 404 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out (switch may still be in progress)'}), 500 + except Exception as e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + @app.route('/api/dmesg') + @requires_auth + def get_dmesg(): + """API endpoint to get kernel log (dmesg).""" + try: + # Run dmesg command to get kernel log + result = subprocess.run( + ['dmesg'], + capture_output=True, + text=True, + timeout=10 + ) + + if result.returncode != 0: + return jsonify({'error': f'Failed to get dmesg: {result.stderr.strip()}'}), 500 + + # Return the log (limit to last 10000 lines to avoid huge responses) + log_lines = result.stdout.strip().split('\n') + if len(log_lines) > 10000: + log_lines = log_lines[-10000:] + + return jsonify({ + 'log': '\n'.join(log_lines), + 'line_count': len(log_lines) + }) + + except subprocess.TimeoutExpired: + return jsonify({'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'error': f'Error getting dmesg: {str(e)}'}), 500 + + @app.route('/api/operator-status') + @requires_auth + def get_operator_status(): + """API endpoint to check if the Jumpstarter operator is ready.""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return jsonify({'ready': False, 'message': 'MicroShift kubeconfig not found. Waiting for MicroShift to start...'}), 200 + + # Check if jumpstarter-operator pod is running and ready + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'get', 'pods', '-n', 'jumpstarter-operator-system', '-o', 'json'], + capture_output=True, + text=True, + check=True, + timeout=10 + ) + + pods_data = json.loads(result.stdout) + + # Look for the operator controller manager pod + for pod in pods_data.get('items', []): + pod_name = pod.get('metadata', {}).get('name', '') + if 'jumpstarter-operator-controller-manager' in pod_name: + # Check if pod is running and ready + status = pod.get('status', {}) + phase = status.get('phase', '') + container_statuses = status.get('containerStatuses', []) + + if phase == 'Running' and container_statuses: + all_ready = all(c.get('ready', False) for c in container_statuses) + if all_ready: + return jsonify({'ready': True, 'message': 'Jumpstarter operator is ready'}), 200 + else: + return jsonify({'ready': False, 'message': 'Jumpstarter operator is starting...'}), 200 + else: + return jsonify({'ready': False, 'message': f'Jumpstarter operator status: {phase}'}), 200 + + # Operator pod not found + return jsonify({'ready': False, 'message': 'Waiting for Jumpstarter operator to deploy...'}), 200 + + except subprocess.CalledProcessError as e: + # Namespace might not exist yet + return jsonify({'ready': False, 'message': 'Waiting for Jumpstarter operator to deploy...'}), 200 + except subprocess.TimeoutExpired: + return jsonify({'ready': False, 'message': 'Timeout checking operator status'}), 200 + except Exception as e: + return jsonify({'ready': False, 'message': 'Checking operator status...'}), 200 + + @app.route('/api/pods') + @requires_auth + def get_pods(): + """API endpoint to get pod status as JSON.""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig 
exists + if not os.path.exists(kubeconfig_path): + return jsonify({'error': 'MicroShift kubeconfig not found. Is MicroShift running?'}), 503 + + # Run oc get pods -A -o json with explicit kubeconfig + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'get', 'pods', '-A', '-o', 'json'], + capture_output=True, + text=True, + check=True, + timeout=10 + ) + + pods_data = json.loads(result.stdout) + pods_list = [] + + for pod in pods_data.get('items', []): + metadata = pod.get('metadata', {}) + spec = pod.get('spec', {}) + status = pod.get('status', {}) + + # Calculate ready containers + container_statuses = status.get('containerStatuses', []) + ready_count = sum(1 for c in container_statuses if c.get('ready', False)) + total_count = len(container_statuses) + + # Calculate total restarts + restarts = sum(c.get('restartCount', 0) for c in container_statuses) + + # Check if pod is terminating (has deletionTimestamp) + if metadata.get('deletionTimestamp'): + phase = 'Terminating' + else: + # Determine pod phase/status + phase = status.get('phase', 'Unknown') + + # Check for more specific status from container states + for container in container_statuses: + state = container.get('state', {}) + if 'waiting' in state: + reason = state['waiting'].get('reason', '') + if reason: + phase = reason + break + + # Calculate age + creation_time = metadata.get('creationTimestamp', '') + age = calculate_age(creation_time) + + pods_list.append({ + 'namespace': metadata.get('namespace', 'default'), + 'name': metadata.get('name', 'unknown'), + 'ready': f"{ready_count}/{total_count}", + 'status': phase, + 'restarts': restarts, + 'age': age, + 'node': spec.get('nodeName', 'N/A') + }) + + return jsonify({'pods': pods_list}) + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.strip() if e.stderr else str(e) + return jsonify({'error': f'Failed to get pods: {error_msg}'}), 500 + except subprocess.TimeoutExpired: + return jsonify({'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'error': f'Error: {str(e)}'}), 500 + + @app.route('/api/routes') + @requires_auth + def get_routes(): + """API endpoint to get OpenShift routes as JSON.""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return jsonify({'error': 'MicroShift kubeconfig not found. 
Is MicroShift running?'}), 503 + + # Run oc get routes -A -o json with explicit kubeconfig + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'get', 'routes', '-A', '-o', 'json'], + capture_output=True, + text=True, + check=True, + timeout=10 + ) + + routes_data = json.loads(result.stdout) + routes_list = [] + + for route in routes_data.get('items', []): + metadata = route.get('metadata', {}) + spec = route.get('spec', {}) + status = route.get('status', {}) + + # Get route host + host = spec.get('host', 'N/A') + + # Get target service and port + to = spec.get('to', {}) + service_name = to.get('name', 'N/A') + + port = spec.get('port', {}) + target_port = port.get('targetPort', 'N/A') if port else 'N/A' + + # Get TLS configuration + tls = spec.get('tls', {}) + tls_termination = tls.get('termination', 'None') if tls else 'None' + + # Get ingress status + ingresses = status.get('ingress', []) + admitted = 'False' + if ingresses: + for ingress in ingresses: + conditions = ingress.get('conditions', []) + for condition in conditions: + if condition.get('type') == 'Admitted': + admitted = 'True' if condition.get('status') == 'True' else 'False' + break + + # Calculate age + creation_time = metadata.get('creationTimestamp', '') + age = calculate_age(creation_time) + + routes_list.append({ + 'namespace': metadata.get('namespace', 'default'), + 'name': metadata.get('name', 'unknown'), + 'host': host, + 'service': service_name, + 'port': str(target_port), + 'tls': tls_termination, + 'admitted': admitted, + 'age': age + }) + + return jsonify({'routes': routes_list}) + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.strip() if e.stderr else str(e) + return jsonify({'error': f'Failed to get routes: {error_msg}'}), 500 + except subprocess.TimeoutExpired: + return jsonify({'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'error': f'Error: {str(e)}'}), 500 + + @app.route('/api/pods//', methods=['DELETE']) + @requires_auth + def delete_pod(namespace, pod_name): + """API endpoint to delete a pod (causing it to restart).""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return jsonify({'success': False, 'error': 'MicroShift kubeconfig not found. Is MicroShift running?'}), 503 + + # Run oc delete pod with explicit kubeconfig + subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'delete', 'pod', pod_name, '-n', namespace], + capture_output=True, + text=True, + check=True, + timeout=10 + ) + + return jsonify({'success': True, 'message': f'Pod {pod_name} deleted successfully'}) + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.strip() if e.stderr else str(e) + return jsonify({'success': False, 'error': f'Failed to delete pod: {error_msg}'}), 500 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + @app.route('/logs//') + @requires_auth + def stream_logs(namespace, pod_name): + """Stream pod logs in real-time.""" + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return "MicroShift kubeconfig not found. 
Is MicroShift running?", 503 + + def generate(): + """Generator function to stream logs.""" + process = None + try: + # Start oc logs -f process + process = subprocess.Popen( + ['oc', '--kubeconfig', kubeconfig_path, 'logs', '-f', '-n', namespace, pod_name], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1 + ) + + # Stream output line by line + for line in iter(process.stdout.readline, ''): + if not line: + break + yield f"{line}" + + except Exception as e: + yield f"Error streaming logs: {str(e)}\n" + finally: + # Clean up process when connection closes + if process: + try: + process.terminate() + process.wait(timeout=5) + except Exception: + process.kill() + + # Return streaming response with HTML wrapper + html_header = f""" + + + Logs: {namespace}/{pod_name} + + + +
+    [HTML markup not preserved in this excerpt: the page header shows the title "📋 Pod Logs" and the line "Namespace: {namespace} | Pod: {pod_name}" above the streamed log output]
""" + + html_footer = """
+ + +""" + + def generate_with_html(): + yield html_header + for line in generate(): + yield line.replace('<', '<').replace('>', '>') + yield html_footer + + return Response(generate_with_html(), mimetype='text/html') + + @app.route('/kubeconfig') + @requires_auth + def download_kubeconfig(): + """Serve the kubeconfig file for download with nip.io hostname and insecure TLS.""" + kubeconfig_path = Path(KUBECONFIG_PATH) + + if not kubeconfig_path.exists(): + return "Kubeconfig file not found", 404 + + try: + # Read the original kubeconfig + with open(kubeconfig_path, 'r') as f: + kubeconfig_content = f.read() + + # Always use nip.io format based on default route IP + default_ip = get_default_route_ip() + if default_ip: + nip_hostname = f"jumpstarter.{default_ip}.nip.io" + else: + # Fallback to current hostname if IP detection fails + nip_hostname = get_current_hostname() + + # Extract the original server hostname (likely localhost) before replacing + # This is needed for tls-server-name to match the certificate + original_server_match = re.search(r'server:\s+https://([^:]+):(\d+)', kubeconfig_content) + original_hostname = 'localhost' # Default fallback + if original_server_match: + original_hostname = original_server_match.group(1) + + # Replace localhost with the nip.io hostname + kubeconfig_content = re.sub( + r'server:\s+https://localhost:(\d+)', + f'server: https://{nip_hostname}:\\1', + kubeconfig_content + ) + + # Keep the CA certificate fields (certificate-authority-data or certificate-authority) + # They are needed for certificate chain verification + + # Remove insecure-skip-tls-verify if it exists (we'll replace it with tls-server-name) + kubeconfig_content = re.sub( + r'^\s+insecure-skip-tls-verify:\s+.*\n', + '', + kubeconfig_content, + flags=re.MULTILINE + ) + + # Add tls-server-name to verify the CA but allow hostname mismatch + # This tells the client to verify the certificate as if it were issued for the original hostname + # (e.g., localhost), even though we're connecting via nip.io hostname + kubeconfig_content = re.sub( + r'(server:\s+https://[^\n]+\n)', + f'\\1 tls-server-name: {original_hostname}\n', + kubeconfig_content + ) + + # Create a BytesIO object to send as file + kubeconfig_bytes = BytesIO(kubeconfig_content.encode('utf-8')) + kubeconfig_bytes.seek(0) + + return send_file( + kubeconfig_bytes, + as_attachment=True, + download_name='kubeconfig', + mimetype='application/octet-stream' + ) + except Exception as e: + return f"Error reading kubeconfig: {str(e)}", 500 + diff --git a/deploy/microshift-bootc/config-svc/app.py b/deploy/microshift-bootc/config-svc/app.py index f00fae60..20466cc7 100644 --- a/deploy/microshift-bootc/config-svc/app.py +++ b/deploy/microshift-bootc/config-svc/app.py @@ -8,3014 +8,22 @@ - MicroShift kubeconfig download """ -import json import os -import re -import socket -import subprocess import sys -import tempfile -from functools import wraps -from io import BytesIO -from pathlib import Path -from flask import Flask, request, send_file, render_template_string, Response, jsonify - -app = Flask(__name__) - -# MicroShift kubeconfig path -KUBECONFIG_PATH = '/var/lib/microshift/resources/kubeadmin/kubeconfig' - - -def validate_hostname(hostname): - """ - Validate hostname according to RFC 1123 standards. 
- - Rules: - - Total length <= 253 characters - - Each label 1-63 characters - - Labels match /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/i (case-insensitive) - - No leading/trailing hyphen in labels - - Reject empty or illegal characters - - Optionally reject trailing dot - - Returns: (is_valid: bool, error_message: str) - """ - if not hostname: - return False, "Hostname cannot be empty" - - # Remove trailing dot if present (optional rejection) - if hostname.endswith('.'): - hostname = hostname.rstrip('.') - - # Check total length - if len(hostname) > 253: - return False, f"Hostname too long: {len(hostname)} characters (maximum 253)" - - # Split into labels - labels = hostname.split('.') - - # Check each label - label_pattern = re.compile(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', re.IGNORECASE) - - for i, label in enumerate(labels): - if not label: - return False, f"Empty label at position {i+1} (consecutive dots not allowed)" - - if len(label) > 63: - return False, f"Label '{label}' too long: {len(label)} characters (maximum 63)" - - if not label_pattern.match(label): - return False, f"Label '{label}' contains invalid characters. Labels must start and end with alphanumeric characters and can contain hyphens in between" - - # Additional check: no leading/trailing hyphen (pattern should catch this, but be explicit) - if label.startswith('-') or label.endswith('-'): - return False, f"Label '{label}' cannot start or end with a hyphen" - - return True, "" - - -def validate_password(password): - """ - Validate password to prevent chpasswd injection and enforce security. - - Rules: - - Reject newline characters ('\n') - - Reject colon characters (':') - - Minimum length: 8 characters - - Maximum length: 128 characters (reasonable limit) - - Returns: (is_valid: bool, error_message: str) - """ - if not password: - return False, "Password cannot be empty" - - # Check for forbidden characters - if '\n' in password: - return False, "Password cannot contain newline characters" - - if ':' in password: - return False, "Password cannot contain colon characters" - - # Check length - if len(password) < 8: - return False, f"Password too short: {len(password)} characters (minimum 8)" - - if len(password) > 128: - return False, f"Password too long: {len(password)} characters (maximum 128)" - - return True, "" - - -def check_auth(username, password): - """Check if a username/password combination is valid using PAM.""" - if username != 'root': - return False - - try: - # Try using PAM authentication first - import pam - p = pam.pam() - return p.authenticate(username, password) - except ImportError: - # Fallback: use subprocess to authenticate via su - try: - result = subprocess.run( - ['su', username, '-c', 'true'], - input=password.encode(), - capture_output=True, - timeout=5 - ) - return result.returncode == 0 - except Exception as e: - print(f"Authentication error: {e}", file=sys.stderr) - return False - - -def is_default_password(): - """Check if the root password is still the default 'jumpstarter'.""" - return check_auth('root', 'jumpstarter') - - -def authenticate(): - """Send a 401 response that enables basic auth.""" - return Response( - 'Authentication required. 
Please login with root credentials.', - 401, - {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'} - ) - - -def requires_auth(f): - """Decorator to require HTTP Basic Authentication.""" - @wraps(f) - def decorated(*args, **kwargs): - auth = request.authorization - if not auth or not check_auth(auth.username, auth.password): - return authenticate() - return f(*args, **kwargs) - return decorated - - -# HTML template for forced password change -PASSWORD_REQUIRED_TEMPLATE = """ - - - - - Password Change Required - Jumpstarter - - - - -
-    [PASSWORD_REQUIRED_TEMPLATE and HTML_TEMPLATE markup elided; HTML tags lost in extraction. The first template renders the forced password-change page ("Security Setup Required", "⚠️ Default Password Detected") with new password, confirm password, and SSH authorized keys fields. The second renders the main configuration page with sections for Jumpstarter Deployment Configuration (base domain, controller image, image pull policy), Change Root Password, BootC Operations (image switch), System Information, BootC Status, Kernel Log, Kubeconfig download, and Routes / Pod Status tables.]
- - - -""" - - -@app.route('/static/styles.css') -def serve_css(): - """Serve the consolidated CSS stylesheet.""" - css = """ - * { - margin: 0; - padding: 0; - box-sizing: border-box; - } - html { - scroll-behavior: smooth; - } - body { - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; - background: linear-gradient(135deg, #4c4c4c 0%, #1a1a1a 100%); - min-height: 100vh; - display: flex; - justify-content: center; - align-items: center; - padding: 20px; - } - .container { - background: white; - border-radius: 12px; - box-shadow: 0 10px 60px rgba(0,0,0,0.5), 0 0 0 1px rgba(255, 193, 7, 0.1); - max-width: 1000px; - width: 100%; - padding: 40px; - } - .banner { - margin: -40px -40px 30px -40px; - padding: 25px 40px; - background: linear-gradient(135deg, #757575 0%, #616161 100%); - border-radius: 12px 12px 0 0; - text-align: center; - } - .banner-text { - color: white; - font-size: 14px; - margin-bottom: 20px; - font-weight: 500; - } - .logos { - display: flex; - justify-content: center; - align-items: center; - gap: 40px; - flex-wrap: wrap; - } - .logo-link { - display: inline-block; - transition: opacity 0.3s; - } - .logo-link:hover { - opacity: 0.9; - } - .logo-link img { - height: 45px; - width: auto; - } - .microshift-logo { - height: 40px !important; - filter: brightness(0) invert(1); - } - .jumpstarter-logo { - height: 40px !important; - } - .nav-bar { - display: flex; - gap: 0; - margin: 0 -40px 30px -40px; - border-bottom: 1px solid #e0e0e0; - background: #fafafa; - } - .nav-link { - flex: 1; - text-align: center; - padding: 15px 20px; - text-decoration: none; - color: #666; - font-size: 14px; - font-weight: 500; - transition: all 0.3s; - border-bottom: 3px solid transparent; - } - .nav-link:hover { - background: #f5f5f5; - color: #333; - border-bottom-color: #ffc107; - } - .nav-link.active { - color: #000; - border-bottom-color: #ffc107; - background: white; - } - .content-area { - padding: 0 40px 40px 40px; - margin: 0 -40px -40px -40px; - } - h2 { - color: #333; - font-size: 20px; - margin-bottom: 15px; - } - .section { - display: none; - padding: 20px 0; - animation: fadeIn 0.3s ease-in; - } - @keyframes fadeIn { - from { - opacity: 0; - transform: translateY(10px); - } - to { - opacity: 1; - transform: translateY(0); - } - } - .info { - background: #f8f9fa; - padding: 12px 16px; - border-radius: 6px; - margin-bottom: 15px; - font-size: 14px; - color: #555; - } - .info strong { - color: #333; - } - .warning-box { - background: #fff3cd; - border: 1px solid #ffc107; - border-radius: 6px; - padding: 16px; - margin-bottom: 30px; - } - .warning-box h2 { - color: #856404; - font-size: 18px; - margin-bottom: 10px; - } - .warning-box p { - color: #856404; - font-size: 14px; - line-height: 1.5; - } - .form-group { - margin-bottom: 15px; - } - label { - display: block; - margin-bottom: 6px; - color: #555; - font-size: 14px; - font-weight: 500; - } - input[type="text"], - input[type="password"], - textarea { - width: 100%; - padding: 10px 12px; - border: 1px solid #ddd; - border-radius: 6px; - font-size: 14px; - transition: border-color 0.3s, opacity 0.3s; - font-family: inherit; - } - textarea { - font-family: monospace; - resize: vertical; - } - input[type="text"]:focus, - input[type="password"]:focus, - textarea:focus { - outline: none; - border-color: #ffc107; - box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2); - } - input[type="text"]:disabled, - input[type="password"]:disabled, - textarea:disabled { - background-color: #f5f5f5; - 
cursor: not-allowed; - opacity: 0.6; - } - select { - width: 100%; - padding: 10px 12px; - border: 1px solid #ddd; - border-radius: 6px; - font-size: 14px; - background-color: white; - cursor: pointer; - transition: border-color 0.3s; - } - select:focus { - outline: none; - border-color: #ffc107; - box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2); - } - .hint { - font-size: 12px; - color: #888; - margin-top: 4px; - } - button { - background: #ffc107; - color: #000; - border: none; - padding: 12px 24px; - border-radius: 6px; - font-size: 14px; - font-weight: 600; - cursor: pointer; - transition: background 0.3s, opacity 0.3s; - } - button:hover { - background: #ffb300; - } - button:disabled { - background: #666; - color: #999; - cursor: not-allowed; - opacity: 0.6; - } - button:disabled:hover { - background: #666; - } - button[type="submit"] { - width: 100%; - } - .download-btn { - background: #ffc107; - display: inline-block; - text-decoration: none; - color: #000; - padding: 12px 24px; - border-radius: 6px; - font-size: 14px; - font-weight: 600; - transition: background 0.3s; - } - .download-btn:hover { - background: #ffb300; - } - .message { - padding: 12px 16px; - border-radius: 6px; - margin-bottom: 20px; - font-size: 14px; - } - .message.success { - background: #d4edda; - color: #155724; - border: 1px solid #c3e6cb; - } - .message.error { - background: #f8d7da; - color: #721c24; - border: 1px solid #f5c6cb; - } - .message.info { - background: #d1ecf1; - color: #0c5460; - border: 1px solid #bee5eb; - } - /* MicroShift page specific styles */ - .status-badge { - display: inline-block; - padding: 4px 8px; - border-radius: 4px; - font-size: 11px; - font-weight: 600; - text-transform: uppercase; - } - .status-running { - background: #d4edda; - color: #155724; - } - .status-pending { - background: #fff3cd; - color: #856404; - } - .status-failed { - background: #f8d7da; - color: #721c24; - } - .status-succeeded { - background: #d1ecf1; - color: #0c5460; - } - .status-crashloopbackoff { - background: #f8d7da; - color: #721c24; - } - .status-terminating { - background: #ffeaa7; - color: #856404; - } - .status-unknown { - background: #e2e3e5; - color: #383d41; - } - table { - width: 100%; - border-collapse: collapse; - margin-top: 20px; - font-size: 13px; - } - th { - background: #f8f9fa; - padding: 12px 8px; - text-align: left; - font-weight: 600; - color: #333; - border-bottom: 2px solid #dee2e6; - position: sticky; - top: 0; - z-index: 10; - } - td { - padding: 10px 8px; - border-bottom: 1px solid #eee; - color: #555; - } - tr:hover { - background: #f8f9fa; - } - .table-wrapper { - overflow-x: auto; - max-height: 70vh; - overflow-y: auto; - } - .loading { - text-align: center; - padding: 40px; - color: #666; - } - .error { - background: #f8d7da; - color: #721c24; - padding: 12px 16px; - border-radius: 6px; - margin-bottom: 20px; - } - .pod-count { - color: #666; - font-size: 14px; - margin-bottom: 10px; - } - .microshift-section { - margin-bottom: 30px; - padding-bottom: 30px; - border-bottom: 1px solid #eee; - } - .microshift-section:last-child { - border-bottom: none; - } - .action-icon { - text-decoration: none; - font-size: 18px; - padding: 4px 6px; - margin: 0 2px; - border-radius: 4px; - transition: all 0.3s; - display: inline-block; - cursor: pointer; - } - .action-icon:hover { - background: #fff3e0; - transform: scale(1.2); - } - """ - return Response(css, mimetype='text/css') - - -@app.route('/logout') -def logout(): - """Logout endpoint that forces re-authentication.""" - return 
Response( - 'Logged out. Please close this dialog to log in again.', - 401, - {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'} - ) - - -@app.route('/') -@requires_auth -def index(): - """Serve the main configuration page.""" - current_hostname = get_current_hostname() - jumpstarter_config = get_jumpstarter_config() - password_required = is_default_password() - ssh_keys = get_ssh_authorized_keys() - - # Force password change if still using default - if password_required: - return render_template_string( - PASSWORD_REQUIRED_TEMPLATE, - messages=[], - current_hostname=current_hostname, - ssh_keys=ssh_keys - ) - - return render_template_string( - HTML_TEMPLATE, - messages=[], - current_hostname=current_hostname, - jumpstarter_config=jumpstarter_config, - password_required=password_required, - ssh_keys=ssh_keys - ) - - -@app.route('/api/change-password', methods=['POST']) -@requires_auth -def api_change_password(): - """API endpoint to handle password change request (returns JSON).""" - data = request.get_json() if request.is_json else {} - new_password = data.get('newPassword', request.form.get('newPassword', '')).strip() - confirm_password = data.get('confirmPassword', request.form.get('confirmPassword', '')).strip() - ssh_keys_value = data.get('sshKeys', request.form.get('sshKeys', '')).strip() - - was_default = is_default_password() - existing_ssh_keys = get_ssh_authorized_keys() - - messages = [] - password_updated = False - ssh_updated = False - requires_redirect = False - - # If password is provided, validate and set it - if new_password: - # Validate password format and security - password_valid, password_error = validate_password(new_password) - if not password_valid: - messages.append({'type': 'error', 'text': password_error}) - elif new_password != confirm_password: - messages.append({'type': 'error', 'text': 'Passwords do not match'}) - else: - password_success, password_message = set_root_password(new_password) - if not password_success: - messages.append({'type': 'error', 'text': f'Failed to set password: {password_message}'}) - else: - password_updated = True - messages.append({'type': 'success', 'text': 'Password changed successfully!'}) - if was_default: - # Update login banner on first password change - update_login_banner() - requires_redirect = True - elif was_default: - # If we're on the default password screen and no password provided, require it - messages.append({'type': 'error', 'text': 'Password is required to change from default password'}) - - # Process SSH keys (always process if form was submitted) - ssh_success, ssh_message = set_ssh_authorized_keys(ssh_keys_value) - if ssh_success: - ssh_updated = True - if ssh_keys_value: - messages.append({'type': 'success', 'text': ssh_message}) - else: - # Only show message if keys were cleared and there were keys before - if existing_ssh_keys: - messages.append({'type': 'success', 'text': ssh_message}) - else: - messages.append({'type': 'error', 'text': f'Failed to set SSH keys: {ssh_message}'}) - - has_errors = any(msg.get('type') == 'error' for msg in messages) - success = not has_errors and (password_updated or ssh_updated) - - return jsonify({ - 'success': success, - 'messages': messages, - 'password_updated': password_updated, - 'ssh_updated': ssh_updated, - 'requires_redirect': requires_redirect, - 'ssh_keys': get_ssh_authorized_keys() if ssh_updated else existing_ssh_keys - }) - - -@app.route('/api/configure-jumpstarter', methods=['POST']) -@requires_auth -def api_configure_jumpstarter(): - """API 
endpoint to handle Jumpstarter CR configuration request (returns JSON).""" - data = request.get_json() if request.is_json else {} - base_domain = data.get('baseDomain', request.form.get('baseDomain', '')).strip() - image = data.get('image', request.form.get('image', '')).strip() - image_pull_policy = data.get('imagePullPolicy', request.form.get('imagePullPolicy', 'IfNotPresent')).strip() - - messages = [] - success = False - - if not base_domain: - messages.append({'type': 'error', 'text': 'Base domain is required'}) - else: - # Validate base domain format (same as hostname validation) - domain_valid, domain_error = validate_hostname(base_domain) - if not domain_valid: - messages.append({'type': 'error', 'text': f'Invalid base domain: {domain_error}'}) - elif not image: - messages.append({'type': 'error', 'text': 'Controller image is required'}) - else: - # Apply the Jumpstarter CR - cr_success, cr_message = apply_jumpstarter_cr(base_domain, image, image_pull_policy) - - if cr_success: - msg = f'Jumpstarter configuration applied successfully! Base Domain: {base_domain}, Image: {image}' - messages.append({'type': 'success', 'text': msg}) - success = True - else: - messages.append({'type': 'error', 'text': f'Failed to apply Jumpstarter CR: {cr_message}'}) - - return jsonify({ - 'success': success, - 'messages': messages, - 'config': { - 'base_domain': base_domain, - 'image': image, - 'image_pull_policy': image_pull_policy - } if success else None - }) - - -@app.route('/configure-jumpstarter', methods=['POST']) -@requires_auth -def configure_jumpstarter(): - """Handle Jumpstarter CR configuration request (legacy HTML form submission).""" - base_domain = request.form.get('baseDomain', '').strip() - image = request.form.get('image', '').strip() - image_pull_policy = request.form.get('imagePullPolicy', 'IfNotPresent').strip() - - current_hostname = get_current_hostname() - jumpstarter_config = get_jumpstarter_config() - password_required = is_default_password() - - messages = [] - - if not base_domain: - messages.append({'type': 'error', 'text': 'Base domain is required'}) - else: - # Validate base domain format (same as hostname validation) - domain_valid, domain_error = validate_hostname(base_domain) - if not domain_valid: - messages.append({'type': 'error', 'text': f'Invalid base domain: {domain_error}'}) - elif not image: - messages.append({'type': 'error', 'text': 'Controller image is required'}) - else: - # Apply the Jumpstarter CR - cr_success, cr_message = apply_jumpstarter_cr(base_domain, image, image_pull_policy) - - if cr_success: - msg = f'Jumpstarter configuration applied successfully! Base Domain: {base_domain}, Image: {image}' - messages.append({'type': 'success', 'text': msg}) - # Update config to show what was just applied - jumpstarter_config = { - 'base_domain': base_domain, - 'image': image, - 'image_pull_policy': image_pull_policy - } - else: - messages.append({'type': 'error', 'text': f'Failed to apply Jumpstarter CR: {cr_message}'}) - - return render_template_string( - HTML_TEMPLATE, - messages=messages, - current_hostname=current_hostname, - jumpstarter_config=jumpstarter_config, - password_required=password_required - ) - - -def get_lvm_pv_info(): - """ - Parse pvscan output to get LVM physical volume information. - Returns dict with PV info or None if not available. 
- """ - try: - result = subprocess.run(['pvscan'], capture_output=True, text=True, timeout=5) - if result.returncode != 0: - return None - - # Parse output like: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" - # or: "Total: 1 [62.41 GiB] / in use: 1 [62.41 GiB] / in no VG: 0 [0 ]" - output = result.stdout.strip() - if not output: - return None - - lines = output.split('\n') - - # Look for PV line - pv_device = None - vg_name = None - total_size = None - free_size = None - - for line in lines: - line = line.strip() - # Match: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" - if line.startswith('PV '): - parts = line.split() - if len(parts) >= 2: - pv_device = parts[1] - # Find VG name - for i, part in enumerate(parts): - if part == 'VG' and i + 1 < len(parts): - vg_name = parts[i + 1] - break - # Find size info in brackets - bracket_match = re.search(r'\[([^\]]+)\]', line) - if bracket_match: - size_info = bracket_match.group(1) - # Parse "62.41 GiB / 52.41 GiB free" - size_parts = size_info.split('/') - if len(size_parts) >= 1: - total_size = size_parts[0].strip() - if len(size_parts) >= 2: - free_match = re.search(r'([\d.]+)\s*([KMGT]i?B)', size_parts[1]) - if free_match: - free_size = free_match.group(1) + ' ' + free_match.group(2) - - if not pv_device or not total_size: - return None - - # Calculate used space and percentage - # Parse sizes to calculate percentage - def parse_size(size_str): - """Parse size string like '62.41 GiB' to bytes.""" - match = re.match(r'([\d.]+)\s*([KMGT]i?)B?', size_str, re.IGNORECASE) - if not match: - return 0 - value = float(match.group(1)) - unit = match.group(2).upper() - multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4} - return int(value * multipliers.get(unit, 1)) - - total_bytes = parse_size(total_size) - free_bytes = parse_size(free_size) if free_size else 0 - used_bytes = total_bytes - free_bytes - percent = int((used_bytes / total_bytes * 100)) if total_bytes > 0 else 0 - - # Format used size - def format_size(bytes_val): - """Format bytes to human-readable size.""" - for unit, multiplier in [('TiB', 1024**4), ('GiB', 1024**3), ('MiB', 1024**2), ('KiB', 1024)]: - if bytes_val >= multiplier: - return f"{bytes_val / multiplier:.2f} {unit}" - return f"{bytes_val} B" - - used_size = format_size(used_bytes) - - return { - 'pv_device': pv_device, - 'vg_name': vg_name or 'N/A', - 'total': total_size, - 'free': free_size or '0 B', - 'used': used_size, - 'percent': percent - } - except Exception as e: - print(f"Error parsing LVM PV info: {e}", file=sys.stderr) - return None +from flask import Flask +# Import route registrationfunctions +from api import register_api_routes +from routes import register_ui_routes +from system import update_login_banner -def get_root_filesystem(): - """ - Detect the real root filesystem mount point. - On bootc systems, /sysroot is the real root filesystem. - Otherwise, find the largest real block device filesystem. 
- """ - # Check if /sysroot exists and is a mount point (bootc systems) - try: - result = subprocess.run(['findmnt', '-n', '-o', 'TARGET', '/sysroot'], - capture_output=True, text=True, timeout=5) - if result.returncode == 0 and result.stdout.strip(): - return '/sysroot' - except Exception: - pass - - # Fallback: parse df output to find the real root filesystem - try: - df_result = subprocess.run(['df', '-h'], capture_output=True, text=True, timeout=5) - if df_result.returncode != 0: - return '/' # Fallback to root - - lines = df_result.stdout.strip().split('\n') - if len(lines) < 2: - return '/' # Fallback to root - - # Virtual filesystem types to skip - virtual_fs = ('tmpfs', 'overlay', 'composefs', 'devtmpfs', 'proc', 'sysfs', - 'devpts', 'cgroup', 'pstore', 'bpf', 'tracefs', 'debugfs', - 'configfs', 'fusectl', 'mqueue', 'hugetlbfs', 'efivarfs', 'ramfs', - 'nsfs', 'shm', 'vfat') - - # Boot partitions to skip - boot_paths = ('/boot', '/boot/efi') - - best_fs = None - best_size = 0 - - for line in lines[1:]: # Skip header - parts = line.split() - if len(parts) < 6: - continue - - filesystem = parts[0] - mount_point = parts[5] - size_str = parts[1] - - # Skip virtual filesystems - fs_type = filesystem.split('/')[-1] if '/' in filesystem else filesystem - if any(vfs in fs_type.lower() for vfs in virtual_fs): - continue - - # Skip boot partitions - if mount_point in boot_paths: - continue - - # Skip if not a block device (doesn't start with /dev) - if not filesystem.startswith('/dev'): - continue - - # Prefer LVM root volumes - if '/mapper/' in filesystem and 'root' in filesystem.lower(): - return mount_point - - # Calculate size for comparison (convert to bytes for comparison) - try: - # Parse size like "10G", "500M", etc. - size_val = float(size_str[:-1]) - size_unit = size_str[-1].upper() - if size_unit == 'G': - size_bytes = size_val * 1024 * 1024 * 1024 - elif size_unit == 'M': - size_bytes = size_val * 1024 * 1024 - elif size_unit == 'K': - size_bytes = size_val * 1024 - else: - size_bytes = size_val - - if size_bytes > best_size: - best_size = size_bytes - best_fs = mount_point - except (ValueError, IndexError): - continue - - if best_fs: - return best_fs - - except Exception: - pass - - # Final fallback - return '/' - - -@app.route('/api/system-stats') -@requires_auth -def get_system_stats(): - """API endpoint to get system statistics.""" - try: - stats = {} - - # Disk usage - use detected root filesystem - root_fs = get_root_filesystem() - disk_result = subprocess.run(['df', '-h', root_fs], capture_output=True, text=True) - disk_lines = disk_result.stdout.strip().split('\n') - if len(disk_lines) > 1: - disk_parts = disk_lines[1].split() - stats['disk'] = { - 'total': disk_parts[1], - 'used': disk_parts[2], - 'available': disk_parts[3], - 'percent': int(disk_parts[4].rstrip('%')) - } - else: - stats['disk'] = {'total': 'N/A', 'used': 'N/A', 'available': 'N/A', 'percent': 0} - - # Memory usage - mem_result = subprocess.run(['free', '-h'], capture_output=True, text=True) - mem_lines = mem_result.stdout.strip().split('\n') - if len(mem_lines) > 1: - mem_parts = mem_lines[1].split() - # Parse percentage - mem_total_result = subprocess.run(['free'], capture_output=True, text=True) - mem_total_lines = mem_total_result.stdout.strip().split('\n')[1].split() - mem_percent = int((int(mem_total_lines[2]) / int(mem_total_lines[1])) * 100) - - stats['memory'] = { - 'total': mem_parts[1], - 'used': mem_parts[2], - 'available': mem_parts[6] if len(mem_parts) > 6 else mem_parts[3], - 'percent': 
mem_percent - } - else: - stats['memory'] = {'total': 'N/A', 'used': 'N/A', 'available': 'N/A', 'percent': 0} - - # CPU info - cpu_count_result = subprocess.run(['nproc'], capture_output=True, text=True) - cpu_cores = int(cpu_count_result.stdout.strip()) if cpu_count_result.returncode == 0 else 0 - - # CPU usage - get from top - top_result = subprocess.run(['top', '-bn1'], capture_output=True, text=True) - cpu_usage = 0 - for line in top_result.stdout.split('\n'): - if 'Cpu(s)' in line or '%Cpu' in line: - # Parse line like "%Cpu(s): 2.0 us, 1.0 sy, 0.0 ni, 97.0 id,..." - parts = line.split(',') - for part in parts: - if 'id' in part: - idle = float(part.split()[0]) - cpu_usage = round(100 - idle, 1) - break - break - - stats['cpu'] = { - 'cores': cpu_cores, - 'usage': cpu_usage - } - - # System info - kernel_result = subprocess.run(['uname', '-r'], capture_output=True, text=True) - kernel = kernel_result.stdout.strip() - - hostname = get_current_hostname() - - # Uptime - uptime_result = subprocess.run(['uptime', '-p'], capture_output=True, text=True) - uptime = uptime_result.stdout.strip().replace('up ', '') - - # Load average - loadavg_result = subprocess.run(['cat', '/proc/loadavg'], capture_output=True, text=True) - loadavg_parts = loadavg_result.stdout.strip().split() - - stats['system'] = { - 'kernel': kernel, - 'hostname': hostname, - 'uptime': uptime, - 'load_1': loadavg_parts[0] if len(loadavg_parts) > 0 else '0', - 'load_5': loadavg_parts[1] if len(loadavg_parts) > 1 else '0', - 'load_15': loadavg_parts[2] if len(loadavg_parts) > 2 else '0' - } - - # Network interfaces - ip_result = subprocess.run(['ip', '-4', 'addr', 'show'], capture_output=True, text=True) - interfaces = [] - current_iface = None - # Prefixes to skip (container/virtual interfaces) - skip_prefixes = ('veth', 'docker', 'br-', 'cni', 'flannel', 'cali') - - for line in ip_result.stdout.split('\n'): - line = line.strip() - if line and line[0].isdigit() and ':' in line: - # Interface line - parts = line.split(':') - if len(parts) >= 2: - iface_name = parts[1].strip().split('@')[0] - # Skip virtual/container interfaces - if not iface_name.startswith(skip_prefixes): - current_iface = iface_name - else: - current_iface = None - elif 'inet ' in line and current_iface: - # IP line - ip_addr = line.split()[1].split('/')[0] - if ip_addr != '127.0.0.1': # Skip localhost - interfaces.append({ - 'name': current_iface, - 'ip': ip_addr - }) - current_iface = None - - stats['network'] = { - 'interfaces': interfaces - } - - # LVM Physical Volume information - lvm_info = get_lvm_pv_info() - if lvm_info: - stats['lvm'] = lvm_info - - return jsonify(stats) - - except Exception as e: - return jsonify({'error': f'Error gathering system statistics: {str(e)}'}), 500 - - -@app.route('/api/bootc-status') -@requires_auth -def get_bootc_status(): - """API endpoint to get BootC status and upgrade check information.""" - try: - status_output = '' - upgrade_check_output = '' - - # Get bootc status - try: - status_result = subprocess.run( - ['bootc', 'status'], - capture_output=True, - text=True, - timeout=10 - ) - if status_result.returncode == 0: - status_output = status_result.stdout.strip() - else: - status_output = f"Error: {status_result.stderr.strip()}" - except FileNotFoundError: - status_output = "bootc command not found" - except subprocess.TimeoutExpired: - status_output = "Command timed out" - except Exception as e: - status_output = f"Error: {str(e)}" - - # Get upgrade check - try: - upgrade_result = subprocess.run( - ['bootc', 
'upgrade', '--check'], - capture_output=True, - text=True, - timeout=30 - ) - if upgrade_result.returncode == 0: - upgrade_check_output = upgrade_result.stdout.strip() - else: - upgrade_check_output = f"Error: {upgrade_result.stderr.strip()}" - except FileNotFoundError: - upgrade_check_output = "bootc command not found" - except subprocess.TimeoutExpired: - upgrade_check_output = "Command timed out" - except Exception as e: - upgrade_check_output = f"Error: {str(e)}" - - return jsonify({ - 'status': status_output, - 'upgrade_check': upgrade_check_output - }) - - except Exception as e: - return jsonify({'error': f'Error getting BootC status: {str(e)}'}), 500 - - -@app.route('/api/bootc-upgrade-check', methods=['POST']) -@requires_auth -def bootc_upgrade_check(): - """API endpoint to check for BootC upgrades.""" - try: - result = subprocess.run( - ['bootc', 'upgrade', '--check'], - capture_output=True, - text=True, - timeout=30 - ) - - if result.returncode == 0: - return jsonify({ - 'success': True, - 'output': result.stdout.strip(), - 'message': 'Upgrade check completed' - }) - else: - return jsonify({ - 'success': False, - 'error': result.stderr.strip() or 'Upgrade check failed' - }), 400 - - except FileNotFoundError: - return jsonify({'success': False, 'error': 'bootc command not found'}), 404 - except subprocess.TimeoutExpired: - return jsonify({'success': False, 'error': 'Command timed out'}), 500 - except Exception as e: - return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 - - -@app.route('/api/bootc-upgrade', methods=['POST']) -@requires_auth -def bootc_upgrade(): - """API endpoint to apply BootC upgrade.""" - try: - # Run bootc upgrade (this may take a while) - result = subprocess.run( - ['bootc', 'upgrade'], - capture_output=True, - text=True, - timeout=600 # 10 minutes timeout for upgrade - ) - - if result.returncode == 0: - return jsonify({ - 'success': True, - 'output': result.stdout.strip(), - 'message': 'Upgrade completed successfully. Reboot may be required.' - }) - else: - return jsonify({ - 'success': False, - 'error': result.stderr.strip() or 'Upgrade failed' - }), 400 - - except FileNotFoundError: - return jsonify({'success': False, 'error': 'bootc command not found'}), 404 - except subprocess.TimeoutExpired: - return jsonify({'success': False, 'error': 'Command timed out (upgrade may still be in progress)'}), 500 - except Exception as e: - return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 - - -@app.route('/api/bootc-switch', methods=['POST']) -@requires_auth -def bootc_switch(): - """API endpoint to switch BootC to a different image.""" - try: - data = request.get_json() if request.is_json else {} - image = data.get('image', '').strip() - - if not image: - return jsonify({'success': False, 'error': 'Image reference is required'}), 400 - - # Validate image format (basic check) - if not (image.startswith('quay.io/') or image.startswith('docker.io/') or - ':' in image or '/' in image): - return jsonify({'success': False, 'error': 'Invalid image reference format'}), 400 - - # Run bootc switch (this may take a while) - result = subprocess.run( - ['bootc', 'switch', image], - capture_output=True, - text=True, - timeout=600 # 10 minutes timeout for switch - ) - - if result.returncode == 0: - return jsonify({ - 'success': True, - 'output': result.stdout.strip(), - 'message': f'Switched to {image} successfully. Reboot may be required.' 
- }) - else: - return jsonify({ - 'success': False, - 'error': result.stderr.strip() or 'Switch failed' - }), 400 - - except FileNotFoundError: - return jsonify({'success': False, 'error': 'bootc command not found'}), 404 - except subprocess.TimeoutExpired: - return jsonify({'success': False, 'error': 'Command timed out (switch may still be in progress)'}), 500 - except Exception as e: - return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 - - -@app.route('/api/dmesg') -@requires_auth -def get_dmesg(): - """API endpoint to get kernel log (dmesg).""" - try: - # Run dmesg command to get kernel log - result = subprocess.run( - ['dmesg'], - capture_output=True, - text=True, - timeout=10 - ) - - if result.returncode != 0: - return jsonify({'error': f'Failed to get dmesg: {result.stderr.strip()}'}), 500 - - # Return the log (limit to last 10000 lines to avoid huge responses) - log_lines = result.stdout.strip().split('\n') - if len(log_lines) > 10000: - log_lines = log_lines[-10000:] - - return jsonify({ - 'log': '\n'.join(log_lines), - 'line_count': len(log_lines) - }) - - except subprocess.TimeoutExpired: - return jsonify({'error': 'Command timed out'}), 500 - except Exception as e: - return jsonify({'error': f'Error getting dmesg: {str(e)}'}), 500 - - -@app.route('/api/operator-status') -@requires_auth -def get_operator_status(): - """API endpoint to check if the Jumpstarter operator is ready.""" - try: - # Path to MicroShift kubeconfig - kubeconfig_path = KUBECONFIG_PATH - - # Check if kubeconfig exists - if not os.path.exists(kubeconfig_path): - return jsonify({'ready': False, 'message': 'MicroShift kubeconfig not found. Waiting for MicroShift to start...'}), 200 - - # Check if jumpstarter-operator pod is running and ready - result = subprocess.run( - ['oc', '--kubeconfig', kubeconfig_path, 'get', 'pods', '-n', 'jumpstarter-operator-system', '-o', 'json'], - capture_output=True, - text=True, - check=True, - timeout=10 - ) - - pods_data = json.loads(result.stdout) - - # Look for the operator controller manager pod - for pod in pods_data.get('items', []): - pod_name = pod.get('metadata', {}).get('name', '') - if 'jumpstarter-operator-controller-manager' in pod_name: - # Check if pod is running and ready - status = pod.get('status', {}) - phase = status.get('phase', '') - container_statuses = status.get('containerStatuses', []) - - if phase == 'Running' and container_statuses: - all_ready = all(c.get('ready', False) for c in container_statuses) - if all_ready: - return jsonify({'ready': True, 'message': 'Jumpstarter operator is ready'}), 200 - else: - return jsonify({'ready': False, 'message': 'Jumpstarter operator is starting...'}), 200 - else: - return jsonify({'ready': False, 'message': f'Jumpstarter operator status: {phase}'}), 200 - - # Operator pod not found - return jsonify({'ready': False, 'message': 'Waiting for Jumpstarter operator to deploy...'}), 200 - - except subprocess.CalledProcessError as e: - # Namespace might not exist yet - return jsonify({'ready': False, 'message': 'Waiting for Jumpstarter operator to deploy...'}), 200 - except subprocess.TimeoutExpired: - return jsonify({'ready': False, 'message': 'Timeout checking operator status'}), 200 - except Exception as e: - return jsonify({'ready': False, 'message': 'Checking operator status...'}), 200 - - -@app.route('/api/pods') -@requires_auth -def get_pods(): - """API endpoint to get pod status as JSON.""" - try: - # Path to MicroShift kubeconfig - kubeconfig_path = KUBECONFIG_PATH - - # Check if kubeconfig 
exists - if not os.path.exists(kubeconfig_path): - return jsonify({'error': 'MicroShift kubeconfig not found. Is MicroShift running?'}), 503 - - # Run oc get pods -A -o json with explicit kubeconfig - result = subprocess.run( - ['oc', '--kubeconfig', kubeconfig_path, 'get', 'pods', '-A', '-o', 'json'], - capture_output=True, - text=True, - check=True, - timeout=10 - ) - - pods_data = json.loads(result.stdout) - pods_list = [] - - for pod in pods_data.get('items', []): - metadata = pod.get('metadata', {}) - spec = pod.get('spec', {}) - status = pod.get('status', {}) - - # Calculate ready containers - container_statuses = status.get('containerStatuses', []) - ready_count = sum(1 for c in container_statuses if c.get('ready', False)) - total_count = len(container_statuses) - - # Calculate total restarts - restarts = sum(c.get('restartCount', 0) for c in container_statuses) - - # Check if pod is terminating (has deletionTimestamp) - if metadata.get('deletionTimestamp'): - phase = 'Terminating' - else: - # Determine pod phase/status - phase = status.get('phase', 'Unknown') - - # Check for more specific status from container states - for container in container_statuses: - state = container.get('state', {}) - if 'waiting' in state: - reason = state['waiting'].get('reason', '') - if reason: - phase = reason - break - - # Calculate age - creation_time = metadata.get('creationTimestamp', '') - age = calculate_age(creation_time) - - pods_list.append({ - 'namespace': metadata.get('namespace', 'default'), - 'name': metadata.get('name', 'unknown'), - 'ready': f"{ready_count}/{total_count}", - 'status': phase, - 'restarts': restarts, - 'age': age, - 'node': spec.get('nodeName', 'N/A') - }) - - return jsonify({'pods': pods_list}) - - except subprocess.CalledProcessError as e: - error_msg = e.stderr.strip() if e.stderr else str(e) - return jsonify({'error': f'Failed to get pods: {error_msg}'}), 500 - except subprocess.TimeoutExpired: - return jsonify({'error': 'Command timed out'}), 500 - except Exception as e: - return jsonify({'error': f'Error: {str(e)}'}), 500 - - -@app.route('/api/routes') -@requires_auth -def get_routes(): - """API endpoint to get OpenShift routes as JSON.""" - try: - # Path to MicroShift kubeconfig - kubeconfig_path = KUBECONFIG_PATH - - # Check if kubeconfig exists - if not os.path.exists(kubeconfig_path): - return jsonify({'error': 'MicroShift kubeconfig not found. 
Is MicroShift running?'}), 503 - - # Run oc get routes -A -o json with explicit kubeconfig - result = subprocess.run( - ['oc', '--kubeconfig', kubeconfig_path, 'get', 'routes', '-A', '-o', 'json'], - capture_output=True, - text=True, - check=True, - timeout=10 - ) - - routes_data = json.loads(result.stdout) - routes_list = [] - - for route in routes_data.get('items', []): - metadata = route.get('metadata', {}) - spec = route.get('spec', {}) - status = route.get('status', {}) - - # Get route host - host = spec.get('host', 'N/A') - - # Get target service and port - to = spec.get('to', {}) - service_name = to.get('name', 'N/A') - - port = spec.get('port', {}) - target_port = port.get('targetPort', 'N/A') if port else 'N/A' - - # Get TLS configuration - tls = spec.get('tls', {}) - tls_termination = tls.get('termination', 'None') if tls else 'None' - - # Get ingress status - ingresses = status.get('ingress', []) - admitted = 'False' - if ingresses: - for ingress in ingresses: - conditions = ingress.get('conditions', []) - for condition in conditions: - if condition.get('type') == 'Admitted': - admitted = 'True' if condition.get('status') == 'True' else 'False' - break - - # Calculate age - creation_time = metadata.get('creationTimestamp', '') - age = calculate_age(creation_time) - - routes_list.append({ - 'namespace': metadata.get('namespace', 'default'), - 'name': metadata.get('name', 'unknown'), - 'host': host, - 'service': service_name, - 'port': str(target_port), - 'tls': tls_termination, - 'admitted': admitted, - 'age': age - }) - - return jsonify({'routes': routes_list}) - - except subprocess.CalledProcessError as e: - error_msg = e.stderr.strip() if e.stderr else str(e) - return jsonify({'error': f'Failed to get routes: {error_msg}'}), 500 - except subprocess.TimeoutExpired: - return jsonify({'error': 'Command timed out'}), 500 - except Exception as e: - return jsonify({'error': f'Error: {str(e)}'}), 500 - - -@app.route('/api/pods//', methods=['DELETE']) -@requires_auth -def delete_pod(namespace, pod_name): - """API endpoint to delete a pod (causing it to restart).""" - try: - # Path to MicroShift kubeconfig - kubeconfig_path = KUBECONFIG_PATH - - # Check if kubeconfig exists - if not os.path.exists(kubeconfig_path): - return jsonify({'success': False, 'error': 'MicroShift kubeconfig not found. Is MicroShift running?'}), 503 - - # Run oc delete pod with explicit kubeconfig - subprocess.run( - ['oc', '--kubeconfig', kubeconfig_path, 'delete', 'pod', pod_name, '-n', namespace], - capture_output=True, - text=True, - check=True, - timeout=10 - ) - - return jsonify({'success': True, 'message': f'Pod {pod_name} deleted successfully'}) - - except subprocess.CalledProcessError as e: - error_msg = e.stderr.strip() if e.stderr else str(e) - return jsonify({'success': False, 'error': f'Failed to delete pod: {error_msg}'}), 500 - except subprocess.TimeoutExpired: - return jsonify({'success': False, 'error': 'Command timed out'}), 500 - except Exception as e: - return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 - - -@app.route('/logs//') -@requires_auth -def stream_logs(namespace, pod_name): - """Stream pod logs in real-time.""" - kubeconfig_path = KUBECONFIG_PATH - - # Check if kubeconfig exists - if not os.path.exists(kubeconfig_path): - return "MicroShift kubeconfig not found. 
Is MicroShift running?", 503 - - def generate(): - """Generator function to stream logs.""" - process = None - try: - # Start oc logs -f process - process = subprocess.Popen( - ['oc', '--kubeconfig', kubeconfig_path, 'logs', '-f', '-n', namespace, pod_name], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - text=True, - bufsize=1 - ) - - # Stream output line by line - for line in iter(process.stdout.readline, ''): - if not line: - break - yield f"{line}" - - except Exception as e: - yield f"Error streaming logs: {str(e)}\n" - finally: - # Clean up process when connection closes - if process: - try: - process.terminate() - process.wait(timeout=5) - except Exception: - process.kill() - - # Return streaming response with HTML wrapper - html_header = f""" - - - Logs: {namespace}/{pod_name} - - - -
-        [log viewer header markup elided: "📋 Pod Logs" / "Namespace: {namespace} | Pod: {pod_name}"; HTML tags lost in extraction]
""" - - html_footer = """
- - -""" - - def generate_with_html(): - yield html_header - for line in generate(): - yield line.replace('<', '<').replace('>', '>') - yield html_footer - - return Response(generate_with_html(), mimetype='text/html') - - -@app.route('/kubeconfig') -@requires_auth -def download_kubeconfig(): - """Serve the kubeconfig file for download with nip.io hostname and insecure TLS.""" - kubeconfig_path = Path(KUBECONFIG_PATH) - - if not kubeconfig_path.exists(): - return "Kubeconfig file not found", 404 - - try: - # Read the original kubeconfig - with open(kubeconfig_path, 'r') as f: - kubeconfig_content = f.read() - - # Always use nip.io format based on default route IP - default_ip = get_default_route_ip() - if default_ip: - nip_hostname = f"jumpstarter.{default_ip}.nip.io" - else: - # Fallback to current hostname if IP detection fails - nip_hostname = get_current_hostname() - - # Extract the original server hostname (likely localhost) before replacing - # This is needed for tls-server-name to match the certificate - original_server_match = re.search(r'server:\s+https://([^:]+):(\d+)', kubeconfig_content) - original_hostname = 'localhost' # Default fallback - if original_server_match: - original_hostname = original_server_match.group(1) - - # Replace localhost with the nip.io hostname - kubeconfig_content = re.sub( - r'server:\s+https://localhost:(\d+)', - f'server: https://{nip_hostname}:\\1', - kubeconfig_content - ) - - # Keep the CA certificate fields (certificate-authority-data or certificate-authority) - # They are needed for certificate chain verification - - # Remove insecure-skip-tls-verify if it exists (we'll replace it with tls-server-name) - kubeconfig_content = re.sub( - r'^\s+insecure-skip-tls-verify:\s+.*\n', - '', - kubeconfig_content, - flags=re.MULTILINE - ) - - # Add tls-server-name to verify the CA but allow hostname mismatch - # This tells the client to verify the certificate as if it were issued for the original hostname - # (e.g., localhost), even though we're connecting via nip.io hostname - kubeconfig_content = re.sub( - r'(server:\s+https://[^\n]+\n)', - f'\\1 tls-server-name: {original_hostname}\n', - kubeconfig_content - ) - - # Create a BytesIO object to send as file - kubeconfig_bytes = BytesIO(kubeconfig_content.encode('utf-8')) - kubeconfig_bytes.seek(0) - - return send_file( - kubeconfig_bytes, - as_attachment=True, - download_name='kubeconfig', - mimetype='application/octet-stream' - ) - except Exception as e: - return f"Error reading kubeconfig: {str(e)}", 500 - - -def calculate_age(creation_timestamp): - """Calculate age from Kubernetes timestamp.""" - if not creation_timestamp: - return 'N/A' - - try: - from datetime import datetime, timezone - - # Parse ISO 8601 timestamp - created = datetime.fromisoformat(creation_timestamp.replace('Z', '+00:00')) - now = datetime.now(timezone.utc) - delta = now - created - - # Format age - seconds = int(delta.total_seconds()) - if seconds < 60: - return f'{seconds}s' - elif seconds < 3600: - return f'{seconds // 60}m' - elif seconds < 86400: - hours = seconds // 3600 - minutes = (seconds % 3600) // 60 - return f'{hours}h{minutes}m' if minutes > 0 else f'{hours}h' - else: - days = seconds // 86400 - hours = (seconds % 86400) // 3600 - return f'{days}d{hours}h' if hours > 0 else f'{days}d' - except Exception as e: - print(f"Error calculating age: {e}", file=sys.stderr) - return 'N/A' - - -def get_default_route_ip(): - """Get the IP address of the default route interface.""" - try: - # Get default route - result = 
subprocess.run( - ['ip', 'route', 'show', 'default'], - capture_output=True, - text=True, - check=True - ) - - # Parse output: "default via X.X.X.X dev ethX ..." - lines = result.stdout.strip().split('\n') - if not lines: - return None - - parts = lines[0].split() - if len(parts) < 5: - return None - - # Find the device name - dev_idx = parts.index('dev') if 'dev' in parts else None - if dev_idx is None or dev_idx + 1 >= len(parts): - return None - - dev_name = parts[dev_idx + 1] - - # Get IP address for this device - result = subprocess.run( - ['ip', '-4', 'addr', 'show', dev_name], - capture_output=True, - text=True, - check=True - ) - - # Parse: " inet 192.168.1.10/24 ..." - for line in result.stdout.split('\n'): - line = line.strip() - if line.startswith('inet '): - ip_with_mask = line.split()[1] - ip = ip_with_mask.split('/')[0] - return ip.replace('.', '-') # Format for nip.io - - return None - except Exception as e: - print(f"Error getting default route IP: {e}", file=sys.stderr) - return None - - -def get_current_hostname(): - """Get the current system hostname.""" - try: - return socket.gethostname() - except Exception as e: - print(f"Error getting hostname: {e}", file=sys.stderr) - return "unknown" - - -def get_jumpstarter_config(): - """Get the current Jumpstarter CR configuration from the cluster.""" - default_ip = get_default_route_ip() - default_base_domain = f"jumpstarter.{default_ip}.nip.io" if default_ip else "jumpstarter.local" - - defaults = { - 'base_domain': default_base_domain, - 'image': 'quay.io/jumpstarter-dev/jumpstarter-controller:latest', - 'image_pull_policy': 'IfNotPresent' - } - - try: - # Path to MicroShift kubeconfig - kubeconfig_path = KUBECONFIG_PATH - - # Check if kubeconfig exists - if not os.path.exists(kubeconfig_path): - return defaults - - # Try to get existing Jumpstarter CR - result = subprocess.run( - ['oc', '--kubeconfig', kubeconfig_path, 'get', 'jumpstarter', 'jumpstarter', '-n', 'default', '-o', 'json'], - capture_output=True, - text=True, - timeout=5 - ) - - if result.returncode == 0: - cr_data = json.loads(result.stdout) - spec = cr_data.get('spec', {}) - controller = spec.get('controller', {}) - - return { - 'base_domain': spec.get('baseDomain', defaults['base_domain']), - 'image': controller.get('image', defaults['image']), - 'image_pull_policy': controller.get('imagePullPolicy', defaults['image_pull_policy']) - } - else: - # CR doesn't exist yet, return defaults - return defaults - - except Exception as e: - print(f"Error getting Jumpstarter config: {e}", file=sys.stderr) - return defaults - - -def set_root_password(password): - """Set the root user password using chpasswd.""" - try: - # Use chpasswd to set password (more reliable than passwd for scripting) - process = subprocess.Popen( - ['chpasswd'], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - stdout, stderr = process.communicate(input=f'root:{password}\n') - - if process.returncode != 0: - error_msg = stderr.strip() if stderr else "Unknown error" - print(f"Error setting root password: {error_msg}", file=sys.stderr) - return False, error_msg - - return True, "Success" - except Exception as e: - print(f"Error setting root password: {e}", file=sys.stderr) - return False, str(e) - - -def get_ssh_authorized_keys(): - """Read existing SSH authorized keys from /root/.ssh/authorized_keys.""" - ssh_dir = Path('/root/.ssh') - authorized_keys_path = ssh_dir / 'authorized_keys' - - if authorized_keys_path.exists(): - try: - with 
open(authorized_keys_path, 'r') as f: - return f.read().strip() - except Exception as e: - print(f"Error reading authorized_keys: {e}", file=sys.stderr) - return "" - return "" - - -def set_ssh_authorized_keys(keys_content): - """Set SSH authorized keys in /root/.ssh/authorized_keys with proper permissions.""" - ssh_dir = Path('/root/.ssh') - authorized_keys_path = ssh_dir / 'authorized_keys' - - try: - # Create .ssh directory if it doesn't exist - ssh_dir.mkdir(mode=0o700, parents=True, exist_ok=True) - - # Write authorized_keys file - keys_content = keys_content.strip() - if keys_content: - with open(authorized_keys_path, 'w') as f: - f.write(keys_content) - if not keys_content.endswith('\n'): - f.write('\n') - - # Set proper permissions: .ssh directory = 700, authorized_keys = 600 - os.chmod(ssh_dir, 0o700) - os.chmod(authorized_keys_path, 0o600) - - return True, "SSH authorized keys updated successfully" - else: - # If empty, remove the file if it exists - if authorized_keys_path.exists(): - authorized_keys_path.unlink() - # Ensure .ssh directory still has correct permissions - os.chmod(ssh_dir, 0o700) - return True, "SSH authorized keys cleared" - except Exception as e: - print(f"Error setting SSH authorized keys: {e}", file=sys.stderr) - return False, str(e) - - -def update_login_banner(): - """Update the login banner with the web UI URL.""" - try: - default_ip = get_default_route_ip() - if default_ip: - hostname = f"jumpstarter.{default_ip}.nip.io" - port = 8880 - url = f"http://{hostname}:{port}" - - # Format URL line to fit properly in the box (62 chars content width) - url_line = f" → {url}" - - banner = f""" -╔══════════════════════════════════════════════════════════════════╗ -║ ║ -║ Jumpstarter Controller Community Edition ║ -║ Powered by MicroShift ║ -║ ║ -║ Web Configuration UI: ║ -║ {url_line:<64}║ -║ ║ -║ Login with: root / ║ -║ ║ -╚══════════════════════════════════════════════════════════════════╝ - -""" - - # Write to /etc/issue for pre-login banner - with open('/etc/issue', 'w') as f: - f.write(banner) - - return True, "Success" - else: - return False, "Could not determine IP address" - except Exception as e: - print(f"Error updating login banner: {e}", file=sys.stderr) - return False, str(e) - - -def apply_jumpstarter_cr(base_domain, image, image_pull_policy='IfNotPresent'): - """Apply Jumpstarter Custom Resource using oc.""" - try: - # Path to MicroShift kubeconfig - kubeconfig_path = KUBECONFIG_PATH - - # Check if kubeconfig exists - if not os.path.exists(kubeconfig_path): - return False, 'MicroShift kubeconfig not found. Is MicroShift running?' 
- - # Build the CR YAML - cr = { - 'apiVersion': 'operator.jumpstarter.dev/v1alpha1', - 'kind': 'Jumpstarter', - 'metadata': { - 'name': 'jumpstarter', - 'namespace': 'default' - }, - 'spec': { - 'baseDomain': base_domain, - 'controller': { - 'grpc': { - 'endpoints': [ - { - 'address': f'grpc.{base_domain}', - 'route': { - 'enabled': True - } - } - ] - }, - 'image': image, - 'imagePullPolicy': image_pull_policy, - 'replicas': 1 - }, - 'routers': { - 'grpc': { - 'endpoints': [ - { - 'address': f'router.{base_domain}', - 'route': { - 'enabled': True - } - } - ] - }, - 'image': image, - 'imagePullPolicy': image_pull_policy, - 'replicas': 1 - }, - 'useCertManager': True - } - } - - # Write CR to temporary file - with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f: - yaml_content = json_to_yaml(cr) - f.write(yaml_content) - temp_file = f.name - - try: - # Apply using oc with explicit kubeconfig - result = subprocess.run( - ['oc', '--kubeconfig', kubeconfig_path, 'apply', '-f', temp_file], - capture_output=True, - text=True, - check=True - ) - return True, result.stdout.strip() - finally: - # Clean up temp file - try: - os.unlink(temp_file) - except Exception: - pass - - except subprocess.CalledProcessError as e: - error_msg = e.stderr.strip() if e.stderr else str(e) - print(f"Error applying Jumpstarter CR: {error_msg}", file=sys.stderr) - return False, error_msg - except Exception as e: - print(f"Error applying Jumpstarter CR: {e}", file=sys.stderr) - return False, str(e) - - -def json_to_yaml(obj, indent=0): - """Convert a JSON object to YAML format (simple implementation).""" - lines = [] - indent_str = ' ' * indent - - if isinstance(obj, dict): - for key, value in obj.items(): - if isinstance(value, (dict, list)): - lines.append(f"{indent_str}{key}:") - lines.append(json_to_yaml(value, indent + 1)) - else: - lines.append(f"{indent_str}{key}: {yaml_value(value)}") - elif isinstance(obj, list): - for item in obj: - if isinstance(item, (dict, list)): - lines.append(f"{indent_str}-") - lines.append(json_to_yaml(item, indent + 1)) - else: - lines.append(f"{indent_str}- {yaml_value(item)}") - - return '\n'.join(lines) - +# Create Flask app +app = Flask(__name__) -def yaml_value(value): - """Format a value for YAML output.""" - if value is None: - return 'null' - elif isinstance(value, bool): - return 'true' if value else 'false' - elif isinstance(value, str): - # Quote strings that contain special characters - if ':' in value or '#' in value or value.startswith('-'): - return f'"{value}"' - return value - else: - return str(value) +# Register all routes +register_ui_routes(app) +register_api_routes(app) def main(): diff --git a/deploy/microshift-bootc/config-svc/auth.py b/deploy/microshift-bootc/config-svc/auth.py new file mode 100644 index 00000000..e8f32625 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/auth.py @@ -0,0 +1,139 @@ +"""Authentication and validation utilities for Jumpstarter Configuration UI.""" + +import re +import subprocess +import sys +from functools import wraps + +from flask import request, Response + + +def validate_hostname(hostname): + """ + Validate hostname according to RFC 1123 standards. 
+ + Rules: + - Total length <= 253 characters + - Each label 1-63 characters + - Labels match /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/i (case-insensitive) + - No leading/trailing hyphen in labels + - Reject empty or illegal characters + - Optionally reject trailing dot + + Returns: (is_valid: bool, error_message: str) + """ + if not hostname: + return False, "Hostname cannot be empty" + + # Remove trailing dot if present (optional rejection) + if hostname.endswith('.'): + hostname = hostname.rstrip('.') + + # Check total length + if len(hostname) > 253: + return False, f"Hostname too long: {len(hostname)} characters (maximum 253)" + + # Split into labels + labels = hostname.split('.') + + # Check each label + label_pattern = re.compile(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', re.IGNORECASE) + + for i, label in enumerate(labels): + if not label: + return False, f"Empty label at position {i+1} (consecutive dots not allowed)" + + if len(label) > 63: + return False, f"Label '{label}' too long: {len(label)} characters (maximum 63)" + + if not label_pattern.match(label): + return False, f"Label '{label}' contains invalid characters. Labels must start and end with alphanumeric characters and can contain hyphens in between" + + # Additional check: no leading/trailing hyphen (pattern should catch this, but be explicit) + if label.startswith('-') or label.endswith('-'): + return False, f"Label '{label}' cannot start or end with a hyphen" + + return True, "" + + +def validate_password(password): + """ + Validate password to prevent chpasswd injection and enforce security. + + Rules: + - Reject newline characters ('\n') + - Reject colon characters (':') + - Minimum length: 8 characters + - Maximum length: 128 characters (reasonable limit) + + Returns: (is_valid: bool, error_message: str) + """ + if not password: + return False, "Password cannot be empty" + + # Check for forbidden characters + if '\n' in password: + return False, "Password cannot contain newline characters" + + if ':' in password: + return False, "Password cannot contain colon characters" + + # Check length + if len(password) < 8: + return False, f"Password too short: {len(password)} characters (minimum 8)" + + if len(password) > 128: + return False, f"Password too long: {len(password)} characters (maximum 128)" + + return True, "" + + +def check_auth(username, password): + """Check if a username/password combination is valid using PAM.""" + if username != 'root': + return False + + try: + # Try using PAM authentication first + import pam + p = pam.pam() + return p.authenticate(username, password) + except ImportError: + # Fallback: use subprocess to authenticate via su + try: + result = subprocess.run( + ['su', username, '-c', 'true'], + input=password.encode(), + capture_output=True, + timeout=5 + ) + return result.returncode == 0 + except Exception as e: + print(f"Authentication error: {e}", file=sys.stderr) + return False + + +def is_default_password(): + """Check if the root password is still the default 'jumpstarter'.""" + return check_auth('root', 'jumpstarter') + + +def authenticate(): + """Send a 401 response that enables basic auth.""" + return Response( + 'Authentication required. 
Please login with root credentials.', + 401, + {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'} + ) + + +def requires_auth(f): + """Decorator to require HTTP Basic Authentication.""" + @wraps(f) + def decorated(*args, **kwargs): + auth = request.authorization + if not auth or not check_auth(auth.username, auth.password): + return authenticate() + return f(*args, **kwargs) + return decorated + diff --git a/deploy/microshift-bootc/config-svc/config-svc.service b/deploy/microshift-bootc/config-svc/config-svc.service index 027fef8a..12a64385 100644 --- a/deploy/microshift-bootc/config-svc/config-svc.service +++ b/deploy/microshift-bootc/config-svc/config-svc.service @@ -7,7 +7,7 @@ Before=getty@.service systemd-user-sessions.service [Service] Type=simple -ExecStart=/usr/bin/python3 /usr/local/bin/config-svc +ExecStart=/usr/local/bin/config-svc Restart=on-failure RestartSec=5 Environment="PORT=8880" diff --git a/deploy/microshift-bootc/config-svc/pyproject.toml b/deploy/microshift-bootc/config-svc/pyproject.toml new file mode 100644 index 00000000..e8a67575 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/pyproject.toml @@ -0,0 +1,75 @@ +[project] +name = "jumpstarter-config-svc" +version = "1.0.0" +description = "Jumpstarter Configuration Web UI for MicroShift" +readme = "README.md" +requires-python = ">=3.9" +license = {text = "Apache-2.0"} +authors = [ + {name = "Jumpstarter Contributors"} +] +keywords = ["jumpstarter", "microshift", "kubernetes", "configuration", "web-ui"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: System :: Systems Administration", +] + +dependencies = [ + "flask>=2.3.0,<4.0.0", +] + +[project.optional-dependencies] +auth = [ + "python-pam>=2.0.0", # Optional, falls back to subprocess if not available +] + +[project.scripts] +jumpstarter-config-svc = "app_new:main" + +[project.urls] +Homepage = "https://jumpstarter.dev" +Documentation = "https://docs.jumpstarter.dev" +Repository = "https://github.com/jumpstarter-dev/jumpstarter-controller" +Issues = "https://github.com/jumpstarter-dev/jumpstarter-controller/issues" + +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +packages = ["config-svc"] + +[tool.setuptools.package-data] +config-svc = [ + "templates/*.html", + "templates/*.css", +] + +[tool.black] +line-length = 120 +target-version = ['py39', 'py310', 'py311', 'py312'] + +[tool.isort] +profile = "black" +line_length = 120 + +[tool.pylint] +max-line-length = 120 +disable = [ + "C0111", # missing-docstring + "C0103", # invalid-name +] + +[tool.mypy] +python_version = "3.9" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = false + diff --git a/deploy/microshift-bootc/config-svc/routes.py b/deploy/microshift-bootc/config-svc/routes.py new file mode 100644 index 00000000..b4d3ede2 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/routes.py @@ -0,0 +1,116 @@ +"""Main UI route handlers for Jumpstarter Configuration UI.""" + +from flask import render_template, render_template_string, request, Response +from pathlib import Path + +from auth import requires_auth, is_default_password, validate_hostname +from system import 
( + get_current_hostname, get_jumpstarter_config, + get_ssh_authorized_keys, apply_jumpstarter_cr +) + + +def register_ui_routes(app): + """Register all UI routes with the Flask app.""" + + # Load templates once + templates_dir = Path(__file__).parent / 'templates' + + with open(templates_dir / 'password_required.html', 'r') as f: + PASSWORD_REQUIRED_TEMPLATE = f.read() + + with open(templates_dir / 'index.html', 'r') as f: + HTML_TEMPLATE = f.read() + + with open(templates_dir / 'styles.css', 'r') as f: + CSS_CONTENT = f.read() + + @app.route('/static/styles.css') + def serve_css(): + """Serve the consolidated CSS stylesheet.""" + return Response(CSS_CONTENT, mimetype='text/css') + + @app.route('/logout') + def logout(): + """Logout endpoint that forces re-authentication.""" + return Response( + 'Logged out. Please close this dialog to log in again.', + 401, + {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'} + ) + + @app.route('/') + @requires_auth + def index(): + """Serve the main configuration page.""" + current_hostname = get_current_hostname() + jumpstarter_config = get_jumpstarter_config() + password_required = is_default_password() + ssh_keys = get_ssh_authorized_keys() + + # Force password change if still using default + if password_required: + return render_template_string( + PASSWORD_REQUIRED_TEMPLATE, + messages=[], + current_hostname=current_hostname, + ssh_keys=ssh_keys + ) + + return render_template_string( + HTML_TEMPLATE, + messages=[], + current_hostname=current_hostname, + jumpstarter_config=jumpstarter_config, + password_required=password_required, + ssh_keys=ssh_keys + ) + + @app.route('/configure-jumpstarter', methods=['POST']) + @requires_auth + def configure_jumpstarter(): + """Handle Jumpstarter CR configuration request (legacy HTML form submission).""" + base_domain = request.form.get('baseDomain', '').strip() + image = request.form.get('image', '').strip() + image_pull_policy = request.form.get('imagePullPolicy', 'IfNotPresent').strip() + + current_hostname = get_current_hostname() + jumpstarter_config = get_jumpstarter_config() + password_required = is_default_password() + + messages = [] + + if not base_domain: + messages.append({'type': 'error', 'text': 'Base domain is required'}) + else: + # Validate base domain format (same as hostname validation) + domain_valid, domain_error = validate_hostname(base_domain) + if not domain_valid: + messages.append({'type': 'error', 'text': f'Invalid base domain: {domain_error}'}) + elif not image: + messages.append({'type': 'error', 'text': 'Controller image is required'}) + else: + # Apply the Jumpstarter CR + cr_success, cr_message = apply_jumpstarter_cr(base_domain, image, image_pull_policy) + + if cr_success: + msg = f'Jumpstarter configuration applied successfully! 
Base Domain: {base_domain}, Image: {image}' + messages.append({'type': 'success', 'text': msg}) + # Update config to show what was just applied + jumpstarter_config = { + 'base_domain': base_domain, + 'image': image, + 'image_pull_policy': image_pull_policy + } + else: + messages.append({'type': 'error', 'text': f'Failed to apply Jumpstarter CR: {cr_message}'}) + + return render_template_string( + HTML_TEMPLATE, + messages=messages, + current_hostname=current_hostname, + jumpstarter_config=jumpstarter_config, + password_required=password_required, + ssh_keys=get_ssh_authorized_keys() + ) + diff --git a/deploy/microshift-bootc/config-svc/system.py b/deploy/microshift-bootc/config-svc/system.py new file mode 100644 index 00000000..3abec602 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/system.py @@ -0,0 +1,569 @@ +"""System utility functions for Jumpstarter Configuration UI.""" + +import json +import os +import re +import socket +import subprocess +import sys +import tempfile +from datetime import datetime, timezone +from pathlib import Path + +# MicroShift kubeconfig path +KUBECONFIG_PATH = '/var/lib/microshift/resources/kubeadmin/kubeconfig' + + +def calculate_age(creation_timestamp): + """Calculate age from Kubernetes timestamp.""" + if not creation_timestamp: + return 'N/A' + + try: + # Parse ISO 8601 timestamp + created = datetime.fromisoformat(creation_timestamp.replace('Z', '+00:00')) + now = datetime.now(timezone.utc) + delta = now - created + + # Format age + seconds = int(delta.total_seconds()) + if seconds < 60: + return f'{seconds}s' + elif seconds < 3600: + return f'{seconds // 60}m' + elif seconds < 86400: + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + return f'{hours}h{minutes}m' if minutes > 0 else f'{hours}h' + else: + days = seconds // 86400 + hours = (seconds % 86400) // 3600 + return f'{days}d{hours}h' if hours > 0 else f'{days}d' + except Exception as e: + print(f"Error calculating age: {e}", file=sys.stderr) + return 'N/A' + + +def get_default_route_ip(): + """Get the IP address of the default route interface.""" + try: + # Get default route + result = subprocess.run( + ['ip', 'route', 'show', 'default'], + capture_output=True, + text=True, + check=True + ) + + # Parse output: "default via X.X.X.X dev ethX ..." + lines = result.stdout.strip().split('\n') + if not lines: + return None + + parts = lines[0].split() + if len(parts) < 5: + return None + + # Find the device name + dev_idx = parts.index('dev') if 'dev' in parts else None + if dev_idx is None or dev_idx + 1 >= len(parts): + return None + + dev_name = parts[dev_idx + 1] + + # Get IP address for this device + result = subprocess.run( + ['ip', '-4', 'addr', 'show', dev_name], + capture_output=True, + text=True, + check=True + ) + + # Parse: " inet 192.168.1.10/24 ..." 
+ for line in result.stdout.split('\n'): + line = line.strip() + if line.startswith('inet '): + ip_with_mask = line.split()[1] + ip = ip_with_mask.split('/')[0] + return ip.replace('.', '-') # Format for nip.io + + return None + except Exception as e: + print(f"Error getting default route IP: {e}", file=sys.stderr) + return None + + +def get_current_hostname(): + """Get the current system hostname.""" + try: + return socket.gethostname() + except Exception as e: + print(f"Error getting hostname: {e}", file=sys.stderr) + return "unknown" + + +def get_jumpstarter_config(): + """Get the current Jumpstarter CR configuration from the cluster.""" + default_ip = get_default_route_ip() + default_base_domain = f"jumpstarter.{default_ip}.nip.io" if default_ip else "jumpstarter.local" + + defaults = { + 'base_domain': default_base_domain, + 'image': 'quay.io/jumpstarter-dev/jumpstarter-controller:latest', + 'image_pull_policy': 'IfNotPresent' + } + + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return defaults + + # Try to get existing Jumpstarter CR + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'get', 'jumpstarter', 'jumpstarter', '-n', 'default', '-o', 'json'], + capture_output=True, + text=True, + timeout=5 + ) + + if result.returncode == 0: + cr_data = json.loads(result.stdout) + spec = cr_data.get('spec', {}) + controller = spec.get('controller', {}) + + return { + 'base_domain': spec.get('baseDomain', defaults['base_domain']), + 'image': controller.get('image', defaults['image']), + 'image_pull_policy': controller.get('imagePullPolicy', defaults['image_pull_policy']) + } + else: + # CR doesn't exist yet, return defaults + return defaults + + except Exception as e: + print(f"Error getting Jumpstarter config: {e}", file=sys.stderr) + return defaults + + +def set_root_password(password): + """Set the root user password using chpasswd.""" + try: + # Use chpasswd to set password (more reliable than passwd for scripting) + process = subprocess.Popen( + ['chpasswd'], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + stdout, stderr = process.communicate(input=f'root:{password}\n') + + if process.returncode != 0: + error_msg = stderr.strip() if stderr else "Unknown error" + print(f"Error setting root password: {error_msg}", file=sys.stderr) + return False, error_msg + + return True, "Success" + except Exception as e: + print(f"Error setting root password: {e}", file=sys.stderr) + return False, str(e) + + +def get_ssh_authorized_keys(): + """Read existing SSH authorized keys from /root/.ssh/authorized_keys.""" + ssh_dir = Path('/root/.ssh') + authorized_keys_path = ssh_dir / 'authorized_keys' + + if authorized_keys_path.exists(): + try: + with open(authorized_keys_path, 'r') as f: + return f.read().strip() + except Exception as e: + print(f"Error reading authorized_keys: {e}", file=sys.stderr) + return "" + return "" + + +def set_ssh_authorized_keys(keys_content): + """Set SSH authorized keys in /root/.ssh/authorized_keys with proper permissions.""" + ssh_dir = Path('/root/.ssh') + authorized_keys_path = ssh_dir / 'authorized_keys' + + try: + # Create .ssh directory if it doesn't exist + ssh_dir.mkdir(mode=0o700, parents=True, exist_ok=True) + + # Write authorized_keys file + keys_content = keys_content.strip() + if keys_content: + with open(authorized_keys_path, 'w') as f: + f.write(keys_content) + if not 
keys_content.endswith('\n'): + f.write('\n') + + # Set proper permissions: .ssh directory = 700, authorized_keys = 600 + os.chmod(ssh_dir, 0o700) + os.chmod(authorized_keys_path, 0o600) + + return True, "SSH authorized keys updated successfully" + else: + # If empty, remove the file if it exists + if authorized_keys_path.exists(): + authorized_keys_path.unlink() + # Ensure .ssh directory still has correct permissions + os.chmod(ssh_dir, 0o700) + return True, "SSH authorized keys cleared" + except Exception as e: + print(f"Error setting SSH authorized keys: {e}", file=sys.stderr) + return False, str(e) + + +def update_login_banner(): + """Update the login banner with the web UI URL.""" + try: + default_ip = get_default_route_ip() + if default_ip: + hostname = f"jumpstarter.{default_ip}.nip.io" + port = 8880 + url = f"http://{hostname}:{port}" + + # Format URL line to fit properly in the box (62 chars content width) + url_line = f" → {url}" + + banner = f""" +╔══════════════════════════════════════════════════════════════════╗ +║ ║ +║ Jumpstarter Controller Community Edition ║ +║ Powered by MicroShift ║ +║ ║ +║ Web Configuration UI: ║ +║ {url_line:<64}║ +║ ║ +║ Login with: root / ║ +║ ║ +╚══════════════════════════════════════════════════════════════════╝ + +""" + + # Write to /etc/issue for pre-login banner + with open('/etc/issue', 'w') as f: + f.write(banner) + + return True, "Success" + else: + return False, "Could not determine IP address" + except Exception as e: + print(f"Error updating login banner: {e}", file=sys.stderr) + return False, str(e) + + +def apply_jumpstarter_cr(base_domain, image, image_pull_policy='IfNotPresent'): + """Apply Jumpstarter Custom Resource using oc.""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return False, 'MicroShift kubeconfig not found. Is MicroShift running?' 
+ + # Build the CR YAML + cr = { + 'apiVersion': 'operator.jumpstarter.dev/v1alpha1', + 'kind': 'Jumpstarter', + 'metadata': { + 'name': 'jumpstarter', + 'namespace': 'default' + }, + 'spec': { + 'baseDomain': base_domain, + 'controller': { + 'grpc': { + 'endpoints': [ + { + 'address': f'grpc.{base_domain}', + 'route': { + 'enabled': True + } + } + ] + }, + 'image': image, + 'imagePullPolicy': image_pull_policy, + 'replicas': 1 + }, + 'routers': { + 'grpc': { + 'endpoints': [ + { + 'address': f'router.{base_domain}', + 'route': { + 'enabled': True + } + } + ] + }, + 'image': image, + 'imagePullPolicy': image_pull_policy, + 'replicas': 1 + }, + 'useCertManager': True + } + } + + # Write CR to temporary file + with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f: + yaml_content = json_to_yaml(cr) + f.write(yaml_content) + temp_file = f.name + + try: + # Apply using oc with explicit kubeconfig + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'apply', '-f', temp_file], + capture_output=True, + text=True, + check=True + ) + return True, result.stdout.strip() + finally: + # Clean up temp file + try: + os.unlink(temp_file) + except Exception: + pass + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.strip() if e.stderr else str(e) + print(f"Error applying Jumpstarter CR: {error_msg}", file=sys.stderr) + return False, error_msg + except Exception as e: + print(f"Error applying Jumpstarter CR: {e}", file=sys.stderr) + return False, str(e) + + +def json_to_yaml(obj, indent=0): + """Convert a JSON object to YAML format (simple implementation).""" + lines = [] + indent_str = ' ' * indent + + if isinstance(obj, dict): + for key, value in obj.items(): + if isinstance(value, (dict, list)): + lines.append(f"{indent_str}{key}:") + lines.append(json_to_yaml(value, indent + 1)) + else: + lines.append(f"{indent_str}{key}: {yaml_value(value)}") + elif isinstance(obj, list): + for item in obj: + if isinstance(item, (dict, list)): + lines.append(f"{indent_str}-") + lines.append(json_to_yaml(item, indent + 1)) + else: + lines.append(f"{indent_str}- {yaml_value(item)}") + + return '\n'.join(lines) + + +def yaml_value(value): + """Format a value for YAML output.""" + if value is None: + return 'null' + elif isinstance(value, bool): + return 'true' if value else 'false' + elif isinstance(value, str): + # Quote strings that contain special characters + if ':' in value or '#' in value or value.startswith('-'): + return f'"{value}"' + return value + else: + return str(value) + + +def get_lvm_pv_info(): + """ + Parse pvscan output to get LVM physical volume information. + Returns dict with PV info or None if not available. 
+ """ + try: + result = subprocess.run(['pvscan'], capture_output=True, text=True, timeout=5) + if result.returncode != 0: + return None + + # Parse output like: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" + # or: "Total: 1 [62.41 GiB] / in use: 1 [62.41 GiB] / in no VG: 0 [0 ]" + output = result.stdout.strip() + if not output: + return None + + lines = output.split('\n') + + # Look for PV line + pv_device = None + vg_name = None + total_size = None + free_size = None + + for line in lines: + line = line.strip() + # Match: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" + if line.startswith('PV '): + parts = line.split() + if len(parts) >= 2: + pv_device = parts[1] + # Find VG name + for i, part in enumerate(parts): + if part == 'VG' and i + 1 < len(parts): + vg_name = parts[i + 1] + break + # Find size info in brackets + bracket_match = re.search(r'\[([^\]]+)\]', line) + if bracket_match: + size_info = bracket_match.group(1) + # Parse "62.41 GiB / 52.41 GiB free" + size_parts = size_info.split('/') + if len(size_parts) >= 1: + total_size = size_parts[0].strip() + if len(size_parts) >= 2: + free_match = re.search(r'([\d.]+)\s*([KMGT]i?B)', size_parts[1]) + if free_match: + free_size = free_match.group(1) + ' ' + free_match.group(2) + + if not pv_device or not total_size: + return None + + # Calculate used space and percentage + # Parse sizes to calculate percentage + def parse_size(size_str): + """Parse size string like '62.41 GiB' to bytes.""" + match = re.match(r'([\d.]+)\s*([KMGT]i?)B?', size_str, re.IGNORECASE) + if not match: + return 0 + value = float(match.group(1)) + unit = match.group(2).upper() + multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4} + return int(value * multipliers.get(unit, 1)) + + total_bytes = parse_size(total_size) + free_bytes = parse_size(free_size) if free_size else 0 + used_bytes = total_bytes - free_bytes + percent = int((used_bytes / total_bytes * 100)) if total_bytes > 0 else 0 + + # Format used size + def format_size(bytes_val): + """Format bytes to human-readable size.""" + for unit, multiplier in [('TiB', 1024**4), ('GiB', 1024**3), ('MiB', 1024**2), ('KiB', 1024)]: + if bytes_val >= multiplier: + return f"{bytes_val / multiplier:.2f} {unit}" + return f"{bytes_val} B" + + used_size = format_size(used_bytes) + + return { + 'pv_device': pv_device, + 'vg_name': vg_name or 'N/A', + 'total': total_size, + 'free': free_size or '0 B', + 'used': used_size, + 'percent': percent + } + except Exception as e: + print(f"Error parsing LVM PV info: {e}", file=sys.stderr) + return None + + +def get_root_filesystem(): + """ + Detect the real root filesystem mount point. + On bootc systems, /sysroot is the real root filesystem. + Otherwise, find the largest real block device filesystem. 
+ """ + # Check if /sysroot exists and is a mount point (bootc systems) + try: + result = subprocess.run(['findmnt', '-n', '-o', 'TARGET', '/sysroot'], + capture_output=True, text=True, timeout=5) + if result.returncode == 0 and result.stdout.strip(): + return '/sysroot' + except Exception: + pass + + # Fallback: parse df output to find the real root filesystem + try: + df_result = subprocess.run(['df', '-h'], capture_output=True, text=True, timeout=5) + if df_result.returncode != 0: + return '/' # Fallback to root + + lines = df_result.stdout.strip().split('\n') + if len(lines) < 2: + return '/' # Fallback to root + + # Virtual filesystem types to skip + virtual_fs = ('tmpfs', 'overlay', 'composefs', 'devtmpfs', 'proc', 'sysfs', + 'devpts', 'cgroup', 'pstore', 'bpf', 'tracefs', 'debugfs', + 'configfs', 'fusectl', 'mqueue', 'hugetlbfs', 'efivarfs', 'ramfs', + 'nsfs', 'shm', 'vfat') + + # Boot partitions to skip + boot_paths = ('/boot', '/boot/efi') + + best_fs = None + best_size = 0 + + for line in lines[1:]: # Skip header + parts = line.split() + if len(parts) < 6: + continue + + filesystem = parts[0] + mount_point = parts[5] + size_str = parts[1] + + # Skip virtual filesystems + fs_type = filesystem.split('/')[-1] if '/' in filesystem else filesystem + if any(vfs in fs_type.lower() for vfs in virtual_fs): + continue + + # Skip boot partitions + if mount_point in boot_paths: + continue + + # Skip if not a block device (doesn't start with /dev) + if not filesystem.startswith('/dev'): + continue + + # Prefer LVM root volumes + if '/mapper/' in filesystem and 'root' in filesystem.lower(): + return mount_point + + # Calculate size for comparison (convert to bytes for comparison) + try: + # Parse size like "10G", "500M", etc. + size_val = float(size_str[:-1]) + size_unit = size_str[-1].upper() + if size_unit == 'G': + size_bytes = size_val * 1024 * 1024 * 1024 + elif size_unit == 'M': + size_bytes = size_val * 1024 * 1024 + elif size_unit == 'K': + size_bytes = size_val * 1024 + else: + size_bytes = size_val + + if size_bytes > best_size: + best_size = size_bytes + best_fs = mount_point + except (ValueError, IndexError): + continue + + if best_fs: + return best_fs + + except Exception: + pass + + # Final fallback + return '/' + diff --git a/deploy/microshift-bootc/config-svc/templates/index.html b/deploy/microshift-bootc/config-svc/templates/index.html new file mode 100644 index 00000000..c41fb486 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/templates/index.html @@ -0,0 +1,857 @@ + + + + + + Jumpstarter Configuration + + + + +
+ + + + +
+ {% for msg in messages %} +
{{ msg.text }}
+ {% endfor %} + +
+

Jumpstarter Deployment Configuration

+
+
+
+ + +
The base domain for Jumpstarter routes
+
+
+ + +
The Jumpstarter controller container image to use
+
+
+ + +
When to pull the container image
+
+ +
+
+ +
+

Change Root Password

+
+
+
+ + +
Leave empty to only update SSH keys. Minimum 8 characters if provided.
+
+
+ + +
Re-enter your new password (required if password is provided)
+
+
+ + +
One SSH public key per line. Leave empty to clear existing keys.
+
+ +
+
+ +
+

BootC Operations

+
+
+ + +
Container image reference to switch to (e.g., quay.io/jumpstarter-dev/microshift/bootc:latest)
+
+
+ + + +
+ +

System Information

+
+
Loading system statistics...
+
+ +

BootC Status

+
+
Loading BootC status...
+
+ +

Kernel Log

+
+
Loading kernel log...
+
+
+ +
+
+

Kubeconfig

+

+ Download the MicroShift kubeconfig file to access the Kubernetes cluster from your local machine. +

+ Download Kubeconfig +
+ +
+

Routes

+
+ +
+ + + + + + + + + + + + + + + + + + +
Namespace Name Host Service Port TLS Admitted Age
Loading routes...
+
+
+ +
+

Pod Status

+
+ +
+ + + + + + + + + + + + + + + + + +
Namespace Name Ready Status Restarts Age Actions
Loading pods...
+
+
+
+
+
+ + + + \ No newline at end of file diff --git a/deploy/microshift-bootc/config-svc/templates/password_required.html b/deploy/microshift-bootc/config-svc/templates/password_required.html new file mode 100644 index 00000000..1ab933b3 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/templates/password_required.html @@ -0,0 +1,134 @@ + + + + + + Password Change Required - Jumpstarter + + + + +
+ + +
+

Security Setup Required

+ + {% for msg in messages %} +
{{ msg.text }}
+ {% endfor %} + +
+

⚠️ Default Password Detected

+

You are using the default password. For security reasons, you must change the root password before accessing the configuration interface.

+
+ +
+
+
+ + +
Minimum 8 characters (required to change from default password)
+
+
+ + +
Re-enter your new password
+
+
+ + +
One SSH public key per line. Leave empty to clear existing keys.
+
+ +
+ +
+
+ + \ No newline at end of file diff --git a/deploy/microshift-bootc/config-svc/templates/styles.css b/deploy/microshift-bootc/config-svc/templates/styles.css new file mode 100644 index 00000000..d2a63df5 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/templates/styles.css @@ -0,0 +1,370 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} +html { + scroll-behavior: smooth; +} +body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; + background: linear-gradient(135deg, #4c4c4c 0%, #1a1a1a 100%); + min-height: 100vh; + display: flex; + justify-content: center; + align-items: center; + padding: 20px; +} +.container { + background: white; + border-radius: 12px; + box-shadow: 0 10px 60px rgba(0,0,0,0.5), 0 0 0 1px rgba(255, 193, 7, 0.1); + max-width: 1000px; + width: 100%; + padding: 40px; +} +.banner { + margin: -40px -40px 30px -40px; + padding: 25px 40px; + background: linear-gradient(135deg, #757575 0%, #616161 100%); + border-radius: 12px 12px 0 0; + text-align: center; +} +.banner-text { + color: white; + font-size: 14px; + margin-bottom: 20px; + font-weight: 500; +} +.logos { + display: flex; + justify-content: center; + align-items: center; + gap: 40px; + flex-wrap: wrap; +} +.logo-link { + display: inline-block; + transition: opacity 0.3s; +} +.logo-link:hover { + opacity: 0.9; +} +.logo-link img { + height: 45px; + width: auto; +} +.microshift-logo { + height: 40px !important; + filter: brightness(0) invert(1); +} +.jumpstarter-logo { + height: 40px !important; +} +.nav-bar { + display: flex; + gap: 0; + margin: 0 -40px 30px -40px; + border-bottom: 1px solid #e0e0e0; + background: #fafafa; +} +.nav-link { + flex: 1; + text-align: center; + padding: 15px 20px; + text-decoration: none; + color: #666; + font-size: 14px; + font-weight: 500; + transition: all 0.3s; + border-bottom: 3px solid transparent; +} +.nav-link:hover { + background: #f5f5f5; + color: #333; + border-bottom-color: #ffc107; +} +.nav-link.active { + color: #000; + border-bottom-color: #ffc107; + background: white; +} +.content-area { + padding: 0 40px 40px 40px; + margin: 0 -40px -40px -40px; +} +h2 { + color: #333; + font-size: 20px; + margin-bottom: 15px; +} +.section { + display: none; + padding: 20px 0; + animation: fadeIn 0.3s ease-in; +} +@keyframes fadeIn { + from { + opacity: 0; + transform: translateY(10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} +.info { + background: #f8f9fa; + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 15px; + font-size: 14px; + color: #555; +} +.info strong { + color: #333; +} +.warning-box { + background: #fff3cd; + border: 1px solid #ffc107; + border-radius: 6px; + padding: 16px; + margin-bottom: 30px; +} +.warning-box h2 { + color: #856404; + font-size: 18px; + margin-bottom: 10px; +} +.warning-box p { + color: #856404; + font-size: 14px; + line-height: 1.5; +} +.form-group { + margin-bottom: 15px; +} +label { + display: block; + margin-bottom: 6px; + color: #555; + font-size: 14px; + font-weight: 500; +} +input[type="text"], +input[type="password"], +textarea { + width: 100%; + padding: 10px 12px; + border: 1px solid #ddd; + border-radius: 6px; + font-size: 14px; + transition: border-color 0.3s, opacity 0.3s; + font-family: inherit; +} +textarea { + font-family: monospace; + resize: vertical; +} +input[type="text"]:focus, +input[type="password"]:focus, +textarea:focus { + outline: none; + border-color: #ffc107; + box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2); +} 
+input[type="text"]:disabled, +input[type="password"]:disabled, +textarea:disabled { + background-color: #f5f5f5; + cursor: not-allowed; + opacity: 0.6; +} +select { + width: 100%; + padding: 10px 12px; + border: 1px solid #ddd; + border-radius: 6px; + font-size: 14px; + background-color: white; + cursor: pointer; + transition: border-color 0.3s; +} +select:focus { + outline: none; + border-color: #ffc107; + box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2); +} +.hint { + font-size: 12px; + color: #888; + margin-top: 4px; +} +button { + background: #ffc107; + color: #000; + border: none; + padding: 12px 24px; + border-radius: 6px; + font-size: 14px; + font-weight: 600; + cursor: pointer; + transition: background 0.3s, opacity 0.3s; +} +button:hover { + background: #ffb300; +} +button:disabled { + background: #666; + color: #999; + cursor: not-allowed; + opacity: 0.6; +} +button:disabled:hover { + background: #666; +} +button[type="submit"] { + width: 100%; +} +.download-btn { + background: #ffc107; + display: inline-block; + text-decoration: none; + color: #000; + padding: 12px 24px; + border-radius: 6px; + font-size: 14px; + font-weight: 600; + transition: background 0.3s; +} +.download-btn:hover { + background: #ffb300; +} +.message { + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 20px; + font-size: 14px; +} +.message.success { + background: #d4edda; + color: #155724; + border: 1px solid #c3e6cb; +} +.message.error { + background: #f8d7da; + color: #721c24; + border: 1px solid #f5c6cb; +} +.message.info { + background: #d1ecf1; + color: #0c5460; + border: 1px solid #bee5eb; +} +/* MicroShift page specific styles */ +.status-badge { + display: inline-block; + padding: 4px 8px; + border-radius: 4px; + font-size: 11px; + font-weight: 600; + text-transform: uppercase; +} +.status-running { + background: #d4edda; + color: #155724; +} +.status-pending { + background: #fff3cd; + color: #856404; +} +.status-failed { + background: #f8d7da; + color: #721c24; +} +.status-succeeded { + background: #d1ecf1; + color: #0c5460; +} +.status-crashloopbackoff { + background: #f8d7da; + color: #721c24; +} +.status-terminating { + background: #ffeaa7; + color: #856404; +} +.status-unknown { + background: #e2e3e5; + color: #383d41; +} +table { + width: 100%; + border-collapse: collapse; + margin-top: 20px; + font-size: 13px; +} +th { + background: #f8f9fa; + padding: 12px 8px; + text-align: left; + font-weight: 600; + color: #333; + border-bottom: 2px solid #dee2e6; + position: sticky; + top: 0; + z-index: 10; +} +td { + padding: 10px 8px; + border-bottom: 1px solid #eee; + color: #555; +} +tr:hover { + background: #f8f9fa; +} +.table-wrapper { + overflow-x: auto; + max-height: 70vh; + overflow-y: auto; +} +.loading { + text-align: center; + padding: 40px; + color: #666; +} +.error { + background: #f8d7da; + color: #721c24; + padding: 12px 16px; + border-radius: 6px; + margin-bottom: 20px; +} +.pod-count { + color: #666; + font-size: 14px; + margin-bottom: 10px; +} +.microshift-section { + margin-bottom: 30px; + padding-bottom: 30px; + border-bottom: 1px solid #eee; +} +.microshift-section:last-child { + border-bottom: none; +} +.action-icon { + text-decoration: none; + font-size: 18px; + padding: 4px 6px; + margin: 0 2px; + border-radius: 4px; + transition: all 0.3s; + display: inline-block; + cursor: pointer; +} +.action-icon:hover { + background: #fff3e0; + transform: scale(1.2); +} + diff --git a/deploy/microshift-bootc/config-svc/update-banner.sh 
b/deploy/microshift-bootc/config-svc/update-banner.sh
index b9aa7edf..0b543d8b 100644
--- a/deploy/microshift-bootc/config-svc/update-banner.sh
+++ b/deploy/microshift-bootc/config-svc/update-banner.sh
@@ -3,34 +3,30 @@
 import sys
 import os
-import importlib.util
-import importlib.machinery

-def update_login_banner():
-config_svc_path = '/usr/local/bin/config-svc'
-if not os.path.exists(config_svc_path):
-    print(f"Error: {config_svc_path} does not exist", file=sys.stderr)
-    sys.exit(1)
+# Add config-svc module directory to Python path
+sys.path.insert(0, '/usr/local/lib/config-svc')

-# Try to create spec with explicit loader for files without .py extension
 try:
-    # Use SourceFileLoader explicitly for files without .py extension
-    loader = importlib.machinery.SourceFileLoader('config_svc', config_svc_path)
-    spec = importlib.util.spec_from_loader('config_svc', loader)
+    # Import the system module which contains update_login_banner()
+    from system import update_login_banner

-    if spec is None:
-        print(f"Error: Failed to create spec for {config_svc_path}", file=sys.stderr)
-        sys.exit(1)
+    # Call the update function
+    success, message = update_login_banner()

-    if spec.loader is None:
-        print(f"Error: Failed to get loader for {config_svc_path}", file=sys.stderr)
+    if not success:
+        print(f"Warning: Failed to update login banner: {message}", file=sys.stderr)
         sys.exit(1)

-    module = importlib.util.module_from_spec(spec)
-    spec.loader.exec_module(module)
-    module.update_login_banner()
+    print("Login banner updated successfully", file=sys.stdout)
+    sys.exit(0)
+
+except ImportError as e:
+    print(f"Error: Failed to import system module: {e}", file=sys.stderr)
+    print("Make sure config-svc modules are installed at /usr/local/lib/config-svc/", file=sys.stderr)
+    sys.exit(1)
 except Exception as e:
-    print(f"Error loading or executing {config_svc_path}: {e}", file=sys.stderr)
+    print(f"Error updating login banner: {e}", file=sys.stderr)
     sys.exit(1)
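
The hunks above split the former single-file config-svc into separate auth.py, system.py and routes.py modules. Below is a minimal smoke-test sketch for that split, assuming only what the patch itself establishes: that the modules are importable (for example from /usr/local/lib/config-svc, the same path update-banner.sh now inserts) and that validate_hostname, validate_password, get_default_route_ip and json_to_yaml keep the signatures shown here. It stops short of calling apply_jumpstarter_cr(), so nothing is applied to the cluster.

# Minimal smoke test for the split-out config-svc modules (a sketch, not part of
# the patch; assumes auth.py and system.py are importable, e.g. from
# /usr/local/lib/config-svc as set up for update-banner.sh).
import sys

sys.path.insert(0, '/usr/local/lib/config-svc')  # same path update-banner.sh uses

from auth import validate_hostname, validate_password
from system import get_default_route_ip, json_to_yaml

# Validate a candidate base domain the same way routes.py does before applying the CR.
ip = get_default_route_ip()  # e.g. "192-168-1-10" (dots already swapped for nip.io), or None
base_domain = f"jumpstarter.{ip}.nip.io" if ip else "jumpstarter.local"
ok, err = validate_hostname(base_domain)
print(f"base domain {base_domain!r}: {'ok' if ok else err}")

# Run the password rules that guard chpasswd (no newlines, no colons, 8-128 chars).
ok, err = validate_password("jumpstarter")
print(f"password check: {'ok' if ok else err}")

# Show the YAML that the simple json_to_yaml serializer emits for a trimmed-down CR;
# apply_jumpstarter_cr() writes the full version to a temp file and runs `oc apply`.
cr_fragment = {
    'apiVersion': 'operator.jumpstarter.dev/v1alpha1',
    'kind': 'Jumpstarter',
    'metadata': {'name': 'jumpstarter', 'namespace': 'default'},
    'spec': {'baseDomain': base_domain, 'useCertManager': True},
}
print(json_to_yaml(cr_fragment))

Hand-rolling json_to_yaml for this one CR is consistent with the pyproject.toml dependency list, where Flask is the only required package and python-pam stays optional, so no PyYAML dependency is pulled into the image.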