diff --git a/roles/ocp4_workload_quay_operator/defaults/main.yml b/roles/ocp4_workload_quay_operator/defaults/main.yml index 7213105..bd63e34 100644 --- a/roles/ocp4_workload_quay_operator/defaults/main.yml +++ b/roles/ocp4_workload_quay_operator/defaults/main.yml @@ -119,3 +119,31 @@ ocp4_workload_quay_operator_registry_enable_clairpostgres: false # Startup probe override (disabled by default) ocp4_workload_quay_operator_registry_startup_probe_update: false ocp4_workload_quay_operator_registry_startup_probe_failure_threshold: 30 + +# -------------------------------- +# Storage Backend Configuration +# -------------------------------- +# Set to true to use S4 storage, false to use Noobaa (OCS) +# Default: false (uses Noobaa/OpenShift Container Storage) +ocp4_workload_quay_operator_s4_storage_enabled: false + +# -------------------------------- +# S3 Storage Configuration (S4) +# -------------------------------- +# Only used when ocp4_workload_quay_operator_s4_storage_enabled: true +# Quay uses S4 storage for object storage backend + +# S4 S3 endpoint configuration +ocp4_workload_quay_operator_s4_namespace: s4 +ocp4_workload_quay_operator_s4_bucket_name: quay-registry + +# S4 S3 credentials (should match S4 deployment) +ocp4_workload_quay_operator_s4_access_key: s4admin +ocp4_workload_quay_operator_s4_secret_key: s4secret + +# S3 endpoint - uses internal service endpoint +# Format: http://s4..svc.cluster.local:7480 +ocp4_workload_quay_operator_s4_endpoint: "http://s4.{{ ocp4_workload_quay_operator_s4_namespace }}.svc.cluster.local:7480" + +# S3 region +ocp4_workload_quay_operator_s4_region: us-east-1 diff --git a/roles/ocp4_workload_quay_operator/readme.adoc b/roles/ocp4_workload_quay_operator/readme.adoc index d0e5d07..00d591f 100644 --- a/roles/ocp4_workload_quay_operator/readme.adoc +++ b/roles/ocp4_workload_quay_operator/readme.adoc @@ -5,7 +5,8 @@ * This role can ** install the Quay Operator ** use a previously installed Red Hat Quay Operator to deploy a Quay 
Registry into an OpenShift Cluster. -* The cluster *must* have OpenShift Container Storage installed. +* By default, the cluster *must* have OpenShift Container Storage (Noobaa) installed. +* Optionally, S4 S3-compatible storage can be used instead of Noobaa. * The role consists of the following tasks files: ** Tasks: link:./tasks/pre_workload.yml[pre_workload.yml] - Sets up an @@ -26,6 +27,68 @@ *** This role removes the Red Hat Quay Registry project (and therefore Red Hat Quay Registry) *** Debug task will print out: `remove_workload Tasks completed successfully.` +== Prerequisites + +=== Storage Backend Options + +This role supports two storage backends for Quay: + +1. **Noobaa/OpenShift Container Storage** (default) +2. **S4 Storage** (optional, S3-compatible) + +==== Option 1: Noobaa Storage (Default) + +The cluster *must* have OpenShift Container Storage installed with Noobaa. + +[source,yaml] +---- +workloads: +- agnosticd.core_workloads.ocp4_workload_quay_operator + +# Uses default storage (Noobaa/OCS) +# No additional configuration required +---- + +==== Option 2: S4 Storage (Optional) + +To use S4 storage instead of Noobaa, S4 must be deployed first using the `ocp4_workload_s4` role. 
+ +[source,yaml] +---- +workloads: +- agnosticd.core_workloads.ocp4_workload_s4 +- agnosticd.core_workloads.ocp4_workload_quay_operator + +# Enable S4 storage backend +ocp4_workload_quay_operator_s4_storage_enabled: true + +# S4 configuration +ocp4_workload_s4_buckets: +- quay-registry # Required bucket for Quay + +# S4 credentials (must match Quay configuration) +ocp4_workload_s4_access_key_id: s4admin +ocp4_workload_s4_secret_access_key: s4secret + +# Quay S4 settings (optional, uses defaults if not specified) +ocp4_workload_quay_operator_s4_namespace: s4 +ocp4_workload_quay_operator_s4_bucket_name: quay-registry +ocp4_workload_quay_operator_s4_access_key: s4admin +ocp4_workload_quay_operator_s4_secret_key: s4secret +---- + +=== S4 Storage Configuration Variables + +When using S4 storage (`ocp4_workload_quay_operator_s4_storage_enabled: true`), these variables control the S3 storage backend: + +* `ocp4_workload_quay_operator_s4_storage_enabled`: Enable S4 storage backend (default: `false`) +* `ocp4_workload_quay_operator_s4_namespace`: Namespace where S4 is deployed (default: `s4`) +* `ocp4_workload_quay_operator_s4_bucket_name`: S3 bucket name for Quay (default: `quay-registry`) +* `ocp4_workload_quay_operator_s4_access_key`: S3 access key (default: `s4admin`) +* `ocp4_workload_quay_operator_s4_secret_key`: S3 secret key (default: `s4secret`) +* `ocp4_workload_quay_operator_s4_endpoint`: S3 endpoint URL (default: auto-configured internal endpoint) +* `ocp4_workload_quay_operator_s4_region`: S3 region (default: `us-east-1`) + == Review the defaults variable file * This file link:./defaults/main.yml[./defaults/main.yml] contains all the variables you need to define to control the deployment of your workload. 
diff --git a/roles/ocp4_workload_quay_operator/tasks/workload.yml b/roles/ocp4_workload_quay_operator/tasks/workload.yml index 8020534..cd494dd 100644 --- a/roles/ocp4_workload_quay_operator/tasks/workload.yml +++ b/roles/ocp4_workload_quay_operator/tasks/workload.yml @@ -1,18 +1,37 @@ --- -# Quay needs OpenShift Container Storage (Noobaa in particular) -# Check that the correct storage class exists on the cluster -- name: Retrieve Bucket Class - kubernetes.core.k8s_info: - api_version: noobaa.io/v1alpha1 - kind: BucketClass - namespace: openshift-storage - register: r_bucket_class - -- name: Assert that there is a Bucket Storage Class - ansible.builtin.assert: - that: - - r_bucket_class.resources | length == 1 - fail_msg: Quay must be installed on a cluster with OpenShift Container Storage configured - and a Bucket Class deployed. +# Validate storage backend based on configuration +- name: Validate storage backend + block: + - name: Check S4 service exists + when: ocp4_workload_quay_operator_s4_storage_enabled | bool + kubernetes.core.k8s_info: + api_version: v1 + kind: Service + name: s4 + namespace: "{{ ocp4_workload_quay_operator_s4_namespace }}" + register: r_s4_service + + - name: Assert that S4 service exists + when: ocp4_workload_quay_operator_s4_storage_enabled | bool + ansible.builtin.assert: + that: + - r_s4_service.resources | length == 1 + fail_msg: "S4 storage service not found in namespace {{ ocp4_workload_quay_operator_s4_namespace }}. Deploy S4 first using ocp4_workload_s4 role." 
+ + - name: Check Noobaa BucketClass exists + when: not (ocp4_workload_quay_operator_s4_storage_enabled | bool) + kubernetes.core.k8s_info: + api_version: noobaa.io/v1alpha1 + kind: BucketClass + namespace: openshift-storage + register: r_bucket_class + + - name: Assert that Noobaa BucketClass exists + when: not (ocp4_workload_quay_operator_s4_storage_enabled | bool) + ansible.builtin.assert: + that: + - r_bucket_class.resources | length == 1 + fail_msg: "Quay must be installed on a cluster with OpenShift Container Storage configured - and a Bucket Class deployed." - name: Install Quay Operator when: ocp4_workload_quay_operator_install_operator | bool diff --git a/roles/ocp4_workload_quay_operator/templates/config.yaml.j2 b/roles/ocp4_workload_quay_operator/templates/config.yaml.j2 index 394ef0e..08800d5 100644 --- a/roles/ocp4_workload_quay_operator/templates/config.yaml.j2 +++ b/roles/ocp4_workload_quay_operator/templates/config.yaml.j2 @@ -6,3 +6,19 @@ SUPER_USERS: - {{ ocp4_workload_quay_operator_registry_admin_user }} FEATURE_USER_INITIALIZE: true {% endif %} +{% if ocp4_workload_quay_operator_s4_storage_enabled | bool %} +# S4 Storage Configuration +DISTRIBUTED_STORAGE_CONFIG: + s4storage: + - RadosGWStorage + - hostname: s4.{{ ocp4_workload_quay_operator_s4_namespace }}.svc.cluster.local + port: 7480 + is_secure: false + bucket_name: {{ ocp4_workload_quay_operator_s4_bucket_name }} + storage_path: /datastorage/registry + access_key: {{ ocp4_workload_quay_operator_s4_access_key }} + secret_key: {{ ocp4_workload_quay_operator_s4_secret_key }} +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: +- s4storage +{% endif %} diff --git a/roles/ocp4_workload_quay_operator/templates/quay_registry.yaml.j2 b/roles/ocp4_workload_quay_operator/templates/quay_registry.yaml.j2 index 56c68f5..8b246f2 100644 --- a/roles/ocp4_workload_quay_operator/templates/quay_registry.yaml.j2 +++ b/roles/ocp4_workload_quay_operator/templates/quay_registry.yaml.j2 @@ 
-10,7 +10,7 @@ spec: - kind: postgres managed: true - kind: objectstorage - managed: true + managed: {{ 'false' if ocp4_workload_quay_operator_s4_storage_enabled | bool else 'true' }} - kind: redis managed: true - kind: tls diff --git a/roles/ocp4_workload_s4/defaults/main.yml b/roles/ocp4_workload_s4/defaults/main.yml new file mode 100644 index 0000000..f0475c3 --- /dev/null +++ b/roles/ocp4_workload_s4/defaults/main.yml @@ -0,0 +1,107 @@ +--- +# S4 (Super Simple Storage Service) workload configuration + +# -------------------------------------------------- +# Namespace configuration +# -------------------------------------------------- +ocp4_workload_s4_namespace: s4 +ocp4_workload_s4_namespace_create: true + +# -------------------------------------------------- +# ArgoCD Application configuration +# -------------------------------------------------- +ocp4_workload_s4_application_name: s4 +ocp4_workload_s4_gitops_namespace: openshift-gitops + +# Helm chart configuration +ocp4_workload_s4_chart_repo: https://github.com/rh-aiservices-bu/s4 +ocp4_workload_s4_chart_revision: v0.3.2 +ocp4_workload_s4_chart_path: charts/s4 + +# -------------------------------------------------- +# S4 Deployment configuration +# -------------------------------------------------- +# Image configuration +ocp4_workload_s4_image_repository: quay.io/rh-aiservices-bu/s4 +ocp4_workload_s4_image_tag: 0.3.2 +ocp4_workload_s4_image_pull_policy: IfNotPresent + +# S3 credentials +ocp4_workload_s4_access_key_id: s4admin +ocp4_workload_s4_secret_access_key: s4secret + +# Authentication configuration +# Username and password can be passed in as parameters. +# If not provided, username defaults to 'admin' and password is auto-generated. 
+ocp4_workload_s4_auth_enabled: true +ocp4_workload_s4_auth_username: admin # Default username (can be overridden) +ocp4_workload_s4_auth_password: "" # Pass in a password or leave empty for auto-generation +ocp4_workload_s4_auth_password_length: 16 # Length of auto-generated password +ocp4_workload_s4_auth_jwt_expiration_hours: 8 +ocp4_workload_s4_auth_cookie_require_https: true + +# -------------------------------------------------- +# Storage configuration +# -------------------------------------------------- +# RGW data volume (Ceph/SQLite storage) +ocp4_workload_s4_storage_data_size: 10Gi +ocp4_workload_s4_storage_data_storage_class: "" # Empty for default +ocp4_workload_s4_storage_data_access_mode: ReadWriteOnce + +# Local storage volume (optional, for local file browser) +ocp4_workload_s4_storage_local_enabled: false +ocp4_workload_s4_storage_local_size: 50Gi +ocp4_workload_s4_storage_local_storage_class: "" +ocp4_workload_s4_storage_local_access_mode: ReadWriteOnce +ocp4_workload_s4_storage_local_paths: "" # Comma-separated paths + +# Storage limits +ocp4_workload_s4_storage_max_file_size_gb: 20 +ocp4_workload_s4_storage_max_concurrent_transfers: 2 + +# -------------------------------------------------- +# Resource configuration +# -------------------------------------------------- +ocp4_workload_s4_resources_requests_cpu: 250m +ocp4_workload_s4_resources_requests_memory: 512Mi +ocp4_workload_s4_resources_limits_cpu: 2000m +ocp4_workload_s4_resources_limits_memory: 2Gi + +# -------------------------------------------------- +# Route configuration +# -------------------------------------------------- +ocp4_workload_s4_route_enabled: true +ocp4_workload_s4_route_host: "" # Auto-generated if empty +ocp4_workload_s4_route_tls_termination: edge +ocp4_workload_s4_route_tls_insecure_policy: Redirect + +# S3 API Route (enabled by default for S3 API access) +ocp4_workload_s4_route_s3_api_enabled: true +ocp4_workload_s4_route_s3_api_host: "" # Auto-generated if empty 
+ +# -------------------------------------------------- +# Bucket creation configuration +# -------------------------------------------------- +# List of S3 buckets to create automatically +# If empty or not defined, no buckets will be created +# Example: ['datasets', 'ml-models', 'backups'] +ocp4_workload_s4_buckets: [] + +# -------------------------------------------------- +# ArgoCD sync configuration +# -------------------------------------------------- +ocp4_workload_s4_sync_policy_automated: true +ocp4_workload_s4_sync_policy_self_heal: true +ocp4_workload_s4_sync_policy_prune: true +ocp4_workload_s4_sync_retry_limit: 5 + +# -------------------------------------------------- +# User info configuration +# -------------------------------------------------- +ocp4_workload_s4_enable_user_info_messages: true +ocp4_workload_s4_enable_user_info_data: true + +# -------------------------------------------------- +# Wait for deployment +# -------------------------------------------------- +ocp4_workload_s4_wait_for_deployment: true diff --git a/roles/ocp4_workload_s4/meta/main.yml b/roles/ocp4_workload_s4/meta/main.yml new file mode 100644 index 0000000..aeaf302 --- /dev/null +++ b/roles/ocp4_workload_s4/meta/main.yml @@ -0,0 +1,18 @@ +--- +galaxy_info: + role_name: ocp4_workload_s4 + author: Red Hat GPTE + description: | + Deploy S4 (Super Simple Storage Service) on OpenShift using GitOps. + S4 provides S3-compatible object storage with a modern web UI. 
+ license: MIT + min_ansible_version: "2.9" + platforms: [] + galaxy_tags: + - ocp + - openshift + - s3 + - storage + - gitops + - argocd +dependencies: [] diff --git a/roles/ocp4_workload_s4/readme.adoc b/roles/ocp4_workload_s4/readme.adoc new file mode 100644 index 0000000..4862d55 --- /dev/null +++ b/roles/ocp4_workload_s4/readme.adoc @@ -0,0 +1,242 @@ += ocp4_workload_s4 - Deploy S4 Storage Service + +== Role overview + +* This role deploys S4 (Super Simple Storage Service) on OpenShift using GitOps/ArgoCD +* S4 provides S3-compatible object storage with a modern web UI +* Includes Ceph RADOS Gateway (RGW) backed by filesystem storage +* Supports optional automatic S3 bucket creation + +== Features + +* GitOps-based deployment using OpenShift GitOps (ArgoCD) +* S3-compatible API endpoint (port 7480) +* Modern React-based web UI (port 5000) +* Optional authentication for web UI +* Configurable persistent storage +* Optional automatic bucket creation +* OpenShift Route integration for external access +* Production-ready resource limits and health checks + +== Deployment + +This role assumes OpenShift GitOps is already installed in the `openshift-gitops` namespace. 
+ +The role performs the following tasks: + +* Creates the S4 namespace (default: `s4`) +* Deploys an ArgoCD Application resource pointing to the S4 Helm chart +* Waits for the Application to sync and become healthy +* Optionally creates S3 bucket(s) using Kubernetes Jobs +* Reports access information to users (URLs, credentials) + +== Variables + +=== Namespace Configuration + +* `ocp4_workload_s4_namespace`: Namespace for S4 deployment (default: `s4`) +* `ocp4_workload_s4_namespace_create`: Create the namespace (default: `true`) + +=== ArgoCD Configuration + +* `ocp4_workload_s4_application_name`: ArgoCD Application name (default: `s4`) +* `ocp4_workload_s4_gitops_namespace`: OpenShift GitOps namespace (default: `openshift-gitops`) +* `ocp4_workload_s4_chart_repo`: Helm chart repository URL (default: `https://github.com/rh-aiservices-bu/s4`) +* `ocp4_workload_s4_chart_revision`: Git branch/tag (default: `v0.3.2`) +* `ocp4_workload_s4_chart_path`: Path to chart in repo (default: `charts/s4`) + +=== S3 Credentials + +* `ocp4_workload_s4_access_key_id`: S3 access key (default: `s4admin`) +* `ocp4_workload_s4_secret_access_key`: S3 secret key (default: `s4secret`) + +=== Authentication + +Username and password can be passed in as parameters. If not provided, username defaults to `admin` and password is auto-generated. 
+ +* `ocp4_workload_s4_auth_enabled`: Enable web UI authentication (default: `true`) +* `ocp4_workload_s4_auth_username`: UI username (default: `admin`, can be overridden) +* `ocp4_workload_s4_auth_password`: UI password (pass in custom password or leave empty for auto-generation) +* `ocp4_workload_s4_auth_password_length`: Length of auto-generated password (default: `16`) +* `ocp4_workload_s4_auth_jwt_expiration_hours`: JWT token expiration (default: `8`) + +=== Storage Configuration + +* `ocp4_workload_s4_storage_data_size`: RGW data volume size (default: `10Gi`) +* `ocp4_workload_s4_storage_data_storage_class`: Storage class for data (default: empty = default class) +* `ocp4_workload_s4_storage_local_enabled`: Enable local storage volume (default: `false`) +* `ocp4_workload_s4_storage_local_size`: Local storage volume size (default: `50Gi`) +* `ocp4_workload_s4_storage_max_file_size_gb`: Max file size in GB (default: `20`) + +=== Resource Limits + +* `ocp4_workload_s4_resources_requests_cpu`: CPU request (default: `250m`) +* `ocp4_workload_s4_resources_requests_memory`: Memory request (default: `512Mi`) +* `ocp4_workload_s4_resources_limits_cpu`: CPU limit (default: `2000m`) +* `ocp4_workload_s4_resources_limits_memory`: Memory limit (default: `2Gi`) + +=== Route Configuration + +* `ocp4_workload_s4_route_enabled`: Enable OpenShift Route for UI (default: `true`) +* `ocp4_workload_s4_route_host`: Custom hostname (default: auto-generated) +* `ocp4_workload_s4_route_tls_termination`: TLS termination (default: `edge`) +* `ocp4_workload_s4_route_s3_api_enabled`: Enable Route for S3 API (default: `true`) + +=== Bucket Creation + +* `ocp4_workload_s4_buckets`: List of S3 bucket names to create (default: `[]`) +** If empty or not defined, no buckets will be created +** Example: `['datasets', 'ml-models', 'backups']` + +=== Sync Policy + +* `ocp4_workload_s4_sync_policy_automated`: Enable automated sync (default: `true`) +* `ocp4_workload_s4_sync_policy_self_heal`: 
Enable self-healing (default: `true`) +* `ocp4_workload_s4_sync_policy_prune`: Enable resource pruning (default: `true`) + +== Example Usage + +=== Basic Deployment + +[source,yaml] +---- +- name: Deploy S4 + hosts: localhost + tasks: + - name: Include S4 workload + include_role: + name: ocp4_workload_s4 +---- + +=== With Bucket Creation + +[source,yaml] +---- +- name: Deploy S4 with buckets + hosts: localhost + tasks: + - name: Include S4 workload + include_role: + name: ocp4_workload_s4 + vars: + ocp4_workload_s4_buckets: + - my-data-bucket + - ml-models + - datasets + - backups +---- + +=== Custom Configuration + +[source,yaml] +---- +- name: Deploy S4 with custom settings + hosts: localhost + tasks: + - name: Include S4 workload + include_role: + name: ocp4_workload_s4 + vars: + ocp4_workload_s4_namespace: object-storage + ocp4_workload_s4_storage_data_size: 100Gi + ocp4_workload_s4_storage_data_storage_class: ocs-storagecluster-ceph-rbd + ocp4_workload_s4_access_key_id: my-access-key + ocp4_workload_s4_secret_access_key: my-secret-key + ocp4_workload_s4_route_s3_api_enabled: true + ocp4_workload_s4_buckets: + - production-data +---- + +=== Custom Authentication Credentials + +[source,yaml] +---- +- name: Deploy S4 with custom credentials + hosts: localhost + tasks: + - name: Include S4 workload + include_role: + name: ocp4_workload_s4 + vars: + ocp4_workload_s4_auth_username: myuser + ocp4_workload_s4_auth_password: MySecureP@ssw0rd + ocp4_workload_s4_access_key_id: custom-access-key + ocp4_workload_s4_secret_access_key: custom-secret-key-123456 +---- + +== Accessing S4 + +After deployment, users will receive: + +* *Web UI URL*: Access the modern web interface +* *UI Credentials*: Username and password for UI login +* *S3 Access Key*: For S3 API authentication +* *S3 Secret Key*: For S3 API authentication +* *S3 Endpoint (internal)*: For pod-to-pod communication +* *S3 Endpoint (external)*: If S3 route is enabled + +== Using S3 API + +You can use any 
S3-compatible client (AWS CLI, boto3, mc, etc.): + +[source,bash] +---- +# Using AWS CLI +aws configure set aws_access_key_id s4admin +aws configure set aws_secret_access_key s4secret +aws --endpoint-url https://s3-route-url s3 ls + +# Using MinIO Client (mc) +mc alias set mys4 https://s3-route-url s4admin s4secret +mc ls mys4 +mc mb mys4/mybucket +mc cp file.txt mys4/mybucket/ +---- + +== Removal + +To remove the S4 deployment: + +[source,yaml] +---- +- name: Remove S4 + hosts: localhost + tasks: + - name: Include S4 workload + include_role: + name: ocp4_workload_s4 + vars: + ACTION: destroy +---- + +This will: + +* Delete bucket creation jobs +* Delete the ArgoCD Application (which removes all S4 resources) +* Delete the S4 namespace + +NOTE: Bucket data is stored in the persistent volumes. Removing the workload will delete the PVCs and all data unless you have external backups. + +== Predefined Variables + +The following variables are always available: + +* `openshift_console_url`: The URL of the OpenShift console +* `openshift_api_url`: The OpenShift API endpoint URL +* `openshift_cluster_ingress_domain`: The domain used for ingress controllers + +== S4 Architecture + +S4 combines several components in a single container: + +* *Ceph RADOS Gateway (RGW)*: Provides S3-compatible API +* *SQLite Backend*: Lightweight metadata storage (single replica only) +* *React Web UI*: Modern interface for file management +* *File Browser*: Browse and manage files/objects + +The deployment uses: + +* Kubernetes Deployment (1 replica - SQLite limitation) +* PersistentVolumeClaim for RGW data storage +* OpenShift Route for external access +* Service for internal cluster access diff --git a/roles/ocp4_workload_s4/tasks/main.yml b/roles/ocp4_workload_s4/tasks/main.yml new file mode 100644 index 0000000..23308e3 --- /dev/null +++ b/roles/ocp4_workload_s4/tasks/main.yml @@ -0,0 +1,11 @@ +--- +# -------------------------------------------------- +# Do not modify this file +# 
-------------------------------------------------- +- name: Running workload provision tasks + when: ACTION == "provision" + ansible.builtin.include_tasks: workload.yml + +- name: Running workload removal tasks + when: ACTION == "destroy" + ansible.builtin.include_tasks: remove_workload.yml diff --git a/roles/ocp4_workload_s4/tasks/remove_workload.yml b/roles/ocp4_workload_s4/tasks/remove_workload.yml new file mode 100644 index 0000000..e0488f3 --- /dev/null +++ b/roles/ocp4_workload_s4/tasks/remove_workload.yml @@ -0,0 +1,51 @@ +--- +# -------------------------------------------------- +# S4 Workload Removal Tasks +# -------------------------------------------------- + +- name: Delete bucket creation jobs + kubernetes.core.k8s: + state: absent + api_version: batch/v1 + kind: Job + namespace: "{{ ocp4_workload_s4_namespace }}" + label_selectors: + - "app.kubernetes.io/component=bucket-creator" + +- name: Delete ArgoCD Application + kubernetes.core.k8s: + state: absent + api_version: argoproj.io/v1alpha1 + kind: Application + name: "{{ ocp4_workload_s4_application_name }}" + namespace: "{{ ocp4_workload_s4_gitops_namespace }}" + +- name: Wait for Application deletion + kubernetes.core.k8s_info: + api_version: argoproj.io/v1alpha1 + kind: Application + name: "{{ ocp4_workload_s4_application_name }}" + namespace: "{{ ocp4_workload_s4_gitops_namespace }}" + register: r_application + retries: 30 + delay: 10 + until: r_application.resources | length == 0 + +- name: Delete S4 namespace + when: ocp4_workload_s4_namespace_create | bool + kubernetes.core.k8s: + state: absent + api_version: v1 + kind: Namespace + name: "{{ ocp4_workload_s4_namespace }}" + +- name: Wait for namespace deletion + when: ocp4_workload_s4_namespace_create | bool + kubernetes.core.k8s_info: + api_version: v1 + kind: Namespace + name: "{{ ocp4_workload_s4_namespace }}" + register: r_namespace + retries: 60 + delay: 5 + until: r_namespace.resources | length == 0 diff --git 
a/roles/ocp4_workload_s4/tasks/workload.yml b/roles/ocp4_workload_s4/tasks/workload.yml new file mode 100644 index 0000000..c0a6d86 --- /dev/null +++ b/roles/ocp4_workload_s4/tasks/workload.yml @@ -0,0 +1,177 @@ +--- +# -------------------------------------------------- +# S4 Workload Deployment Tasks +# -------------------------------------------------- + +- name: Set internal variables + ansible.builtin.set_fact: + _ocp4_workload_s4_auth_password: >- + {{ ocp4_workload_s4_auth_password + if ocp4_workload_s4_auth_password | length > 0 + else lookup('password', '/dev/null length=' ~ ocp4_workload_s4_auth_password_length ~ ' chars=ascii_letters,digits') }} + _ocp4_workload_s4_route_url: "" + _ocp4_workload_s4_s3_api_endpoint_internal: "http://s4.{{ ocp4_workload_s4_namespace }}.svc.cluster.local:7480" + +- name: Create S4 namespace + when: ocp4_workload_s4_namespace_create | bool + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Namespace + metadata: + name: "{{ ocp4_workload_s4_namespace }}" + labels: + argocd.argoproj.io/managed-by: "{{ ocp4_workload_s4_gitops_namespace }}" + +- name: Create ArgoCD Application for S4 + kubernetes.core.k8s: + state: present + definition: "{{ lookup('template', 'application.yaml.j2') | from_yaml }}" + +- name: Wait for S4 Application to sync + when: ocp4_workload_s4_wait_for_deployment | bool + kubernetes.core.k8s_info: + api_version: argoproj.io/v1alpha1 + kind: Application + name: "{{ ocp4_workload_s4_application_name }}" + namespace: "{{ ocp4_workload_s4_gitops_namespace }}" + register: r_application + retries: 60 + delay: 10 + until: + - r_application.resources is defined + - r_application.resources | length > 0 + - r_application.resources[0].status is defined + - r_application.resources[0].status.sync is defined + - r_application.resources[0].status.sync.status == 'Synced' + - r_application.resources[0].status.health is defined + - r_application.resources[0].status.health.status == 'Healthy' + +- 
name: Wait for S4 deployment to be ready + when: ocp4_workload_s4_wait_for_deployment | bool + kubernetes.core.k8s_info: + api_version: apps/v1 + kind: Deployment + name: s4 + namespace: "{{ ocp4_workload_s4_namespace }}" + register: r_s4_deployment + retries: 60 + delay: 10 + until: + - r_s4_deployment.resources is defined + - r_s4_deployment.resources | length > 0 + - r_s4_deployment.resources[0].status is defined + - r_s4_deployment.resources[0].status.readyReplicas is defined + - r_s4_deployment.resources[0].status.readyReplicas | int == 1 + +- name: Get S4 route + when: ocp4_workload_s4_route_enabled | bool + kubernetes.core.k8s_info: + api_version: route.openshift.io/v1 + kind: Route + name: s4 + namespace: "{{ ocp4_workload_s4_namespace }}" + register: r_s4_route + retries: 30 + delay: 5 + until: + - r_s4_route.resources is defined + - r_s4_route.resources | length > 0 + - r_s4_route.resources[0].spec.host is defined + +- name: Set S4 route URL + when: + - ocp4_workload_s4_route_enabled | bool + - r_s4_route.resources | length > 0 + ansible.builtin.set_fact: + _ocp4_workload_s4_route_url: "https://{{ r_s4_route.resources[0].spec.host }}" + +- name: Get S3 API route (if enabled) + when: ocp4_workload_s4_route_s3_api_enabled | bool + kubernetes.core.k8s_info: + api_version: route.openshift.io/v1 + kind: Route + name: s4-api + namespace: "{{ ocp4_workload_s4_namespace }}" + register: r_s4_api_route + retries: 30 + delay: 5 + until: + - r_s4_api_route.resources is defined + - r_s4_api_route.resources | length > 0 + - r_s4_api_route.resources[0].spec.host is defined + +- name: Set S3 API endpoint URL + when: + - ocp4_workload_s4_route_s3_api_enabled | bool + - r_s4_api_route.resources | length > 0 + ansible.builtin.set_fact: + _ocp4_workload_s4_s3_api_endpoint_external: "https://{{ r_s4_api_route.resources[0].spec.host }}" + +- name: Create S3 bucket(s) + when: ocp4_workload_s4_buckets | default([]) | length > 0 + block: + - name: Create buckets + 
kubernetes.core.k8s: + state: present + definition: "{{ lookup('template', 'bucket-job.yaml.j2') | from_yaml }}" + loop: "{{ ocp4_workload_s4_buckets }}" + loop_control: + loop_var: bucket_name + + - name: Wait for bucket creation job(s) to complete + kubernetes.core.k8s_info: + api_version: batch/v1 + kind: Job + namespace: "{{ ocp4_workload_s4_namespace }}" + label_selectors: + - "app.kubernetes.io/component=bucket-creator" + register: r_bucket_jobs + retries: 30 + delay: 5 + until: + - r_bucket_jobs.resources is defined + - r_bucket_jobs.resources | length > 0 + - r_bucket_jobs.resources | selectattr('status.succeeded', 'defined') | selectattr('status.succeeded', 'equalto', 1) | list | length == (ocp4_workload_s4_buckets | length) + +- name: Report S4 information to user + when: ocp4_workload_s4_enable_user_info_messages | bool + block: + - name: Set S4 user info messages + ansible.builtin.set_fact: + _s4_user_info_messages: + - msg: "S4 Web UI: {{ _ocp4_workload_s4_route_url }}" + when: "{{ _ocp4_workload_s4_route_url | length > 0 }}" + - msg: "S4 UI Login: {{ ocp4_workload_s4_auth_username }} / {{ _ocp4_workload_s4_auth_password }}" + when: "{{ ocp4_workload_s4_auth_enabled | bool }}" + - msg: "S3 Access Key: {{ ocp4_workload_s4_access_key_id }}" + when: true + - msg: "S3 Secret Key: {{ ocp4_workload_s4_secret_access_key }}" + when: true + - msg: "S3 Endpoint (internal): {{ _ocp4_workload_s4_s3_api_endpoint_internal }}" + when: true + - msg: "S3 Endpoint (external): {{ _ocp4_workload_s4_s3_api_endpoint_external }}" + when: "{{ ocp4_workload_s4_route_s3_api_enabled | bool and _ocp4_workload_s4_s3_api_endpoint_external is defined }}" + - msg: "S3 Buckets Created: {{ ocp4_workload_s4_buckets | join(', ') }}" + when: "{{ ocp4_workload_s4_buckets | default([]) | length > 0 }}" + + - name: Print S4 information + when: item.when | bool + agnosticd.core.agnosticd_user_info: + msg: "{{ item.msg }}" + loop: "{{ _s4_user_info_messages }}" + +- name: Save S4 data for user 
+ when: ocp4_workload_s4_enable_user_info_data | bool + agnosticd.core.agnosticd_user_info: + data: + s4_web_ui_url: "{{ _ocp4_workload_s4_route_url }}" + s4_ui_username: "{{ ocp4_workload_s4_auth_username }}" + s4_ui_password: "{{ _ocp4_workload_s4_auth_password }}" + s4_s3_access_key_id: "{{ ocp4_workload_s4_access_key_id }}" + s4_s3_secret_access_key: "{{ ocp4_workload_s4_secret_access_key }}" + s4_s3_api_endpoint_internal: "{{ _ocp4_workload_s4_s3_api_endpoint_internal }}" + s4_s3_api_endpoint_external: "{{ _ocp4_workload_s4_s3_api_endpoint_external | default('') }}" + s4_buckets: "{{ ocp4_workload_s4_buckets | default([]) }}" + s4_namespace: "{{ ocp4_workload_s4_namespace }}" diff --git a/roles/ocp4_workload_s4/templates/application.yaml.j2 b/roles/ocp4_workload_s4/templates/application.yaml.j2 new file mode 100644 index 0000000..7801f2b --- /dev/null +++ b/roles/ocp4_workload_s4/templates/application.yaml.j2 @@ -0,0 +1,123 @@ +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: {{ ocp4_workload_s4_application_name }} + namespace: {{ ocp4_workload_s4_gitops_namespace }} + labels: + app.kubernetes.io/name: s4 + app.kubernetes.io/component: application +spec: + project: default + source: + repoURL: {{ ocp4_workload_s4_chart_repo }} + targetRevision: {{ ocp4_workload_s4_chart_revision }} + path: {{ ocp4_workload_s4_chart_path }} + helm: + values: | + # Image configuration + image: + repository: {{ ocp4_workload_s4_image_repository }} + tag: {{ ocp4_workload_s4_image_tag }} + pullPolicy: {{ ocp4_workload_s4_image_pull_policy }} + + # S3 backend configuration + s3: + endpoint: 'http://localhost:7480' + region: 'us-east-1' + accessKeyId: {{ ocp4_workload_s4_access_key_id }} + secretAccessKey: {{ ocp4_workload_s4_secret_access_key }} + + # Authentication configuration + auth: + enabled: {{ ocp4_workload_s4_auth_enabled | lower }} +{% if ocp4_workload_s4_auth_enabled %} + username: {{ ocp4_workload_s4_auth_username }} + password: {{ 
_ocp4_workload_s4_auth_password }}
+          jwtExpirationHours: {{ ocp4_workload_s4_auth_jwt_expiration_hours }}
+          cookieRequireHttps: {{ ocp4_workload_s4_auth_cookie_require_https | lower }}
{% endif %}
+
+        # Storage configuration
+        storage:
+          localPaths: '{{ ocp4_workload_s4_storage_local_paths }}'
+          maxFileSizeGB: {{ ocp4_workload_s4_storage_max_file_size_gb }}
+          maxConcurrentTransfers: {{ ocp4_workload_s4_storage_max_concurrent_transfers }}
+
+        # RGW data volume
+        data:
+          size: "{{ ocp4_workload_s4_storage_data_size }}"
{% if ocp4_workload_s4_storage_data_storage_class | length > 0 %}
+          storageClass: {{ ocp4_workload_s4_storage_data_storage_class }}
{% endif %}
+          accessMode: {{ ocp4_workload_s4_storage_data_access_mode }}
+
+        # Local storage volume
+        localStorage:
+          enabled: {{ ocp4_workload_s4_storage_local_enabled | lower }}
{% if ocp4_workload_s4_storage_local_enabled %}
+          size: "{{ ocp4_workload_s4_storage_local_size }}"
{% if ocp4_workload_s4_storage_local_storage_class | length > 0 %}
+          storageClass: {{ ocp4_workload_s4_storage_local_storage_class }}
{% endif %}
+          accessMode: {{ ocp4_workload_s4_storage_local_access_mode }}
{% endif %}
+
+        # Resource limits and requests (quantities are strings in k8s)
+        resources:
+          requests:
+            cpu: "{{ ocp4_workload_s4_resources_requests_cpu }}"
+            memory: "{{ ocp4_workload_s4_resources_requests_memory }}"
+          limits:
+            cpu: "{{ ocp4_workload_s4_resources_limits_cpu }}"
+            memory: "{{ ocp4_workload_s4_resources_limits_memory }}"
+
+        # Service configuration
+        service:
+          type: ClusterIP
+          port: 5000
+          s3Port: 7480
+
+        # OpenShift Route configuration
+        route:
+          enabled: {{ ocp4_workload_s4_route_enabled | lower }}
{% if ocp4_workload_s4_route_enabled %}
{% if ocp4_workload_s4_route_host | length > 0 %}
+          host: {{ ocp4_workload_s4_route_host }}
{% endif %}
+          tls:
+            termination: {{ ocp4_workload_s4_route_tls_termination }}
+            insecureEdgeTerminationPolicy: {{ ocp4_workload_s4_route_tls_insecure_policy }}
{% endif %}
+
+        # S3 API Route
+        s3Api:
+          enabled: {{
ocp4_workload_s4_route_s3_api_enabled | lower }}
{% if ocp4_workload_s4_route_s3_api_enabled %}
{% if ocp4_workload_s4_route_s3_api_host | length > 0 %}
+          host: {{ ocp4_workload_s4_route_s3_api_host }}
{% endif %}
+          tls:
+            termination: {{ ocp4_workload_s4_route_tls_termination }}
+            insecureEdgeTerminationPolicy: {{ ocp4_workload_s4_route_tls_insecure_policy }}
{% endif %}
+
+  destination:
+    server: https://kubernetes.default.svc
+    namespace: {{ ocp4_workload_s4_namespace }}
+
+  syncPolicy:
{% if ocp4_workload_s4_sync_policy_automated %}
+    automated:
+      prune: {{ ocp4_workload_s4_sync_policy_prune | lower }}
+      selfHeal: {{ ocp4_workload_s4_sync_policy_self_heal | lower }}
{% endif %}
+    syncOptions:
+    - CreateNamespace=true
+    - RespectIgnoreDifferences=true
+    retry:
+      limit: {{ ocp4_workload_s4_sync_retry_limit }}
+      backoff:
+        duration: 5s
+        factor: 2
+        maxDuration: 3m
diff --git a/roles/ocp4_workload_s4/templates/bucket-job.yaml.j2 b/roles/ocp4_workload_s4/templates/bucket-job.yaml.j2
new file mode 100644
index 0000000..21021c4
--- /dev/null
+++ b/roles/ocp4_workload_s4/templates/bucket-job.yaml.j2
@@ -0,0 +1,80 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: create-bucket-{{ bucket_name }}
+  namespace: {{ ocp4_workload_s4_namespace }}
+  labels:
+    app.kubernetes.io/name: s4
+    app.kubernetes.io/component: bucket-creator
+    # Quoted: an all-digit bucket name must still render as a string label.
+    app.kubernetes.io/bucket: "{{ bucket_name }}"
+spec:
+  backoffLimit: 5
+  ttlSecondsAfterFinished: 600
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: s4
+        app.kubernetes.io/component: bucket-creator
+    spec:
+      restartPolicy: OnFailure
+      serviceAccountName: s4
+      containers:
+      - name: create-bucket
+        image: quay.io/minio/mc:latest
+        command:
+        - /bin/sh
+        - -c
+        - |
+          set -e
+          echo "Configuring S3 client..."
+          mc alias set s4 http://s4.{{ ocp4_workload_s4_namespace }}.svc.cluster.local:7480 \
+            "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" --insecure

+          echo "Waiting for S3 service to be ready..."
+          MAX_RETRIES=60
+          RETRY=0
+          until mc ls s4 --insecure 2>/dev/null; do
+            RETRY=$((RETRY+1))
+            if [ $RETRY -gt $MAX_RETRIES ]; then
+              echo "ERROR: S3 service did not become ready after $MAX_RETRIES attempts"
+              echo "Attempting bucket creation anyway..."
+              break
+            fi
+            echo "S3 service not ready yet (attempt $RETRY/$MAX_RETRIES), waiting..."
+            sleep 5
+          done
+
+          echo "Creating bucket: {{ bucket_name }}"
+          if mc ls s4/{{ bucket_name }} --insecure 2>/dev/null; then
+            echo "Bucket {{ bucket_name }} already exists"
+          else
+            mc mb s4/{{ bucket_name }} --insecure
+            echo "Bucket {{ bucket_name }} created successfully"
+          fi
+
+          echo "Verifying bucket exists..."
+          mc ls s4/{{ bucket_name }} --insecure
+          echo "Bucket creation complete"
+        env:
+        # Quoted: Kubernetes env values must be strings; an unquoted
+        # numeric-looking key would render as an int and be rejected.
+        - name: AWS_ACCESS_KEY_ID
+          value: "{{ ocp4_workload_s4_access_key_id }}"
+        - name: AWS_SECRET_ACCESS_KEY
+          value: "{{ ocp4_workload_s4_secret_access_key }}"
+        - name: HOME
+          value: /tmp
+        - name: MC_CONFIG_DIR
+          value: /tmp/.mc
+        volumeMounts:
+        - name: mc-config
+          mountPath: /tmp
+        resources:
+          requests:
+            cpu: 100m
+            memory: 128Mi
+          limits:
+            cpu: 500m
+            memory: 256Mi
+      volumes:
+      - name: mc-config
+        emptyDir: {}