diff --git a/ansible/inventory/demo/wiab-staging.yml b/ansible/inventory/demo/wiab-staging.yml index 7652ce731..cb95c01aa 100644 --- a/ansible/inventory/demo/wiab-staging.yml +++ b/ansible/inventory/demo/wiab-staging.yml @@ -6,4 +6,4 @@ wiab-staging: ansible_user: 'demo' ansible_ssh_private_key_file: "~/.ssh/id_ed25519" vars: - artifact_hash: f1f624256bdab0f9f76158c7f45e0618ee641237 + artifact_hash: 82edf88d9193e9f7e0a62ee4b287fd0c7cebb1bd diff --git a/bin/debug_logs.sh b/bin/debug_logs.sh index 8a40701b3..3138e025f 100755 --- a/bin/debug_logs.sh +++ b/bin/debug_logs.sh @@ -4,14 +4,14 @@ set -euo pipefail echo "Printing all pods status" kubectl get pods --all-namespaces echo "------------------------------------" -namespaces=$(kubectl get ns -o=jsonpath='{.items[*].metadata.name}') +namespaces="cert-manager-ns default" echo "Namespaces = $namespaces" for ns in $namespaces; do - pods=$(kubectl get pods --all-namespaces -o=jsonpath='{.items[*].metadata.name}') + pods=$(kubectl get pods -n "$ns" -o=jsonpath='{.items[*].metadata.name}') echo "Pods in namespace: $ns = $pods" for pod in $pods; do echo "Logs for pod: $pod" - kubectl logs --all-containers -n "$ns" "$pod" || true + kubectl logs --tail 30 --all-containers -n "$ns" "$pod" || true echo "Description for pod: $pod" kubectl describe pod -n "$ns" "$pod" || true echo "------------------------------------" diff --git a/bin/helm-operations.sh b/bin/helm-operations.sh index 1f9175579..9dbe49a58 100755 --- a/bin/helm-operations.sh +++ b/bin/helm-operations.sh @@ -3,17 +3,35 @@ set -Eeo pipefail # Read values from environment variables with defaults -BASE_DIR="/wire-server-deploy" -TARGET_SYSTEM="example.dev" -CERT_MASTER_EMAIL="certmaster@${TARGET_SYSTEM}" +BASE_DIR="${BASE_DIR:-/wire-server-deploy}" +TARGET_SYSTEM="${TARGET_SYSTEM:-example.com}" +CERT_MASTER_EMAIL="${CERT_MASTER_EMAIL:-certmaster@${TARGET_SYSTEM}}" + +# DEPLOY_CERT_MANAGER env variable is used to decide if cert_manager and 
nginx-ingress-services charts should get deployed +# default is set to TRUE to deploy it unless changed +DEPLOY_CERT_MANAGER="${DEPLOY_CERT_MANAGER:-TRUE}" + +# DUMP_LOGS_ON_FAIL to dump logs on failure +# it is false by default +DUMP_LOGS_ON_FAIL="${DUMP_LOGS_ON_FAIL:-FALSE}" # this IP should match the DNS A record value for TARGET_SYSTEM # assuming it to be the public address used by clients to reach public Address -HOST_IP="" +HOST_IP="${HOST_IP:-}" + if [ -z "$HOST_IP" ]; then HOST_IP=$(wget -qO- https://api.ipify.org) fi +function dump_debug_logs { + local exit_code=$? + if [[ "$DUMP_LOGS_ON_FAIL" == "TRUE" ]]; then + "$BASE_DIR"/bin/debug_logs.sh + fi + return $exit_code +} +trap dump_debug_logs ERR + # picking a node for calling traffic (3rd kube worker node) CALLING_NODE=$(kubectl get nodes --no-headers | tail -n 1 | awk '{print $1}') if [[ -z "$CALLING_NODE" ]]; then @@ -21,6 +39,21 @@ if [[ -z "$CALLING_NODE" ]]; then exit 1 fi +sync_pg_secrets() { + echo "Retrieving PostgreSQL password from databases-ephemeral for wire-server deployment..." + if kubectl get secret wire-postgresql-external-secret &>/dev/null; then + # Usage: sync-k8s-secret-to-wire-secrets.sh + "$BASE_DIR/bin/sync-k8s-secret-to-wire-secrets.sh" \ + wire-postgresql-external-secret password \ + "$BASE_DIR/values/wire-server/secrets.yaml" \ + .brig.secrets.pgPassword .galley.secrets.pgPassword .background-worker.secrets.pgPassword + else + echo "⚠️ Warning: PostgreSQL secret 'wire-postgresql-external-secret' not found, skipping secret sync" + echo " Make sure databases-ephemeral chart is deployed before wire-server" + fi + return $? 
+} + # Creates values.yaml from prod-values.example.yaml and secrets.yaml from prod-secrets.example.yaml # Works on all chart directories in $BASE_DIR/values/ process_values() { @@ -136,23 +169,6 @@ deploy_charts() { helm_command+=" --values $secrets_file" fi - # handle wire-server to inject PostgreSQL password from databases-ephemeral - if [[ "$chart" == "wire-server" ]]; then - - echo "Retrieving PostgreSQL password from databases-ephemeral for wire-server deployment..." - if kubectl get secret wire-postgresql-external-secret &>/dev/null; then - # Usage: sync-k8s-secret-to-wire-secrets.sh - "$BASE_DIR/bin/sync-k8s-secret-to-wire-secrets.sh" \ - "wire-postgresql-external-secret" \ - "password" \ - "$BASE_DIR/values/wire-server/secrets.yaml" \ - .brig.secrets.pgPassword .galley.secrets.pgPassword .background-worker.secrets.pgPassword - else - echo "⚠️ Warning: PostgreSQL secret 'wire-postgresql-external-secret' not found, skipping secret sync" - echo " Make sure databases-ephemeral chart is deployed before wire-server" - fi - fi - echo "Deploying $chart as $helm_command" eval "$helm_command" done @@ -164,7 +180,7 @@ deploy_charts() { deploy_cert_manager() { kubectl get namespace cert-manager-ns || kubectl create namespace cert-manager-ns - helm upgrade --install -n cert-manager-ns cert-manager "$BASE_DIR/charts/cert-manager" --values "$BASE_DIR/values/cert-manager/values.yaml" + helm upgrade --install --wait --timeout=5m0s -n cert-manager-ns cert-manager "$BASE_DIR/charts/cert-manager" --values "$BASE_DIR/values/cert-manager/values.yaml" # display running pods kubectl get pods --sort-by=.metadata.creationTimestamp -n cert-manager-ns @@ -175,39 +191,45 @@ deploy_calling_services() { echo "Deploying sftd and coturn" # select the node to deploy sftd kubectl annotate node "$CALLING_NODE" wire.com/external-ip="$HOST_IP" --overwrite - helm upgrade --install sftd "$BASE_DIR/charts/sftd" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values 
"$BASE_DIR/values/sftd/values.yaml" + helm upgrade --install --wait --timeout=5m0s sftd "$BASE_DIR/charts/sftd" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values "$BASE_DIR/values/sftd/values.yaml" kubectl annotate node "$CALLING_NODE" wire.com/external-ip="$HOST_IP" --overwrite - helm upgrade --install coturn "$BASE_DIR/charts/coturn" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values "$BASE_DIR/values/coturn/values.yaml" --values "$BASE_DIR/values/coturn/secrets.yaml" + helm upgrade --install --wait --timeout=5m0s coturn "$BASE_DIR/charts/coturn" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values "$BASE_DIR/values/coturn/values.yaml" --values "$BASE_DIR/values/coturn/secrets.yaml" # display running pods post deploying all helm charts in default namespace kubectl get pods --sort-by=.metadata.creationTimestamp } main() { + # Create prod-values.example.yaml to values.yaml and take backup process_values "prod" "values" # Create prod-secrets.example.yaml to secrets.yaml and take backup process_values "prod" "secrets" +# Sync postgresql secret +sync_pg_secrets + # configure chart specific variables for each chart in values.yaml file configure_values # deploying with external datastores, useful for prod setup deploy_charts cassandra-external elasticsearch-external minio-external postgresql-external fake-aws smtp rabbitmq-external databases-ephemeral reaper wire-server webapp account-pages team-settings smallstep-accomp ingress-nginx-controller -# deploying cert manager to issue certs, by default letsencrypt-http01 issuer is configured -deploy_cert_manager +# deploying cert-manager only when the env var DEPLOY_CERT_MANAGER is set to TRUE +if [[ "$DEPLOY_CERT_MANAGER" == "TRUE" ]]; then + # deploying cert manager to issue certs, by default letsencrypt-http01 issuer is configured + deploy_cert_manager + + # nginx-ingress-services chart needs cert-manager to be deployed + deploy_charts nginx-ingress-services -# 
nginx-ingress-services chart needs cert-manager to be deployed -deploy_charts nginx-ingress-services + # print status of certs + kubectl get certificate +fi # deploying sft and coturn services -# not implemented yet deploy_calling_services - -# print status of certs -kubectl get certificate } main diff --git a/bin/offline-deploy.sh b/bin/offline-deploy.sh index 61c7d3dfa..3bede967a 100755 --- a/bin/offline-deploy.sh +++ b/bin/offline-deploy.sh @@ -41,4 +41,4 @@ fi $DOCKER_RUN_BASE $SSH_MOUNT $WSD_CONTAINER ./bin/offline-cluster.sh -sudo docker run --network=host -v $PWD:/wire-server-deploy $WSD_CONTAINER ./bin/helm-operations.sh +sudo docker run --network=host -v $PWD:/wire-server-deploy $WSD_CONTAINER sh -c 'TARGET_SYSTEM="example.dev" CERT_MASTER_EMAIL="certmaster@example.dev" DEPLOY_CERT_MANAGER=TRUE DUMP_LOGS_ON_FAIL=TRUE ./bin/helm-operations.sh' diff --git a/changelog.d/3-deploy-builds/minor-deploy-fixes b/changelog.d/3-deploy-builds/minor-deploy-fixes new file mode 100644 index 000000000..c27d2b844 --- /dev/null +++ b/changelog.d/3-deploy-builds/minor-deploy-fixes @@ -0,0 +1,8 @@ +Fixed: debug_logs.sh to log only the pods for default and cert-manager-ns namespace and limit log lines +Added: enabled debug_logs.sh on helm install failures (helm-operations.sh) with a flag DUMP_LOGS_ON_FAIL +Added: env vars to helm-operations.sh to improve UX while configuring variables +Fixed: sync_pg_secrets operation in helm-operations.sh and clean the deploy_charts logic +Added: wait and timeout on cert-manager and calling_services helm chart operations +Fixed: offline-deploy.sh to run helm-operations.sh using new env vars and with default DUMP_LOGS_ON_FAIL=TRUE +Fixed: documentation for wiab-staging.md based on user feedback +Fixed: sftd helm chart values for joinCall component which fails to find hash-based images diff --git a/offline/wiab-staging.md b/offline/wiab-staging.md index 1fa85aef9..d3a3de4ea 100644 --- a/offline/wiab-staging.md +++ b/offline/wiab-staging.md @@ 
-90,6 +90,7 @@ We need the whole ansible directory as ansible-playbook uses some templates for **Option A: Download as ZIP** ```bash +# requirements: wget and unzip wget https://github.com/wireapp/wire-server-deploy/archive/refs/heads/master.zip unzip master.zip cd wire-server-deploy-master @@ -97,6 +98,7 @@ cd wire-server-deploy-master **Option B: Clone with Git** ```bash +# requirements: git git clone https://github.com/wireapp/wire-server-deploy.git cd wire-server-deploy ``` @@ -105,7 +107,7 @@ cd wire-server-deploy A sample inventory is available at [ansible/inventory/demo/wiab-staging.yml](https://github.com/wireapp/wire-server-deploy/blob/master/ansible/inventory/demo/wiab-staging.yml). -*Note: Replace example.com with your physical machine address where KVM is available and adjust other variables accordingly.* +*Note: Replace example.com with your physical machine (adminhost) address where KVM is available and adjust other variables like ansible_user and ansible_ssh_private_key_file. The SSH user for ansible `ansible_user` should have password-less `sudo` access. The physical host should be running Ubuntu 22.04.* **Step 3: Run the VM and network provision** @@ -125,6 +127,8 @@ Ensure the inventory file `ansible/inventory/offline/inventory.yml` in the direc Since the inventory is ready, please continue with the following steps: +> **Note**: All next steps assume that the wire-server-deploy artifact has been downloaded on the `adminhost` (your physical machine) and extracted at `/home/ansible_user/wire-server-deploy`. All commands from here on will be issued from this directory on the `adminhost`, ssh on the node before proceeding. 
+ ### Environment Setup - **[Making tooling available in your environment](docs_ubuntu_22.04.md#making-tooling-available-in-your-environment)** @@ -140,7 +144,7 @@ Since the inventory is ready, please continue with the following steps: ### Helm Operations to install wire services and supporting helm charts -**Helm chart deployment (automated):** The script `bin/helm-operations.sh` will deploy the charts for you. It prepares `values.yaml`/`secrets.yaml`, customizes them for your domain/IPs, then runs Helm installs/upgrades in the correct order. +**Helm chart deployment (automated):** The script `bin/helm-operations.sh` will deploy the charts for you. It prepares `values.yaml`/`secrets.yaml`, customizes them for your domain/IPs, then runs Helm installs/upgrades in the correct order. Prepare the values before running it. **User-provided inputs (set these before running):** - `TARGET_SYSTEM`: your domain (e.g., `wire.example.com` or `example.dev`). @@ -148,15 +152,19 @@ Since the inventory is ready, please continue with the following steps: - `HOST_IP`: public IP that matches your DNS A record (auto-detected if empty). **TLS / certificate behavior (cert-manager vs. Bring Your Own):** -- By default, `bin/helm-operations.sh` runs `deploy_cert_manager`, which installs cert-manager and configures a Let’s Encrypt (HTTP-01) issuer for the ingress charts. -- If you **do not** want Let’s Encrypt / cert-manager (for example, you are using **[Bring Your Own certificates](docs_ubuntu_22.04.md#acquiring--deploying-ssl-certificates)** or you cannot satisfy HTTP-01 requirements), disable this step by commenting out the `deploy_cert_manager` call inside `bin/helm-operations.sh`. - - After disabling cert-manager, ensure your ingress is configured with your own TLS secret(s) as described in the TLS documentation below. 
+- By default, `bin/helm-operations.sh` has `DEPLOY_CERT_MANAGER=TRUE`, which installs cert-manager and configures a Let’s Encrypt (HTTP-01) issuer for the ingress charts. +- If you **do not** want Let’s Encrypt / cert-manager (for example, you are using **[Bring Your Own certificates](docs_ubuntu_22.04.md#acquiring--deploying-ssl-certificates)**), disable this step by passing the environment variable `DEPLOY_CERT_MANAGER=FALSE` when running `bin/helm-operations.sh`. + - When choosing `DEPLOY_CERT_MANAGER=FALSE`, ensure your ingress is configured with your own TLS secret(s) as described at [Acquiring / Deploying SSL Certificates](docs_ubuntu_22.04.md#acquiring--deploying-ssl-certificates). + - When choosing `DEPLOY_CERT_MANAGER=TRUE`, ensure if further network configuration is required by following [cert-manager behaviour in NAT / bridge environments](#cert-manager-behaviour-in-nat--bridge-environments). -**To run the automated helm chart deployment**: -`d ./bin/helm-operations.sh` +**To run the automated helm chart deployment with your variables**: +```bash +# example command - verify the variables before running it +d sh -c 'TARGET_SYSTEM="example.dev" CERT_MASTER_EMAIL="certmaster@example.dev" DEPLOY_CERT_MANAGER=TRUE ./bin/helm-operations.sh' +``` **Charts deployed by the script:** -- External datastores and helpers: `cassandra-external`, `elasticsearch-external`, `minio-external`, `rabbitmq-external`, `databases-ephemeral`, `reaper`, `fake-aws`, `demo-smtp`. +- External datastores and helpers: `cassandra-external`, `elasticsearch-external`, `minio-external`, `rabbitmq-external`,`postgresql-external`, `databases-ephemeral`, `reaper`, `fake-aws`, `demo-smtp`. - Wire services: `wire-server`, `webapp`, `account-pages`, `team-settings`, `smallstep-accomp`. - Ingress and certificates: `ingress-nginx-controller`, `cert-manager`, `nginx-ingress-services`. - Calling services: `sftd`, `coturn`. 
@@ -165,23 +173,17 @@ Since the inventory is ready, please continue with the following steps: - Creates `values.yaml` and `secrets.yaml` from `prod-values.example.yaml` and `prod-secrets.example.yaml` for each chart under `values/`. - Backs up any existing `values.yaml`/`secrets.yaml` before replacing them. -**Values configured by the script:** -- Replaces `example.com` with `TARGET_SYSTEM` in Wire and webapp hostnames. -- Enables cert-manager and sets `certmasterEmail` using `CERT_MASTER_EMAIL`. -- Sets SFTD hosts and switches issuer to `letsencrypt-http01`. -- Sets coturn listen/relay/external IPs using the calling node IP and `HOST_IP`. - *Note: The `bin/helm-operations.sh` script above deploys these charts; you do not need to run the Helm commands manually unless you want to customize or debug.* ## Network Traffic Configuration ### Bring traffic from the physical machine to Wire services in the k8s cluster -If you used the Ansible playbook earlier, nftables firewall rules are pre-configured to forward traffic. If you set up VMs manually with your own hypervisor, you must manually configure network traffic flow using nftables. +If you used the Ansible playbook earlier, nftables firewall rules are pre-configured to forward traffic. If you set up VMs manually with your own hypervisor, you must manually configure network traffic flow using nftables as described below. **Required Network Configuration:** -The physical machine must forward traffic from external clients to the Kubernetes cluster running Wire services. This involves: +The physical machine (adminhost) must forward traffic from external clients to the Kubernetes cluster running Wire services. This involves: 1. 
**HTTP/HTTPS Traffic (Ingress)** - Forward ports 80 and 443 to the nginx-ingress-controller running on a Kubernetes node - Port 80 (HTTP) → Kubernetes node port 31772 @@ -193,19 +195,20 @@ The physical machine must forward traffic from external clients to the Kubernete **Implementation:** -Use the detailed nftables rules in [../ansible/files/wiab_server_nftables.conf.j2](../ansible/files/wiab_server_nftables.conf.j2) as the template. The guide covers: +Use the detailed nftables rules in [../ansible/files/wiab_server_nftables.conf.j2](../ansible/files/wiab_server_nftables.conf.j2) as the template. The nftable configuration template covers: - Defining your network variables (Coturn IP, Kubernetes node IP, WAN interface) - Creating NAT rules for HTTP/HTTPS ingress traffic -- Setting up TURN protocol forwarding for Coturn -- Restarting nftables to apply changes +- Setting up TURN protocol forwarding for Coturn and traffic for SFTD -You can also apply these rules using the Ansible playbook, by following: +*Note: If you have already run the playbook wiab-staging-provision.yml then it is already configured for you. Confirm it by checking if the wire endpoint `https://webapp.TARGET_SYSTEM` is reachable from the public internet or your private network (in case of private network), but not from the adminhost itself.* + +You can also apply these rules using the Ansible playbook against your adminhost, by following: ```bash ansible-playbook -i inventory.yml ansible/wiab-staging-nftables.yml ``` -*Note: If you ran the playbook wiab-staging-provision.yml then it might already be configured for you. Please confirm before running.* +You can run the above playbook from your local system or where you have cloned/downloaded the [Wire server deploy ansible playbooks](#getting-the-ansible-playbooks). 
The inventory should define the following variables: @@ -221,59 +224,78 @@ calling_node_ip=192.168.122.13 # Host WAN interface name inf_wan=eth0 -``` -> **Note (cert-manager & hairpin NAT):** -> When cert-manager performs HTTP-01 self-checks inside the cluster, traffic can hairpin (Pod → Node → host public IP → DNAT → Node → Ingress). -> If your nftables rules DNAT in `PREROUTING` without a matching SNAT on `virbr0 → virbr0`, return packets may bypass the host and break conntrack, causing HTTP-01 timeouts, resulting in certificate verification failure. -> Additionally, strict `rp_filter` can drop asymmetric return packets. -> If cert-manager is deployed in a NAT/bridge (`virbr0`) environment, first verify whether certificate issuance is failing before applying hairpin handling. -> Check whether certificates are successfully issued: -> ```bash -> d kubectl get certificates -> ``` -> If certificates are not in `Ready=True` state, inspect cert-manager logs for HTTP-01 self-check or timeout errors: -> ```bash -> d kubectl logs -n cert-manager-ns -> ``` -> If you observe HTTP-01 challenge timeouts or self-check failures in a NAT/bridge environment, hairpin SNAT and relaxed reverse-path filtering handling may be required. - > - Relax reverse-path filtering to loose mode to allow asymmetric flows: - > ```bash - > sudo sysctl -w net.ipv4.conf.all.rp_filter=2 - > sudo sysctl -w net.ipv4.conf.virbr0.rp_filter=2 - > ``` - > These settings help conntrack reverse DNAT correctly and avoid drops during cert-manager’s HTTP-01 challenges in NAT/bridge (virbr0) environments. 
- > - > - Enable Hairpin SNAT (temporary for cert-manager HTTP-01): - > ```bash - > sudo nft insert rule ip nat POSTROUTING position 0 \ - > iifname "virbr0" oifname "virbr0" \ - > ip daddr 192.168.122.0/24 ct status dnat \ - > counter masquerade \ - > comment "wire-hairpin-dnat-virbr0" - > ``` - > This forces DNATed traffic that hairpins over the bridge to be masqueraded, ensuring return traffic flows back through the host and conntrack can correctly reverse the DNAT. - > Verify the rule was added: - > ```bash - > sudo nft list chain ip nat POSTROUTING - > ``` - > You should see a rule similar to: - > ``` - > iifname "virbr0" oifname "virbr0" ip daddr 192.168.122.0/24 ct status dnat counter masquerade # handle - > ``` - > - > - Remove the rule after certificates are issued - > ```bash - > d kubectl get certificates - > ``` - > - Once Let's Encrypt validation completes and certificates are issued, remove the temporary hairpin SNAT rule. Use the following pipeline to locate the rule handle and delete it safely: - > ```bash - > sudo nft list chain ip nat POSTROUTING | \ - > grep wire-hairpin-dnat-virbr0 | \ - > sed -E 's/.*handle ([0-9]+).*/\1/' | \ - > xargs -r -I {} sudo nft delete rule ip nat POSTROUTING handle {} - > ``` +# These are the same as wiab-staging.yml +# user and ssh key for adminhost +ansible_user='demo' +ansible_ssh_private_key_file='~/.ssh/id_ed25519' + +``` +### cert-manager behaviour in NAT / bridge environments + +When cert-manager performs HTTP-01 self-checks inside the cluster, traffic can hairpin: + +- Pod → Node → host public IP → DNAT → Node → Ingress + +In NAT/bridge setups (for example, using `virbr0` on the host): + +- If nftables DNAT rules exist in `PREROUTING` without a matching SNAT on `virbr0 → virbr0`, return packets may bypass the host and break conntrack, causing HTTP-01 timeouts and certificate verification failures. +- too strict of `rp_filter` settings can drop asymmetric return packets. 
+ +Before changing anything, first verify whether certificate issuance is actually failing: + +1. Check whether certificates are successfully issued: + ```bash + d kubectl get certificates + ``` +2. If certificates are not in `Ready=True` state, inspect cert-manager logs for HTTP-01 self-check or timeout errors: + ```bash + d kubectl logs -n cert-manager-ns + ``` + +If you observe HTTP-01 challenge timeouts or self-check failures in a NAT/bridge environment, hairpin SNAT and relaxed reverse-path filtering handling may be required. One possible approach is: + +- Relax reverse-path filtering to loose mode to allow asymmetric flows: + ```bash + sudo sysctl -w net.ipv4.conf.all.rp_filter=2 + sudo sysctl -w net.ipv4.conf.virbr0.rp_filter=2 + ``` + These settings help conntrack reverse DNAT correctly and avoid drops during cert-manager’s HTTP-01 challenges in NAT/bridge (`virbr0`) environments. + +- Enable Hairpin SNAT (temporary for cert-manager HTTP-01): + ```bash + sudo nft insert rule ip nat POSTROUTING position 0 \ + iifname "virbr0" oifname "virbr0" \ + ip daddr 192.168.122.0/24 ct status dnat \ + counter masquerade \ + comment "wire-hairpin-dnat-virbr0" + ``` + This forces DNATed traffic that hairpins over the bridge to be masqueraded, ensuring return traffic flows back through the host and conntrack can correctly reverse the DNAT. + + Verify the rule was added: + ```bash + sudo nft list chain ip nat POSTROUTING + ``` + You should see a rule similar to: + ``` + iifname "virbr0" oifname "virbr0" ip daddr 192.168.122.0/24 ct status dnat counter masquerade # handle + ``` + +- Remove the rule after certificates are issued, confirm by running the following: + ```bash + d kubectl get certificates + ``` + + Once Let’s Encrypt validation completes and certificates are issued, remove the temporary hairpin SNAT rule. 
Use the following pipeline to locate the rule handle and delete it safely: + ```bash + sudo nft -a list chain ip nat POSTROUTING | \ + grep wire-hairpin-dnat-virbr0 | \ + sed -E 's/.*handle ([0-9]+).*/\1/' | \ + xargs -r -I {} sudo nft delete rule ip nat POSTROUTING handle {} + ``` + +For additional background on when hairpin NAT is required and how it relates to WIAB Dev and WIAB Staging, see [Hairpin networking for WIAB Dev and WIAB Staging](tls-certificates.md#hairpin-networking-for-wiab-dev-and-wiab-staging). ## Further Reading diff --git a/values/sftd/demo-values.example.yaml b/values/sftd/demo-values.example.yaml index 566db6bc4..91dc2c885 100644 --- a/values/sftd/demo-values.example.yaml +++ b/values/sftd/demo-values.example.yaml @@ -3,6 +3,10 @@ host: sftd.example.com replicaCount: 1 joinCall: replicaCount: 1 + image: + repository: docker.io/bitnamilegacy/nginx + pullPolicy: IfNotPresent + tag: "1.27.3-debian-12-r5" tls: issuerRef: name: letsencrypt-http01 diff --git a/values/sftd/prod-values.example.yaml b/values/sftd/prod-values.example.yaml index ac48178f3..1c2374f9e 100644 --- a/values/sftd/prod-values.example.yaml +++ b/values/sftd/prod-values.example.yaml @@ -9,6 +9,16 @@ tls: issuerRef: name: letsencrypt-http01 kind: ClusterIssuer + +joinCall: +# this value should be set to 3 when deployed in a full production DMZ manner +# replicaCount = 1 is to support the simple wiab-staging solution + replicaCount: 1 + image: + repository: docker.io/bitnamilegacy/nginx + pullPolicy: IfNotPresent + tag: "1.27.3-debian-12-r5" + # Uncomment to enable SFT to SFT communication for federated calls # multiSFT: # enabled: true