diff --git a/.devcontainer.json b/.devcontainer.json index 85fb9f6..929d75d 100644 --- a/.devcontainer.json +++ b/.devcontainer.json @@ -2,7 +2,7 @@ "name": "devtools", "shutdownAction": "none", "build": { - "dockerfile": "Dockerfile" + "dockerfile": "Dockerfile.devcontainer" }, "updateRemoteUserUID": false, "overrideCommand": false, diff --git a/.github/workflows/build-push.yaml b/.github/workflows/build-push.yaml new file mode 100644 index 0000000..e77eaa5 --- /dev/null +++ b/.github/workflows/build-push.yaml @@ -0,0 +1,416 @@ +name: Build And Push + +# Builds the codecollection image and pushes a multi-arch manifest to GHCR +# with the tag schema the cc-registry-v2 image catalog expects: +# +# {sanitized_ref}-{cc_sha7}-{rt_sha7} +# +# Where: +# sanitized-ref = github.ref_name with '/' -> '-' (e.g. "main", "feature-foo", "pr-42") +# cc_sha7 = first 7 chars of github.sha (this repo's commit) +# rt_sha7 = first 7 chars of the rw-base-runtime commit sha at `runtime_ref` +# (https://github.com/runwhen-contrib/rw-base-runtime) +# +# Architecture +# ------------ +# The build is split across three jobs so each platform is built natively +# rather than under QEMU emulation: +# +# prepare -- compute tag set + push flag + build args ONCE, +# share with downstream jobs so they can't drift +# build (matrix) -- one job per platform, each on its own native runner: +# linux/amd64 -> ubuntu-latest +# linux/arm64 -> ubuntu-24.04-arm +# each job: +# 1. builds the image natively (no QEMU) +# 2. loads it into the local docker daemon +# 3. runs the smoke test against the native arch +# 4. on push, builds again push-by-digest (no tags) +# and exports the per-platform digest as an artifact +# merge -- pulls the per-platform digests and uses +# `docker buildx imagetools create` to stitch them +# into a single multi-arch manifest under every tag +# prepare computed. Pure registry-side metadata op. 
+# +# Build triggers: +# - push to ANY branch -> produces a CodeCollectionVersion image for that branch +# - pull_request to ANY base branch -> produces a "pr-{number}" preview image +# - workflow_dispatch -> manual build (e.g. to validate against a BYO base) +# +# We build off non-main branches on purpose: each branch is a candidate CCV +# the catalog can pin to. Path filters skip rebuilds for pure docs / config +# diffs. +# +# Dockerfile note: this workflow builds the production `Dockerfile`, which +# uses rw-base-runtime as its base. The repo's `Dockerfile.devcontainer` +# (referenced by .devcontainer.json) is the local-dev image and is NOT +# built here. + +on: + push: + branches: + - '**' # all branches; tag pushes (refs/tags/*) are excluded + paths-ignore: + - '**/*.md' + - '**/*.html' + - 'LICENSE' + - '.gitignore' + - '.gitbook.yaml' + - '.devcontainer.json' + - 'Dockerfile.devcontainer' + - 'CHANGELOG.md' + - 'CONTRIBUTING.md' + - 'README.md' + - 'SUMMARY.md' + - 'Introduction.md' + - 'docs/**' + pull_request: + # No branches: restriction -> triggers on PRs targeting any base branch + paths-ignore: + - '**/*.md' + - '**/*.html' + - 'LICENSE' + - '.gitignore' + - '.gitbook.yaml' + - '.devcontainer.json' + - 'Dockerfile.devcontainer' + - 'CHANGELOG.md' + - 'CONTRIBUTING.md' + - 'README.md' + - 'SUMMARY.md' + - 'Introduction.md' + - 'docs/**' + workflow_dispatch: + inputs: + base_image: + description: "Base image (FROM ref) to build against. Leave blank to use the Dockerfile default." + required: false + default: "" + type: string + runtime_ref: + description: "rw-base-runtime ref to embed in the tag suffix (branch / tag / sha). Default: main." + required: false + default: "main" + type: string + push: + description: "Push the resulting image to GHCR." + required: false + default: true + type: boolean + +env: + REGISTRY: ghcr.io + IMAGE: ${{ github.repository }} + # The repo we resolve the rt_sha tag suffix from. 
MUST match the base + # image we FROM in Dockerfile (currently rw-base-runtime). + RUNTIME_REPO: runwhen-contrib/rw-base-runtime + +permissions: + contents: read + packages: write + +jobs: + # =========================================================================== + # prepare — single source of truth for the tag set, push flag and build + # args so the matrix builds and the final merge job don't drift. + # =========================================================================== + prepare: + runs-on: ubuntu-latest + outputs: + should_push: ${{ steps.push_flag.outputs.should_push }} + tags: ${{ steps.meta.outputs.tags }} + canonical_tag: ${{ steps.meta.outputs.canonical_tag }} + repo_lc: ${{ steps.meta.outputs.repo_lc }} + sanitized_ref: ${{ steps.meta.outputs.sanitized_ref }} + cc_sha: ${{ steps.meta.outputs.cc_sha }} + rt_sha: ${{ steps.meta.outputs.rt_sha }} + runtime_ref: ${{ steps.meta.outputs.runtime_ref }} + base_image_arg: ${{ steps.args.outputs.base_image_arg }} + steps: + - name: Determine push flag + id: push_flag + run: | + case "${{ github.event_name }}" in + push|pull_request) + echo "should_push=true" >> "$GITHUB_OUTPUT" ;; + workflow_dispatch) + echo "should_push=${{ inputs.push }}" >> "$GITHUB_OUTPUT" ;; + *) + echo "should_push=false" >> "$GITHUB_OUTPUT" ;; + esac + + - name: Compute tag set + id: meta + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + RUNTIME_REF_INPUT: ${{ inputs.runtime_ref }} + run: | + set -euo pipefail + + # --- this repo's sha --- + cc_sha="${{ github.sha }}" + cc_sha7="${cc_sha:0:7}" + + # --- ref name (PRs collapse to "pr-{number}" since github.ref_name on + # pull_request is "{pr_number}/merge", which is useless as an OCI tag) --- + if [ "${{ github.event_name }}" = "pull_request" ]; then + ref_name="pr-${{ github.event.pull_request.number }}" + else + ref_name="${{ github.ref_name }}" + fi + # OCI tags must match [A-Za-z0-9_.-]{1,128}. Replace '/' with '-' + # so refs like "release/1.2" survive. 
+ sanitized_ref="$(echo "${ref_name}" | tr '/' '-' | tr -c 'A-Za-z0-9_.-' '-' | sed 's/^-*//;s/-*$//')" + + # --- runtime sha (defaults to rw-base-runtime@main) --- + runtime_ref="${RUNTIME_REF_INPUT:-main}" + rt_sha="$(gh api "repos/${RUNTIME_REPO}/commits/${runtime_ref}" --jq '.sha')" + rt_sha7="${rt_sha:0:7}" + + # --- repo path (GHCR rejects uppercase) --- + repo_lc="$(echo "${{ env.REGISTRY }}/${{ env.IMAGE }}" | tr '[:upper:]' '[:lower:]')" + + # --- canonical immutable tag the catalog uses for discovery --- + canonical_tag="${sanitized_ref}-${cc_sha7}-${rt_sha7}" + + # --- moving-pointer aliases (re-pointed on every build) --- + tags=( "${repo_lc}:${canonical_tag}" ) + case "${{ github.event_name }}" in + push) + tags+=( "${repo_lc}:${sanitized_ref}" ) + if [ "${{ github.ref_name }}" = "main" ]; then + tags+=( "${repo_lc}:latest" ) + fi + ;; + pull_request) + tags+=( "${repo_lc}:pr-${{ github.event.pull_request.number }}" ) + ;; + workflow_dispatch) + tags+=( "${repo_lc}:${sanitized_ref}" ) + ;; + esac + + { + echo "cc_sha=${cc_sha}" + echo "rt_sha=${rt_sha}" + echo "runtime_ref=${runtime_ref}" + echo "sanitized_ref=${sanitized_ref}" + echo "repo_lc=${repo_lc}" + echo "canonical_tag=${canonical_tag}" + printf 'tags=%s\n' "$(IFS=,; echo "${tags[*]}")" + } >> "$GITHUB_OUTPUT" + + { + echo "## Will publish" + echo + for t in "${tags[@]}"; do echo "- \`${t}\`"; done + echo + echo "- Codecollection sha: \`${cc_sha}\`" + echo "- Runtime ref: \`${runtime_ref}\`" + echo "- Runtime sha: \`${rt_sha}\`" + } >> "$GITHUB_STEP_SUMMARY" + + - name: Resolve build args + id: args + env: + INPUT_BASE_IMAGE: ${{ inputs.base_image }} + run: | + set -euo pipefail + if [ -n "${INPUT_BASE_IMAGE:-}" ]; then + echo "base_image_arg=BASE_IMAGE=${INPUT_BASE_IMAGE}" >> "$GITHUB_OUTPUT" + else + echo "base_image_arg=" >> "$GITHUB_OUTPUT" + fi + + # =========================================================================== + # build — parallel native builds. 
Each matrix leg runs on a runner whose + # architecture matches the platform it is building, so there's no QEMU + # emulation cost. Smoke test runs on real hardware for each arch. + # =========================================================================== + build: + needs: prepare + strategy: + fail-fast: false + matrix: + include: + - platform: linux/amd64 + arch: amd64 + runner: ubuntu-latest + - platform: linux/arm64 + arch: arm64 + runner: ubuntu-24.04-arm + runs-on: ${{ matrix.runner }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GHCR + if: needs.prepare.outputs.should_push == 'true' + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # ---------------------------------------------------------------- + # Phase 1: build natively + load locally so we can exec the smoke + # test. Per-arch GHA cache scope -- the two matrix legs run in + # parallel on different runners, so they MUST NOT share a scope. + # Cache scope is derived from the repo name so this same workflow + # template can be dropped into other codecollections without edits. + # ---------------------------------------------------------------- + - name: Build (native, load) + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile + platforms: ${{ matrix.platform }} + load: true + tags: ccv:smoke + cache-from: type=gha,scope=${{ github.event.repository.name }}-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.event.repository.name }}-${{ matrix.arch }} + build-args: | + ${{ needs.prepare.outputs.base_image_arg }} + + - name: Smoke test image + run: | + set -euo pipefail + # Native-arch exec -- verifies the runtime layer (rw-core-keywords + # imports from rw-base-runtime), the codecollection layout landed, + # and the worker binary is intact. 
+ docker run --rm --entrypoint /bin/bash ccv:smoke -c ' + set -eux + python3 -c "import RW.Core, RW.platform, RW.fetchsecrets, robot" + robot --version || true + test -d /home/runwhen/codecollection/codebundles + test -f /home/runwhen/codecollection/requirements.txt + test -x /home/runwhen/worker + python3 --version + ' + + # ---------------------------------------------------------------- + # Phase 2: only on push -- rebuild push-by-digest. Writes the + # platform-specific manifest into the registry WITHOUT human tags. + # The merge job assembles the multi-arch manifest from these + # digests under the canonical + alias tags. + # ---------------------------------------------------------------- + - name: Build & push by digest + id: digest_push + if: needs.prepare.outputs.should_push == 'true' + uses: docker/build-push-action@v6 + with: + context: . + file: Dockerfile + platforms: ${{ matrix.platform }} + outputs: type=image,name=${{ needs.prepare.outputs.repo_lc }},push-by-digest=true,name-canonical=true,push=true + labels: | + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.revision=${{ github.sha }} + io.runwhen.codecollection.commit=${{ github.sha }} + io.runwhen.runtime.commit=${{ needs.prepare.outputs.rt_sha }} + io.runwhen.runtime.ref=${{ needs.prepare.outputs.runtime_ref }} + cache-from: type=gha,scope=${{ github.event.repository.name }}-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.event.repository.name }}-${{ matrix.arch }} + build-args: | + ${{ needs.prepare.outputs.base_image_arg }} + + - name: Stage digest for merge job + if: needs.prepare.outputs.should_push == 'true' + run: | + set -euo pipefail + mkdir -p /tmp/digests + digest="${{ steps.digest_push.outputs.digest }}" + # Merge job enumerates files in this dir; filename = digest sans "sha256:" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + if: needs.prepare.outputs.should_push == 'true' + uses: 
actions/upload-artifact@v4 + with: + name: digests-${{ matrix.arch }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + # =========================================================================== + # merge — combine the per-arch digests into the final multi-arch manifest + # under every tag prepare computed. `buildx imagetools create` is a + # registry-side metadata op -- no image rebuild. + # =========================================================================== + merge: + needs: [prepare, build] + if: needs.prepare.outputs.should_push == 'true' + runs-on: ubuntu-latest + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to GHCR + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Create multi-arch manifest + env: + REPO_LC: ${{ needs.prepare.outputs.repo_lc }} + TAG_CSV: ${{ needs.prepare.outputs.tags }} + run: | + set -euo pipefail + tag_args=() + IFS=',' read -ra TAGS <<< "${TAG_CSV}" + for t in "${TAGS[@]}"; do tag_args+=(-t "$t"); done + + digest_args=() + for f in /tmp/digests/*; do + d="$(basename "$f")" + digest_args+=("${REPO_LC}@sha256:${d}") + done + + echo "Tags: ${TAGS[*]}" + echo "Digests: ${digest_args[*]}" + + docker buildx imagetools create "${tag_args[@]}" "${digest_args[@]}" + + - name: Inspect resulting manifest + env: + TAG_CSV: ${{ needs.prepare.outputs.tags }} + run: | + set -euo pipefail + IFS=',' read -ra TAGS <<< "${TAG_CSV}" + docker buildx imagetools inspect "${TAGS[0]}" + + - name: Summary + env: + REPO_LC: ${{ needs.prepare.outputs.repo_lc }} + CANONICAL_TAG: ${{ needs.prepare.outputs.canonical_tag }} + TAG_CSV: ${{ needs.prepare.outputs.tags }} + run: | + { + echo "## ${{ github.event.repository.name }} build" + echo + 
echo "- Event: \`${{ github.event_name }}\`" + echo "- Ref: \`${{ github.ref }}\`" + echo "- Codecollection sha: \`${{ github.sha }}\`" + echo "- Runtime ref: \`${{ needs.prepare.outputs.runtime_ref }}\`" + echo "- Runtime sha: \`${{ needs.prepare.outputs.rt_sha }}\`" + echo "- Canonical tag: \`${REPO_LC}:${CANONICAL_TAG}\`" + echo "- Pushed: \`true\`" + echo + echo "### Published manifest" + IFS=',' read -ra TAGS <<< "${TAG_CSV}" + for t in "${TAGS[@]}"; do echo "- \`${t}\`"; done + } >> "$GITHUB_STEP_SUMMARY" diff --git a/Dockerfile b/Dockerfile index bba01a6..7aed2fa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,34 +1,47 @@ -FROM us-docker.pkg.dev/runwhen-nonprod-shared/public-images/codecollection-devtools:latest +# Production codecollection image — the CCV image the RunWhen platform pulls. +# +# Source FROM the unified rw-base-runtime, which ships: +# - Python 3 + the worker binary at /home/runwhen/worker +# - rw-core-keywords pip-installed system-wide (RW.Core / RW.platform / +# RW.fetchsecrets) and the rw-base-runtime helper scripts at +# /home/runwhen/robot-runtime/ +# - Standard CLI tooling (kubectl, aws, az, gcloud, helm, gh, jq, yq, ...) +# +# Source: https://github.com/runwhen-contrib/rw-base-runtime +# +# For interactive local development see Dockerfile.devcontainer (referenced +# by .devcontainer.json). It still uses codecollection-devtools and ships +# extra dev tooling. The two image families are intentionally separate +# during the transition; long-term we'll converge. +# +# Override at build time to pin a specific runtime sha or test a BYO base: +# +# docker build \ +# --build-arg BASE_IMAGE=ghcr.io/runwhen-contrib/rw-base-runtime: \ +# ... +# +# The CI workflow (.github/workflows/build-push.yaml) resolves the +# `runtime_ref` dispatch input to an rw-base-runtime commit sha and bakes +# that sha into the resulting image tag suffix. 
+ARG BASE_IMAGE=ghcr.io/runwhen-contrib/rw-base-runtime:latest +FROM ${BASE_IMAGE} USER root ENV RUNWHEN_HOME=/home/runwhen ENV PATH "$PATH:/usr/local/bin:/home/runwhen/.local/bin" -# Set up directories and permissions RUN mkdir -p $RUNWHEN_HOME/codecollection WORKDIR $RUNWHEN_HOME/codecollection -# Copy files into container with correct ownership COPY --chown=runwhen:0 . . -# Check and install requirements if requirements.txt exists RUN if [ -f "requirements.txt" ]; then pip install --no-cache-dir -r requirements.txt; else echo "requirements.txt not found, skipping pip install"; fi -# Install additional user packages -#RUN apt-get update && \ -# apt-get install -y --no-install-recommends net-tools && \ -# apt-get clean && \ -# rm -rf /var/lib/apt/lists/* /var/cache/apt - -# Add runwhen user to sudoers with no password prompt RUN echo "runwhen ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -# Set RunWhen Temp Dir RUN mkdir -p /var/tmp/runwhen && chmod 1777 /var/tmp/runwhen ENV TMPDIR=/var/tmp/runwhen -# Adjust permissions for runwhen user RUN chown runwhen:0 -R $RUNWHEN_HOME/codecollection -# Switch to runwhen user USER runwhen diff --git a/Dockerfile.devcontainer b/Dockerfile.devcontainer new file mode 100644 index 0000000..bba01a6 --- /dev/null +++ b/Dockerfile.devcontainer @@ -0,0 +1,34 @@ +FROM us-docker.pkg.dev/runwhen-nonprod-shared/public-images/codecollection-devtools:latest +USER root + +ENV RUNWHEN_HOME=/home/runwhen +ENV PATH "$PATH:/usr/local/bin:/home/runwhen/.local/bin" + +# Set up directories and permissions +RUN mkdir -p $RUNWHEN_HOME/codecollection +WORKDIR $RUNWHEN_HOME/codecollection + +# Copy files into container with correct ownership +COPY --chown=runwhen:0 . . 
+ +# Check and install requirements if requirements.txt exists +RUN if [ -f "requirements.txt" ]; then pip install --no-cache-dir -r requirements.txt; else echo "requirements.txt not found, skipping pip install"; fi + +# Install additional user packages +#RUN apt-get update && \ +# apt-get install -y --no-install-recommends net-tools && \ +# apt-get clean && \ +# rm -rf /var/lib/apt/lists/* /var/cache/apt + +# Add runwhen user to sudoers with no password prompt +RUN echo "runwhen ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +# Set RunWhen Temp Dir +RUN mkdir -p /var/tmp/runwhen && chmod 1777 /var/tmp/runwhen +ENV TMPDIR=/var/tmp/runwhen + +# Adjust permissions for runwhen user +RUN chown runwhen:0 -R $RUNWHEN_HOME/codecollection + +# Switch to runwhen user +USER runwhen diff --git a/codebundles/aws-c7n-acm-health/runbook.robot b/codebundles/aws-c7n-acm-health/runbook.robot index fd85619..2808e2c 100644 --- a/codebundles/aws-c7n-acm-health/runbook.robot +++ b/codebundles/aws-c7n-acm-health/runbook.robot @@ -12,7 +12,7 @@ Library CloudCustodian.Core Suite Setup Suite Initialization *** Tasks *** -List Unused ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List Unused ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find unused ACM certificates [Tags] aws acm certificate security data:config ${c7n_output}= RW.CLI.Run Cli @@ -54,7 +54,7 @@ List Unused ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS RW.Core.Add Pre To Report No unused ACM certificates found in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_ID}` END -List Expiring ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List Expiring ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find Expiring ACM certificates [Tags] aws acm certificate expiration data:config CloudCustodian.Core.Generate 
Policy @@ -100,7 +100,7 @@ List Expiring ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${A RW.Core.Add Pre To Report No ACM certificates nearing expiration found in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_ID}` END -List Expired ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List Expired ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find expired ACM certificates [Tags] aws acm certificate expiration data:config ${c7n_output}= RW.CLI.Run Cli @@ -141,7 +141,7 @@ List Expired ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AW RW.Core.Add Pre To Report No expired ACM certificates found in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_ID}` END -List Failed Status ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List Failed Status ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find failed status ACM certificates [Tags] aws acm certificate status data:config ${c7n_output}= RW.CLI.Run Cli @@ -183,7 +183,7 @@ List Failed Status ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account RW.Core.Add Pre To Report No ACM certificates in failed status found in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_ID}` END -List Pending Validation ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List Pending Validation ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find pending validation ACM certificates [Tags] aws acm certificate status data:config ${c7n_output}= RW.CLI.Run Cli diff --git a/codebundles/aws-c7n-acm-health/sli.robot b/codebundles/aws-c7n-acm-health/sli.robot index 0e98b21..e6dd165 100644 --- a/codebundles/aws-c7n-acm-health/sli.robot +++ b/codebundles/aws-c7n-acm-health/sli.robot @@ -12,7 +12,7 @@ Library 
CloudCustodian.Core Suite Setup Suite Initialization *** Tasks *** -Check for unused ACM certificates in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for unused ACM certificates in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find unused ACM certificates [Tags] aws acm certificate security data:config ${c7n_output}= RW.CLI.Run Cli @@ -23,7 +23,7 @@ Check for unused ACM certificates in AWS Region `${AWS_REGION}` in AWS account ` ${unused_certificate_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_UNUSED_CERTIFICATES}) else 0 Set Global Variable ${unused_certificate_score} -Check for Expiring ACM certificates in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for Expiring ACM certificates in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find Expiring ACM certificates [Tags] aws acm certificate expiration data:config CloudCustodian.Core.Generate Policy @@ -37,7 +37,7 @@ Check for Expiring ACM certificates in AWS Region `${AWS_REGION}` in AWS account ${expiring_certificate_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_EXPIRING_CERTIFICATES}) else 0 Set Global Variable ${expiring_certificate_score} -Check for expired ACM certificates in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for expired ACM certificates in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find expired ACM certificates [Tags] aws acm certificate expiration data:config ${c7n_output}= RW.CLI.Run Cli @@ -48,7 +48,7 @@ Check for expired ACM certificates in AWS Region `${AWS_REGION}` in AWS account ${expired_certificate_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_EXPIRED_CERTIFICATES}) else 0 Set Global Variable ${expired_certificate_score} -Check for Failed Status ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: Check for Failed 
Status ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find failed status ACM certificates [Tags] aws acm certificate status data:config ${c7n_output}= RW.CLI.Run Cli @@ -59,7 +59,7 @@ Check for Failed Status ACM Certificates in AWS Region `${AWS_REGION}` in AWS Ac ${failed_certificate_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_FAILED_CERTIFICATES}) else 0 Set Global Variable ${failed_certificate_score} -Check for Pending Validation ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: Check for Pending Validation ACM Certificates in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find pending validation ACM certificates [Tags] aws acm certificate validation data:config ${c7n_output}= RW.CLI.Run Cli @@ -70,7 +70,7 @@ Check for Pending Validation ACM Certificates in AWS Region `${AWS_REGION}` in A ${pending_validation_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_PENDING_VALIDATION_CERTIFICATES}) else 0 Set Global Variable ${pending_validation_score} -Generate Health Score +Improve: Generate Health Score ${health_score}= Evaluate (${unused_certificate_score} + ${expiring_certificate_score} + ${expired_certificate_score} + ${failed_certificate_score} + ${pending_validation_score}) / 5 ${health_score}= Convert to Number ${health_score} 2 RW.Core.Push Metric ${health_score} diff --git a/codebundles/aws-c7n-ebs-health/runbook.robot b/codebundles/aws-c7n-ebs-health/runbook.robot index e4f6cd1..c6fa571 100644 --- a/codebundles/aws-c7n-ebs-health/runbook.robot +++ b/codebundles/aws-c7n-ebs-health/runbook.robot @@ -14,7 +14,7 @@ Suite Setup Suite Initialization *** Tasks *** -List Unattached EBS Volumes in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List Unattached EBS Volumes in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Check for unattached EBS volumes in the 
specified region. [Tags] ebs storage aws volume unattached data:config ${c7n_output}= RW.CLI.Run Cli @@ -51,7 +51,7 @@ List Unattached EBS Volumes in AWS Region `${AWS_REGION}` in AWS account `${AWS_ END -List Unencrypted EBS Volumes in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List Unencrypted EBS Volumes in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Check for Unencrypted EBS Volumes in the specified region. [Tags] ebs storage aws volume encryption data:config ${c7n_output}= RW.CLI.Run Cli @@ -88,7 +88,7 @@ List Unencrypted EBS Volumes in AWS Region `${AWS_REGION}` in AWS account `${AWS END -List Unused EBS Snapshots in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List Unused EBS Snapshots in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Check for Unused EBS Snapshots in the specified region. [Tags] ebs storage aws volume unused data:config ${c7n_output}= RW.CLI.Run Cli diff --git a/codebundles/aws-c7n-ebs-health/sli.robot b/codebundles/aws-c7n-ebs-health/sli.robot index 5ac2b0d..67259c7 100644 --- a/codebundles/aws-c7n-ebs-health/sli.robot +++ b/codebundles/aws-c7n-ebs-health/sli.robot @@ -11,7 +11,7 @@ Library RW.CLI Suite Setup Suite Initialization *** Tasks *** -Check Unattached EBS Volumes in `${AWS_REGION}` +Improve: Check Unattached EBS Volumes in `${AWS_REGION}` [Documentation] Check for unattached EBS volumes in the specified region. 
[Tags] ebs storage aws volume data:config ${c7n_output}= RW.CLI.Run Cli @@ -22,7 +22,7 @@ Check Unattached EBS Volumes in `${AWS_REGION}` ${unattached_ebs_event_score}= Evaluate 1 if int(${count.stdout}) <= int(${EVENT_THRESHOLD}) else 0 Set Global Variable ${unattached_ebs_event_score} -Check Unencrypted EBS Volumes in `${AWS_REGION}` +Improve: Check Unencrypted EBS Volumes in `${AWS_REGION}` [Documentation] Check for unencrypted EBS volumes and report any found that do not meet encryption requirements. [Tags] ebs storage aws security volume data:config ${c7n_output}= RW.CLI.Run Cli @@ -34,7 +34,7 @@ Check Unencrypted EBS Volumes in `${AWS_REGION}` Set Global Variable ${unencrypted_ebs_event_score} -Check Unused EBS Snapshots in `${AWS_REGION}` +Improve: Check Unused EBS Snapshots in `${AWS_REGION}` [Documentation] Check for unused EBS snapshots. [Tags] ebs storage aws snapshots volume data:config ${c7n_output}= RW.CLI.Run Cli @@ -46,7 +46,7 @@ Check Unused EBS Snapshots in `${AWS_REGION}` Set Global Variable ${unsued_ebs_snapshot_event_score} -Generate EBS Score +Improve: Generate EBS Score ${ebs_health_score}= Evaluate (${unattached_ebs_event_score} + ${unencrypted_ebs_event_score} + ${unsued_ebs_snapshot_event_score}) / 3 ${health_score}= Convert to Number ${ebs_health_score} 2 RW.Core.Push Metric ${health_score} diff --git a/codebundles/aws-c7n-ec2-health/runbook.robot b/codebundles/aws-c7n-ec2-health/runbook.robot index 95a940e..8af89da 100644 --- a/codebundles/aws-c7n-ec2-health/runbook.robot +++ b/codebundles/aws-c7n-ec2-health/runbook.robot @@ -13,9 +13,9 @@ Suite Setup Suite Initialization *** Tasks *** -List stale AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List stale AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] List stale EC2 instances in AWS Region. 
- [Tags] ec2 instance aws compute stale data:config + [Tags] ec2 instance aws compute stale data:config access:read-only # Generate the Cloud Custodian policy ${result}= CloudCustodian.Core.Generate Policy @@ -65,9 +65,9 @@ List stale AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS RW.Core.Add Pre To Report ${ec2_instances_list_length} stale instances found, below threshold of ${MAX_ALLOWED_STALE_INSTANCES}\n${report_data.stdout} END -List stopped AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List stopped AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] List stopped EC2 instances in AWS Region. - [Tags] ec2 instance aws compute data:config + [Tags] ec2 instance aws compute data:config access:read-only # Generate the Cloud Custodian policy ${result}= CloudCustodian.Core.Generate Policy @@ -118,9 +118,9 @@ List stopped AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${A RW.Core.Add Pre To Report ${ec2_instances_list_length} stopped instances found, below threshold of ${MAX_ALLOWED_STOPPED_INSTANCES}\n${report_data.stdout} END -List invalid AWS Auto Scaling Groups in AWS Region ${AWS_REGION} in AWS account ${AWS_ACCOUNT_NAME} +Improve: List invalid AWS Auto Scaling Groups in AWS Region ${AWS_REGION} in AWS account ${AWS_ACCOUNT_NAME} [Documentation] List invalid Auto Scaling Groups - [Tags] asg aws compute asg data:config + [Tags] asg aws compute asg data:config access:read-only # Run the Cloud Custodian policy ${c7n_output}= RW.CLI.Run Cli diff --git a/codebundles/aws-c7n-ec2-health/sli.robot b/codebundles/aws-c7n-ec2-health/sli.robot index 8c116d7..9cce7fc 100644 --- a/codebundles/aws-c7n-ec2-health/sli.robot +++ b/codebundles/aws-c7n-ec2-health/sli.robot @@ -13,7 +13,7 @@ Suite Setup Suite Initialization *** Tasks *** -Check for stale AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: 
Check for stale AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Check for stale EC2 instances in AWS Region. [Tags] ec2 instance aws compute data:config ${result}= CloudCustodian.Core.Generate Policy @@ -28,7 +28,7 @@ Check for stale AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account ` ${stale_ec2_instances_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_ALLOWED_STALE_INSTANCES}) else 0 Set Global Variable ${stale_ec2_instances_score} -Check for stopped AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for stopped AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Check for stopped EC2 instances in AWS Region. [Tags] ec2 instance aws compute data:config ${result}= CloudCustodian.Core.Generate Policy @@ -43,7 +43,7 @@ Check for stopped AWS EC2 instances in AWS Region `${AWS_REGION}` in AWS account ${stopped_ec2_instances_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_ALLOWED_STOPPED_INSTANCES}) else 0 Set Global Variable ${stopped_ec2_instances_score} -Check for invalid AWS Auto Scaling Groups in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for invalid AWS Auto Scaling Groups in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Check for invalid Auto Scaling Groups. 
[Tags] asg aws compute data:config ${c7n_output}= RW.CLI.Run Cli @@ -54,7 +54,7 @@ Check for invalid AWS Auto Scaling Groups in AWS Region `${AWS_REGION}` in AWS a ${invalid_asg_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_ALLOWED_INVALID_ASG}) else 0 Set Global Variable ${invalid_asg_score} -Generate Health Score +Improve: Generate Health Score ${health_score}= Evaluate (${stale_ec2_instances_score} + ${stopped_ec2_instances_score} + ${invalid_asg_score}) / 3 ${health_score}= Convert to Number ${health_score} 2 RW.Core.Push Metric ${health_score} diff --git a/codebundles/aws-c7n-monitoring-health/runbook.robot b/codebundles/aws-c7n-monitoring-health/runbook.robot index 64af076..4c48cb6 100644 --- a/codebundles/aws-c7n-monitoring-health/runbook.robot +++ b/codebundles/aws-c7n-monitoring-health/runbook.robot @@ -12,7 +12,7 @@ Library CloudCustodian.Core Suite Setup Suite Initialization *** Tasks *** -List CloudWatch Log Groups Without Retention Period in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List CloudWatch Log Groups Without Retention Period in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] List CloudWatch Log Groups Without Retention Period [Tags] aws cloudwatch logs data:config ${c7n_output}= RW.CLI.Run Cli @@ -51,7 +51,7 @@ List CloudWatch Log Groups Without Retention Period in AWS Region `${AWS_REGION} RW.Core.Add Pre To Report "No CloudWatch Log Groups without retention period found in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_ID}`" END -Check CloudTrail Configuration in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: Check CloudTrail Configuration in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Check if CloudTrail exists and is configured for multi-region [Tags] aws cloudtrail data:config ${c7n_output}= RW.CLI.Run Cli @@ -115,7 +115,7 @@ Check CloudTrail Configuration in AWS Region `${AWS_REGION}` in AWS 
Account `${A END END - Check for CloudTrail integration with CloudWatch Logs in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` + Improve: Check for CloudTrail integration with CloudWatch Logs in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Check for CloudTrail integration with CloudWatch Logs [Tags] aws cloudtrail cloudwatch logs data:config ${c7n_output}= RW.CLI.Run Cli diff --git a/codebundles/aws-c7n-monitoring-health/sli.robot b/codebundles/aws-c7n-monitoring-health/sli.robot index 21d5370..9887496 100644 --- a/codebundles/aws-c7n-monitoring-health/sli.robot +++ b/codebundles/aws-c7n-monitoring-health/sli.robot @@ -12,7 +12,7 @@ Library CloudCustodian.Core Suite Setup Suite Initialization *** Tasks *** -Check CloudWatch Log Groups Without Retention Period in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check CloudWatch Log Groups Without Retention Period in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Check CloudWatch Log Groups without retention period [Tags] aws cloudwatch logs data:config ${c7n_output}= RW.CLI.Run Cli @@ -23,7 +23,7 @@ Check CloudWatch Log Groups Without Retention Period in AWS Region `${AWS_REGION ${no_retention_score}= Evaluate 1 if int(${count.stdout}) <= int(${MAX_LOG_GROUPS_ALLOWED}) else 0 Set Global Variable ${no_retention_score} -Check if CloudTrail exists and is configured for multi-region in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: Check if CloudTrail exists and is configured for multi-region in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Check if CloudTrail exists and is configured for multi-region [Tags] aws cloudtrail logs data:config ${c7n_output}= RW.CLI.Run Cli @@ -65,8 +65,8 @@ Check if CloudTrail exists and is configured for multi-region in AWS Region `${A Set Global Variable ${cloudtrail_score} END -Check CloudTrail Without CloudWatch Logs 
in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` - [Documentation] Check if CloudTrail exists and is configured for multi-region in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: Check CloudTrail Without CloudWatch Logs in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` + [Documentation] Check for CloudTrail trails without CloudWatch Logs integration in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Tags] aws cloudtrail cloudwatch logs data:config ${c7n_output}= RW.CLI.Run Cli ... cmd=custodian run -r ${AWS_REGION} --output-dir ${OUTPUT_DIR}/aws-c7n-monitoring-health ${CURDIR}/trail-without-cloudwatch-logs.yaml --cache-period 0 @@ -85,7 +85,7 @@ Check CloudTrail Without CloudWatch Logs in AWS Region `${AWS_REGION}` in AWS Ac ${cloudtrail_trails_without_cloudwatch_score}= Evaluate 1 if len(@{trails_without_cloudwatch}) <= int(${MAX_CLOUDTRAIL_TRAILS_WITHOUT_CLOUDWATCH_LOGS_ALLOWED}) else 0 Set Global Variable ${cloudtrail_trails_without_cloudwatch_score} -Generate Health Score +Improve: Generate Health Score ${health_score}= Evaluate (${no_retention_score} + ${cloudtrail_score} + ${cloudtrail_trails_without_cloudwatch_score}) / 3 ${health_score}= Convert to Number ${health_score} 2 RW.Core.Push Metric ${health_score} diff --git a/codebundles/aws-c7n-network-health/runbook.robot b/codebundles/aws-c7n-network-health/runbook.robot index 007e0b3..8a7978c 100644 --- a/codebundles/aws-c7n-network-health/runbook.robot +++ b/codebundles/aws-c7n-network-health/runbook.robot @@ -13,7 +13,7 @@ Suite Setup Suite Initialization *** Tasks *** -List Publicly Accessible Security Groups in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List Publicly Accessible Security Groups in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find publicly accessible security groups (e.g., "0.0.0.0/0" or "::/0") [Tags] tag aws security-group network data:config CloudCustodian.Core.Generate Policy @@ -54,7 +54,7 @@ 
List Publicly Accessible Security Groups in AWS account `${AWS_ACCOUNT_NAME}` END END -List unused Elastic IPs in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List unused Elastic IPs in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find unused Elastic IPs that are not associated with any instance or network interface [Tags] aws eip network data:config FOR ${region} IN @{AWS_ENABLED_REGIONS} @@ -94,7 +94,7 @@ List unused Elastic IPs in AWS account `${AWS_ACCOUNT_NAME}` END END -List unused ELBs in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List unused ELBs in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find unused Application Load Balancers (ALBs) and Network Load Balancers (NLBs) that do not have any associated targets [Tags] aws elb network data:config FOR ${region} IN @{AWS_ENABLED_REGIONS} @@ -134,7 +134,7 @@ List unused ELBs in AWS account `${AWS_ACCOUNT_NAME}` END END -List VPCs with Flow Logs Disabled in AWS account `${AWS_ACCOUNT_NAME}` +Improve: List VPCs with Flow Logs Disabled in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find VPCs that do not have flow logs enabled [Tags] aws vpc network data:config CloudCustodian.Core.Generate Policy diff --git a/codebundles/aws-c7n-network-health/sli.robot b/codebundles/aws-c7n-network-health/sli.robot index d6a4d25..75f33fd 100644 --- a/codebundles/aws-c7n-network-health/sli.robot +++ b/codebundles/aws-c7n-network-health/sli.robot @@ -13,7 +13,7 @@ Suite Setup Suite Initialization *** Tasks *** -Check for publicly accessible security groups in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for publicly accessible security groups in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find publicly accessible security groups (e.g., "0.0.0.0/0" or "::/0") [Tags] aws security-group network data:config CloudCustodian.Core.Generate Policy @@ -32,7 +32,7 @@ Check for publicly accessible security groups in AWS account `${AWS_ACCOUNT_NAME Set Global Variable ${public_ip_access_score} -Check for unused Elastic 
IPs in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for unused Elastic IPs in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find unused Elastic IPs that are not associated with any instance or network interface [Tags] aws eip network data:config ${total_count}= Set Variable 0 @@ -47,7 +47,7 @@ Check for unused Elastic IPs in AWS account `${AWS_ACCOUNT_NAME}` ${unattached_eip_score}= Evaluate 1 if ${total_count} <= int(${MAX_ALLOWED_UNUSED_RESOURCES}) else 0 Set Global Variable ${unattached_eip_score} -Check for unused ELBs in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for unused ELBs in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find unused Application Load Balancers (ALBs) and Network Load Balancers (NLBs) that do not have any associated targets [Tags] aws elb network data:config ${total_count}= Set Variable 0 @@ -62,7 +62,7 @@ Check for unused ELBs in AWS account `${AWS_ACCOUNT_NAME}` ${unused_elb_score}= Evaluate 1 if ${total_count} <= int(${MAX_ALLOWED_UNUSED_RESOURCES}) else 0 Set Global Variable ${unused_elb_score} -Check for VPCs with Flow Logs disabled in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for VPCs with Flow Logs disabled in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find VPCs that do not have Flow Logs enabled [Tags] aws vpc network data:config CloudCustodian.Core.Generate Policy @@ -80,7 +80,7 @@ Check for VPCs with Flow Logs disabled in AWS account `${AWS_ACCOUNT_NAME}` ${flow_log_disabled_vpc_score}= Evaluate 1 if ${total_count} <= int(${DISABLED_FLOW_LOG_THRESHOLD}) else 0 Set Global Variable ${flow_log_disabled_vpc_score} -Generate Health Score +Improve: Generate Health Score ${health_score}= Evaluate (${public_ip_access_score} + ${unattached_eip_score} + ${unused_elb_score} + ${flow_log_disabled_vpc_score}) / 4 ${health_score}= Convert to Number ${health_score} 2 RW.Core.Push Metric ${health_score} diff --git a/codebundles/aws-c7n-rds-health/runbook.robot b/codebundles/aws-c7n-rds-health/runbook.robot 
index 48482c1..43b436a 100644 --- a/codebundles/aws-c7n-rds-health/runbook.robot +++ b/codebundles/aws-c7n-rds-health/runbook.robot @@ -12,7 +12,7 @@ Library CloudCustodian.Core Suite Setup Suite Initialization *** Tasks *** -List Unencrypted RDS Instances in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List Unencrypted RDS Instances in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find unencrypted RDS instances [Tags] aws rds database encryption data:config ${c7n_output}= RW.CLI.Run Cli @@ -49,7 +49,7 @@ List Unencrypted RDS Instances in AWS Region `${AWS_REGION}` in AWS Account `${A END -List Publicly Accessible RDS Instances in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List Publicly Accessible RDS Instances in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Find publicly accessible RDS instances [Tags] aws rds database security data:config ${c7n_output}= RW.CLI.Run Cli @@ -85,7 +85,7 @@ List Publicly Accessible RDS Instances in AWS Region `${AWS_REGION}` in AWS Acco END END -List RDS Instances with Backups Disabled in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List RDS Instances with Backups Disabled in AWS Region `${AWS_REGION}` in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Identify RDS instances with backups disabled [Tags] aws rds database backups data:config ${c7n_output}= RW.CLI.Run Cli diff --git a/codebundles/aws-c7n-rds-health/sli.robot b/codebundles/aws-c7n-rds-health/sli.robot index e44f8e9..3c5eb90 100644 --- a/codebundles/aws-c7n-rds-health/sli.robot +++ b/codebundles/aws-c7n-rds-health/sli.robot @@ -12,7 +12,7 @@ Library CloudCustodian.Core Suite Setup Suite Initialization *** Tasks *** -Check for unencrypted RDS instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for unencrypted RDS instances in AWS Region `${AWS_REGION}` in AWS account 
`${AWS_ACCOUNT_NAME}` [Documentation] Find unencrypted RDS instances [Tags] aws rds database encryption data:config ${c7n_output}= RW.CLI.Run Cli @@ -23,7 +23,7 @@ Check for unencrypted RDS instances in AWS Region `${AWS_REGION}` in AWS account ${unencrypted_rds_score}= Evaluate 1 if int(${count.stdout}) <= int(${EVENT_THRESHOLD}) else 0 Set Global Variable ${unencrypted_rds_score} -Check for publicly accessible RDS instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for publicly accessible RDS instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find publicly accessible RDS instances [Tags] aws rds database security data:config ${c7n_output}= RW.CLI.Run Cli @@ -34,7 +34,7 @@ Check for publicly accessible RDS instances in AWS Region `${AWS_REGION}` in AWS ${publicly_accessible_rds_score}= Evaluate 1 if int(${count.stdout}) <= int(${EVENT_THRESHOLD}) else 0 Set Global Variable ${publicly_accessible_rds_score} -Check for disabled backup RDS instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` +Improve: Check for disabled backup RDS instances in AWS Region `${AWS_REGION}` in AWS account `${AWS_ACCOUNT_NAME}` [Documentation] Find RDS instances with backups disabled [Tags] aws rds database backups data:config ${c7n_output}= RW.CLI.Run Cli @@ -46,7 +46,7 @@ Check for disabled backup RDS instances in AWS Region `${AWS_REGION}` in AWS acc Set Global Variable ${backup_disabled_rds_score} -Generate Health Score +Improve: Generate Health Score ${health_score}= Evaluate (${unencrypted_rds_score} + ${publicly_accessible_rds_score} + ${backup_disabled_rds_score}) / 3 ${health_score}= Convert to Number ${health_score} 2 RW.Core.Push Metric ${health_score} diff --git a/codebundles/aws-c7n-s3-health/runbook.robot b/codebundles/aws-c7n-s3-health/runbook.robot index 081474e..9206d9c 100644 --- a/codebundles/aws-c7n-s3-health/runbook.robot +++ 
b/codebundles/aws-c7n-s3-health/runbook.robot @@ -12,7 +12,7 @@ Library CloudCustodian.Core Suite Setup Suite Initialization *** Tasks *** -List S3 Buckets With Public Access in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: List S3 Buckets With Public Access in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Fetch total number of S3 buckets with public access enabled and raises an issue if any exist. [Tags] s3 storage aws security data:config ${c7n_output}= RW.CLI.Run Cli diff --git a/codebundles/aws-c7n-s3-health/sli.robot b/codebundles/aws-c7n-s3-health/sli.robot index b9e616f..10d5e9e 100644 --- a/codebundles/aws-c7n-s3-health/sli.robot +++ b/codebundles/aws-c7n-s3-health/sli.robot @@ -11,7 +11,7 @@ Library RW.CLI Suite Setup Suite Initialization *** Tasks *** -Count S3 Buckets With Public Access in AWS Account `${AWS_ACCOUNT_NAME}` +Improve: Count S3 Buckets With Public Access in AWS Account `${AWS_ACCOUNT_NAME}` [Documentation] Fetch total number of S3 buckets with public access enabled. [Tags] s3 storage aws security data:config ${c7n_output}= RW.CLI.Run Cli