diff --git a/.github/workflows/ansible-deploy-bonus.yml b/.github/workflows/ansible-deploy-bonus.yml new file mode 100644 index 0000000000..265ce22b59 --- /dev/null +++ b/.github/workflows/ansible-deploy-bonus.yml @@ -0,0 +1,53 @@ +--- +name: "Ansible - Deploy Go App" + +on: + push: + branches: [main, master, lab06] + paths: + - 'ansible/vars/app_bonus.yml' + - 'ansible/playbooks/deploy_bonus.yml' + - 'ansible/roles/web_app/**' + - '.github/workflows/ansible-deploy-bonus.yml' + pull_request: + branches: [main, master] + paths: + - 'ansible/vars/app_bonus.yml' + - 'ansible/playbooks/deploy_bonus.yml' + - 'ansible/roles/web_app/**' + +jobs: + lint: + name: "Ansible Lint - Bonus" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install ansible and ansible-lint + run: pip install ansible ansible-lint + + - name: Run ansible-lint + run: | + cd ansible + ansible-lint playbooks/deploy_bonus.yml + + deploy: + name: "Deploy Bonus App" + needs: lint + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + + - name: Run deploy playbook + run: | + cd ansible + ansible-playbook playbooks/deploy_bonus.yml + + - name: Verify bonus app health + run: | + sleep 5 + curl -f http://localhost:8001/health diff --git a/.github/workflows/ansible-deploy.yml b/.github/workflows/ansible-deploy.yml new file mode 100644 index 0000000000..75268e780a --- /dev/null +++ b/.github/workflows/ansible-deploy.yml @@ -0,0 +1,53 @@ +--- +name: "Ansible - Deploy Python App" + +on: + push: + branches: [main, master, lab06] + paths: + - 'ansible/vars/app_python.yml' + - 'ansible/playbooks/deploy_python.yml' + - 'ansible/roles/web_app/**' + - '.github/workflows/ansible-deploy.yml' + pull_request: + branches: [main, master] + paths: + - 'ansible/vars/app_python.yml' + - 'ansible/playbooks/deploy_python.yml' + - 'ansible/roles/web_app/**' + +jobs: + lint: + name: "Ansible Lint" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install ansible and ansible-lint + run: pip install ansible ansible-lint + + - name: Run ansible-lint + run: | + cd ansible + ansible-lint playbooks/deploy_python.yml + + deploy: + name: "Deploy Python App" + needs: lint + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + + - name: Run deploy playbook + run: | + cd ansible + ansible-playbook playbooks/deploy_python.yml + + - name: Verify python app health + run: | + sleep 5 + curl -f http://localhost:8000/health diff --git a/ansible/.ansible-lint b/ansible/.ansible-lint new file mode 100644 index 0000000000..f8afc581f8 --- /dev/null +++ b/ansible/.ansible-lint @@ -0,0 +1,4 @@ +--- +profile: basic +skip_list: + - var-naming # web_app role uses shared variables intentionally for reusability diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg index 6f38a00421..c3a1ffdfb0 100644 --- a/ansible/ansible.cfg +++ b/ansible/ansible.cfg @@ -1,12 +1,10 @@ [defaults] -inventory = inventory/hosts.ini -roles_path = roles -host_key_checking = False -remote_user = vagrant +inventory = inventory/hosts.ini +roles_path = roles +host_key_checking = False retry_files_enabled = False -deprecation_warnings = False +stdout_callback = yaml +collections_paths = ~/.ansible/collections -[privilege_escalation] -become = True -become_method = sudo -become_user = root +[ssh_connection] +pipelining = True diff --git a/ansible/docs/LAB06.md b/ansible/docs/LAB06.md new file mode 100644 index 0000000000..e60a238acd --- /dev/null +++ b/ansible/docs/LAB06.md @@ -0,0 +1,648 @@ +# Lab 6: Advanced Ansible & CI/CD + +[![Ansible - Deploy Python App](https://github.com/3llimi/DevOps-Core-Course/actions/workflows/ansible-deploy.yml/badge.svg)](https://github.com/3llimi/DevOps-Core-Course/actions/workflows/ansible-deploy.yml) +[![Ansible - Deploy Go App](https://github.com/3llimi/DevOps-Core-Course/actions/workflows/ansible-deploy-bonus.yml/badge.svg)](https://github.com/3llimi/DevOps-Core-Course/actions/workflows/ansible-deploy-bonus.yml) + +--- + +## Task 1: Blocks & Tags (2 pts) + +### Overview + +All three roles were refactored to group related tasks inside `block:` sections. Each block has a `rescue:` section for error recovery and an `always:` section for post-execution logging. `become: true` and tag assignments were moved to the block level instead of being repeated on each individual task. + +### Tag Strategy + +| Tag | Role | Purpose | +|-----|------|---------| +| `common` | common | Entire common role | +| `packages` | common | Package installation block only | +| `users` | common | User management block only | +| `docker` | docker | Entire docker role | +| `docker_install` | docker | GPG key + packages only | +| `docker_config` | docker | daemon.json + group config only | +| `web_app_wipe` | web_app | Destructive cleanup only | +| `app_deploy` | web_app | Deployment block only | +| `compose` | web_app | Alias for compose tasks | + +### common role — roles/common/tasks/main.yml + +**Block 1 — Package installation (tags: `packages`, `common`)** +- Updates apt cache with `cache_valid_time: 3600` to avoid redundant updates +- Installs all packages from `common_packages` list +- `rescue:` uses `ansible.builtin.apt` with `force_apt_get: true` instead of raw `apt-get` command (lint compliance) +- `always:` writes a completion timestamp to `/tmp/ansible_common_complete.log` +- `become: true` applied once at block level + +**Block 2 — User management (tags: `users`, `common`)** +- Ensures `vagrant` user is in the `docker` group +- `rescue:` prints a diagnostic message if the docker group doesn't exist yet +- `always:` runs `id vagrant` and reports current group membership + +### docker role — roles/docker/tasks/main.yml + +**Block 1 — Docker installation (tags: `docker_install`, `docker`)** +- Creates `/etc/apt/keyrings` directory +- Downloads Docker GPG key with `force: false` — skips download if key already present (idempotent) +- Adds Docker APT repository +- Installs Docker packages +- `rescue:` waits 10 seconds then force-retries GPG key download (handles network timeouts) +- `always:` ensures Docker service is enabled and started with `failed_when: false` + +**Block 2 — Docker configuration (tags: `docker_config`, `docker`)** +- Writes `/etc/docker/daemon.json` with json-file log driver and size limits +- Notifies `Restart Docker` handler — handler only fires when file actually changed +- Adds vagrant user to docker group +- Installs Python Docker SDK via pip3 +- `rescue:` prints diagnostic on failure +- `always:` runs `docker info` and reports daemon status + +### Execution Examples + +```bash +# List all available tags +ansible-playbook playbooks/provision.yml --list-tags +# Output: +# TASK TAGS: [common, docker, docker_config, docker_install, packages, users] + +# Run only docker tasks — common role skipped entirely +ansible-playbook playbooks/provision.yml --tags docker + +# Run only package installation +ansible-playbook playbooks/provision.yml --tags packages + +# Skip common role +ansible-playbook playbooks/provision.yml --skip-tags common + +# Dry-run docker tasks +ansible-playbook playbooks/provision.yml --tags docker --check +``` + +### Selective Execution Evidence + +Running `--tags docker` produced 12 tasks — only docker role tasks, common role completely absent: +``` +PLAY RECAP +localhost : ok=12 changed=0 unreachable=0 failed=0 +``` + +Running `--tags packages` produced 4 tasks — only the package block from common: +``` +PLAY RECAP +localhost : ok=4 changed=0 unreachable=0 failed=0 +``` + +### Research Answers + +**Q: What happens if the rescue block also fails?** +Ansible marks the host as FAILED and adds it to the `failed` count in PLAY RECAP. The `always:` block still runs regardless. If the rescue failure is acceptable, `ignore_errors: true` can be added to rescue tasks. + +**Q: Can you have nested blocks?** +Yes. A task inside a `block:` can itself be another `block:` with its own `rescue:` and `always:`. Each block's rescue only handles failures from its own scope. + +**Q: How do tags inherit to tasks within blocks?** +Tags applied to a block are inherited by all tasks inside it — individual tasks don't need their own tag annotations. If a task inside the block also has its own tags, it receives both sets (union). `always:` tasks inside a block also inherit the block's tags. + +--- + +## Task 2: Docker Compose (3 pts) + +### Role Rename + +`app_deploy` was renamed to `web_app`: +```bash +# New structure under roles/web_app/ +roles/web_app/ +├── defaults/main.yml +├── handlers/main.yml +├── meta/main.yml +├── tasks/main.yml +├── tasks/wipe.yml +└── templates/docker-compose.yml.j2 +``` + +The name `web_app` is more specific and descriptive — it distinguishes from potential future `db_app` or `cache_app` roles, and aligns with the `web_app_wipe` variable naming convention. + +### Docker Compose Template — roles/web_app/templates/docker-compose.yml.j2 + +The template uses Jinja2 variable substitution for all dynamic values: + +```jinja2 +version: '{{ docker_compose_version }}' + +services: + {{ app_name }}: + image: {{ docker_image }}:{{ docker_tag }} + container_name: {{ app_name }} + ports: + - "{{ app_port }}:{{ app_internal_port }}" + environment: + APP_ENV: production + APP_PORT: "{{ app_internal_port }}" + SECRET_KEY: "{{ app_secret_key }}" + restart: unless-stopped + networks: + - app_network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:{{ app_internal_port }}/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 15s + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + +networks: + app_network: + driver: bridge +``` + +**Variables supported:** + +| Variable | Default | Purpose | +|----------|---------|---------| +| `app_name` | devops-app | Service and container name | +| `docker_image` | 3llimi/devops-info-service | Docker Hub image | +| `docker_tag` | latest | Image version | +| `app_port` | 8000 | Host-side port | +| `app_internal_port` | 8000 | Container listening port | +| `app_secret_key` | placeholder | Injected as SECRET_KEY env var | +| `docker_compose_version` | 3.8 | Compose file format version | + +### Role Dependencies — roles/web_app/meta/main.yml + +```yaml +dependencies: + - role: docker +``` + +Declaring `docker` as a dependency means Ansible automatically runs the docker role before `web_app` — even when calling `deploy.yml` which only lists `web_app`. This prevents "docker compose not found" errors and removes the need to manually order roles in every playbook. + +**Evidence — running `deploy.yml` (only lists web_app) automatically ran docker first:** +``` +TASK [docker : Create /etc/apt/keyrings directory] ok: [localhost] +TASK [docker : Download Docker GPG key] ok: [localhost] +... +TASK [web_app : Deploy application with Docker Compose] changed: [localhost] +``` + +### Deployment Tasks — roles/web_app/tasks/main.yml + +The deployment block: +1. Creates `/opt/{{ app_name }}` directory +2. Templates `docker-compose.yml` from Jinja2 template +3. Pulls Docker image (`changed_when` based on actual pull output) +4. Runs `docker compose up --detach --remove-orphans` +5. Waits for `/health` endpoint to return 200 +6. `rescue:` shows container logs on failure +7. `always:` shows `docker ps` output regardless of outcome + +### Idempotency Verification + +**First run:** +``` +TASK [web_app : Template docker-compose.yml] changed: [localhost] +TASK [web_app : Deploy with Docker Compose] changed: [localhost] +PLAY RECAP: ok=21 changed=4 failed=0 +``` + +**Second run (no config changes):** +``` +TASK [web_app : Template docker-compose.yml] ok: [localhost] +TASK [web_app : Deploy with Docker Compose] ok: [localhost] +PLAY RECAP: ok=21 changed=0 failed=0 +``` + +The `template` module only marks changed when rendered content differs from what's on disk. `changed_when` on the compose command ensures "changed" is only reported when Docker actually recreated a container. + +### Application Verification + +```bash +$ curl http://localhost:8000/health +{"status":"healthy","timestamp":"2026-02-22T12:25:40.976379+00:00","uptime_seconds":80} + +$ docker ps +CONTAINER ID IMAGE STATUS PORTS +71a88aec2ef9 3llimi/devops-info-service:latest Up 2 minutes 0.0.0.0:8000->8000/tcp + +$ cat /opt/devops-python/docker-compose.yml +version: '3.8' +services: + devops-python: + image: 3llimi/devops-info-service:latest + container_name: devops-python + ports: + - "8000:8000" + ... +``` + +### Research Answers + +**Q: `restart: always` vs `restart: unless-stopped`?** +`always` restarts the container unconditionally — including after a deliberate `docker compose stop`. This can be disruptive during maintenance. `unless-stopped` restarts after host reboots and Docker daemon restarts, but respects a deliberate manual stop — making it the better production choice. + +**Q: How do Docker Compose networks differ from Docker bridge networks?** +Docker Compose creates a project-scoped named bridge network (e.g., `devops-python_app_network`). Containers on it can reach each other by service name via DNS. The default `docker0` bridge uses only IP addresses — no DNS. Compose networks are also isolated from other Compose projects by default, improving security. + +**Q: Can you reference Ansible Vault variables in the template?** +Yes. Vault variables are decrypted in memory at playbook runtime. The template module renders the template with decrypted values and copies the result to the target. The plain-text value exists only in memory — it is never written to disk except as the final rendered compose file (protected by mode `0640`). + +--- + +## Task 3: Wipe Logic (1 pt) + +### Implementation + +**Gate 1 — Variable** (`roles/web_app/defaults/main.yml`): +```yaml +web_app_wipe: false # Safe default — never wipes unless explicitly set +``` + +**Gate 2 — Tag** (`roles/web_app/tasks/main.yml`): +```yaml +- name: Include wipe tasks + ansible.builtin.include_tasks: wipe.yml + tags: + - web_app_wipe # File only loads when --tags web_app_wipe is passed +``` + +**Wipe block** (`roles/web_app/tasks/wipe.yml`): +```yaml +- name: Wipe application + when: web_app_wipe | bool # Gate 1: skips if variable is false + become: true + tags: + - web_app_wipe + block: + - name: "[WIPE] Stop and remove containers" + ansible.builtin.command: docker compose ... down --remove-orphans + changed_when: true + failed_when: false # Safe if directory doesn't exist + + - name: "[WIPE] Remove application directory" + ansible.builtin.file: + path: "{{ compose_project_dir }}" + state: absent + + - name: "[WIPE] Remove Docker image" + ansible.builtin.command: docker rmi {{ docker_image }}:{{ docker_tag }} + changed_when: true + failed_when: false # Safe if image not present locally +``` +### Research Answers + +**Q: Why use both variable AND tag?** + +Using only the variable: someone accidentally passing `-e "web_app_wipe=true"` while testing another variable would destroy production. The tag requirement forces a second deliberate action — you must explicitly type `--tags web_app_wipe`. + +Using only the tag: someone might not realise the tag is destructive. The variable provides a human-readable intention signal visible in code review. + +Together they form a "break glass" mechanism — two independent explicit actions required before anything is deleted. + +**Q: What's the difference between `never` tag and this approach?** + +The `never` tag is a special Ansible built-in that means "skip unless explicitly requested with `--tags never`". The lab forbids it for two reasons: +1. Less readable — intent is not obvious from the name +2. Cannot be controlled from CI/CD pipelines via `-e` variables from secrets — harder to automate controlled wipes + +The variable + tag approach is more flexible, readable, and pipeline-friendly. + +**Q: Why must wipe logic come BEFORE deployment in main.yml?** + +Wipe is included before the deployment block to enable the clean reinstall use case: +```bash +ansible-playbook deploy.yml -e "web_app_wipe=true" +``` +If deploy came first, the new container would start and then be immediately destroyed. With wipe first: old installation removed → new installation deployed → clean state achieved. + +**Q: How would you extend this to wipe Docker images and volumes too?** + +Images are already wiped with `docker rmi {{ docker_image }}:{{ docker_tag }}`. To also wipe volumes, add: +```yaml +- name: "[WIPE] Remove Docker volumes" + ansible.builtin.command: > + docker compose -f {{ compose_project_dir }}/docker-compose.yml + down --volumes + failed_when: false +``` +This removes named volumes defined in the compose file. For anonymous volumes, `docker volume prune -f` cleans up dangling volumes after containers are removed. + +**Q: When would you want clean reinstallation vs. rolling update?** + +Clean reinstallation is appropriate when: configuration has changed significantly (environment variables, volume mounts, network settings), the container is in a broken state that `docker compose up` cannot recover from, or during major version upgrades where old state could cause conflicts. + +Rolling updates are preferred when: minimising downtime is critical, the change is only a new image version with no config changes, and the app supports multiple instances running simultaneously. Rolling updates avoid the gap between wipe and redeploy where the service is unavailable. + +### Test Results — All 4 Scenarios + +**Scenario 1: Normal deploy — wipe must NOT run** +```bash +ansible-playbook playbooks/deploy_python.yml +# Result: all 5 wipe tasks show "skipping" +# PLAY RECAP: ok=21 changed=1 failed=0 skipped=5 +``` +![Scenario 1 - Normal Deploy](screenshots/wipe-scenario1-normal-deploy.png) + +**Scenario 2: Wipe only** +```bash +ansible-playbook playbooks/deploy_python.yml \ + -e "web_app_wipe=true" --tags web_app_wipe + +# Result: wipe ran, deploy completely skipped +# PLAY RECAP: ok=7 changed=3 failed=0 + +# Verification: +$ docker ps # devops-python container absent ✅ +$ ls /opt # devops-python directory absent ✅ +``` +![Scenario 2 - Wipe Only](screenshots/wipe-scenario2-wipe-only.png) + +**Scenario 3: Clean reinstall** +```bash +ansible-playbook playbooks/deploy_python.yml -e "web_app_wipe=true" + +# Result: wipe ran first, deploy followed +# TASK [WIPE] Stop and remove containers → changed +# TASK [WIPE] Remove application directory → changed +# TASK Create application directory → changed +# TASK Deploy with Docker Compose → changed +# PLAY RECAP: ok=26 changed=5 failed=0 skipped=0 ignored=0 + +$ curl http://localhost:8000/health +{"status":"healthy",...} ✅ +``` +![Scenario 3 - Clean Reinstall](screenshots/wipe-scenario3-clean-reinstall.png) + +**Scenario 4a: Safety — tag passed but variable false** +```bash +ansible-playbook playbooks/deploy_python.yml --tags web_app_wipe + +# Result: variable gate (Gate 1) blocked everything +# All 5 wipe tasks show "skipping" +# PLAY RECAP: ok=2 changed=0 skipped=5 +``` +![Scenario 4a - Safety Check](screenshots/wipe-scenario4a-safety-check.png) + +--- + +## Task 4: CI/CD with GitHub Actions (3 pts) + +### Setup + +**Runner type:** Self-hosted runner installed on the Vagrant VM. Since Ansible runs with `ansible_connection=local`, no SSH overhead is needed — the runner executes playbooks directly on the target machine. + +**Installation:** +```bash +# On Vagrant VM: +mkdir ~/actions-runner && cd ~/actions-runner +curl -o actions-runner-linux-x64-2.331.0.tar.gz -L \ + https://github.com/actions/runner/releases/download/v2.331.0/actions-runner-linux-x64-2.331.0.tar.gz +tar xzf ./actions-runner-linux-x64-2.331.0.tar.gz +./config.sh --url https://github.com/3llimi/DevOps-Core-Course --token TOKEN +sudo ./svc.sh install && sudo ./svc.sh start +``` + +### Workflow Architecture + +``` +Code Push to main + │ + ▼ + Path Filter ── changes in ansible/? ── No ──► Skip + │ Yes + ▼ + Job: lint (runs-on: ubuntu-latest) + ├── actions/checkout@v4 + ├── pip install ansible ansible-lint + └── ansible-lint playbooks/deploy_python.yml + │ Pass + ▼ + Job: deploy (needs: lint, runs-on: self-hosted) + ├── actions/checkout@v4 + ├── ansible-playbook playbooks/deploy_python.yml + └── curl http://localhost:8000/health +``` + +### Path Filters + +```yaml +paths: + - 'ansible/vars/app_python.yml' + - 'ansible/playbooks/deploy_python.yml' + - 'ansible/roles/web_app/**' + - '.github/workflows/ansible-deploy.yml' +``` + +Path filters ensure the workflow only triggers when relevant code changes. Pushing only documentation or unrelated files does not trigger a deploy. + +### ansible-lint Passing Evidence + +``` +Passed: 0 failure(s), 0 warning(s) in 8 files processed of 8 encountered. +Last profile that met the validation criteria was 'production'. +``` +![Python Workflow Success](screenshots/cicd-python-workflow-success.png) + +### Deploy Job Evidence + +``` +TASK [web_app : Report deployment success] +ok: [localhost] => + msg: devops-python is running at http://localhost:8000 + +PLAY RECAP +localhost : ok=21 changed=0 unreachable=0 failed=0 +``` + +### Verification Step Evidence + +``` +Run sleep 5 && curl -f http://localhost:8000/health +{"status":"healthy","timestamp":"2026-02-22T12:31:45","uptime_seconds":10} +``` + +### Research Answers + +**Q: Security implications of SSH keys in GitHub Secrets?** +GitHub Secrets are encrypted at rest and masked in logs. Risks include: repo admins can create workflows that exfiltrate secrets, and malicious PRs could access secrets if `pull_request_target` is misused. Using a self-hosted runner mitigates this — secrets never leave the local network, and the runner token is the only credential stored in GitHub. + +**Q: How would you implement staging → production pipeline?** +Add a `staging` environment job that deploys to a staging VM and runs integration tests. Add a `production` job with `environment: production` and GitHub required reviewers — the deploy pauses until a human approves it in the GitHub UI. + +**Q: What would you add to make rollbacks possible?** +Pin `docker_tag` to a specific image digest instead of `latest`. Store the previous working tag in a GitHub Actions artifact or variable. On failure, re-trigger the workflow with the last known-good tag passed as `-e "docker_tag=sha256:previous"`. + +**Q: How does self-hosted runner improve security vs GitHub-hosted?** +Network traffic stays local — credentials never traverse the internet. The runner token is the only secret stored in GitHub. Secrets are only accessible to jobs on your specific runner, not GitHub's shared infrastructure. + +--- + +## Task 5: Documentation + +This file serves as the primary documentation for Lab 6. All roles contain inline comments explaining the purpose of each block, rescue/always section, tag, and variable. + +--- + +## Bonus Part 1: Multi-App Deployment (1.5 pts) + +### Role Reusability Pattern + +The same `web_app` role deploys both apps. No code is duplicated — the role is parameterised entirely through variables. Each app has its own vars file: + +- `ansible/vars/app_python.yml` — port 8000, image `3llimi/devops-info-service` +- `ansible/vars/app_bonus.yml` — port 8001, image `3llimi/devops-info-service-go` + +The port difference (8000 vs 8001) allows both containers to run simultaneously on the same VM without conflict. + +### Directory Structure + +``` +ansible/ +├── vars/ +│ ├── app_python.yml # Python app variables +│ └── app_bonus.yml # Go app variables +└── playbooks/ + ├── deploy_python.yml # Deploy Python only + ├── deploy_bonus.yml # Deploy Go only + └── deploy_all.yml # Deploy both using include_role +``` + +### deploy_all.yml — include_role Pattern + +```yaml +tasks: + - name: Deploy Python App + ansible.builtin.include_role: + name: web_app + vars: + app_name: devops-python + app_port: 8000 + ... + + - name: Deploy Bonus App + ansible.builtin.include_role: + name: web_app + vars: + app_name: devops-go + app_port: 8001 + app_internal_port: 8080 + ... +``` + +### Both Apps Running Evidence + +```bash +$ ansible-playbook playbooks/deploy_all.yml +# PLAY RECAP: ok=41 changed=7 failed=0 + +$ docker ps +CONTAINER ID IMAGE PORTS +79883e6aa01d 3llimi/devops-info-service-go:latest 0.0.0.0:8001->8080/tcp +71a88aec2ef9 3llimi/devops-info-service:latest 0.0.0.0:8000->8000/tcp + +$ curl http://localhost:8000/health +{"status":"healthy","timestamp":"2026-02-22T12:25:40.976379+00:00","uptime_seconds":80} + +$ curl http://localhost:8001/health +{"status":"healthy","timestamp":"2026-02-22T12:25:41Z","uptime_seconds":50} +``` + +### Independent Wipe Evidence + +```bash +# Wipe only Python app +ansible-playbook playbooks/deploy_python.yml \ + -e "web_app_wipe=true" --tags web_app_wipe + +$ docker ps +# Only devops-go running — Python app removed, Go app untouched ✅ +CONTAINER ID IMAGE PORTS +79883e6aa01d 3llimi/devops-info-service-go:latest 0.0.0.0:8001->8080/tcp +``` + +### Why Independent Wipe Works + +`compose_project_dir` is derived from `app_name` (`/opt/{{ app_name }}`). Since each app has a different `app_name`, each gets its own directory and Docker Compose project. Wipe logic for one app only removes its own directory — the other app's directory is untouched. + +### Idempotency for Multi-App + +```bash +# Run twice — second run shows no changes +ansible-playbook playbooks/deploy_all.yml +ansible-playbook playbooks/deploy_all.yml +# PLAY RECAP: ok=41 changed=0 failed=0 ✅ +``` + +--- + +## Bonus Part 2: Multi-App CI/CD (1 pt) + +### Two Independent Workflows + +**`.github/workflows/ansible-deploy.yml`** — Python app: +```yaml +paths: + - 'ansible/vars/app_python.yml' + - 'ansible/playbooks/deploy_python.yml' + - 'ansible/roles/web_app/**' +``` + +**`.github/workflows/ansible-deploy-bonus.yml`** — Go app: +```yaml +paths: + - 'ansible/vars/app_bonus.yml' + - 'ansible/playbooks/deploy_bonus.yml' + - 'ansible/roles/web_app/**' +``` + +### Path Filter Logic + +| Change | Python workflow | Bonus workflow | +|--------|----------------|----------------| +| `vars/app_python.yml` | ✅ Triggers | ❌ Skips | +| `vars/app_bonus.yml` | ❌ Skips | ✅ Triggers | +| `roles/web_app/**` | ✅ Triggers | ✅ Triggers | +| `docs/LAB06.md` | ❌ Skips | ❌ Skips | + +When `roles/web_app/**` changes, **both workflows fire** — correct behaviour since both apps use the shared role and both should be redeployed after a role change. + +### Both Workflows Passing + +Both `ansible-deploy.yml` and `ansible-deploy-bonus.yml` show green in GitHub Actions with lint and deploy jobs passing independently. + +![Independent Workflows](screenshots/cicd-independent-workflows.png) +![Python Workflow Success](screenshots/cicd-python-workflow-success.png) +![Go App Workflow Success](screenshots/cicd-bonus-workflow-success.png) + +--- + +## Summary + +### Technologies Used +- Ansible 2.10.8 on Ubuntu 22.04 (Vagrant VM, `ansible_connection=local`) +- Docker Compose v2 plugin (`docker compose` not `docker-compose`) +- GitHub Actions with self-hosted runner on the Vagrant VM +- Jinja2 templating for docker-compose.yml generation + +### Key Learnings + +- Blocks eliminate repetitive `become: true` and tag annotations — apply once at block level +- The `rescue/always` pattern makes failures informative rather than cryptic +- Double-gating (variable + tag) is a clean safety mechanism for destructive operations +- Role dependencies in `meta/main.yml` encode infrastructure order as code — can't accidentally skip Docker before deploying a container +- Path filters in CI/CD are as important as the workflow itself — without them every push triggers unnecessary deploys +- `docker compose` v2 (plugin) behaves differently from `docker-compose` v1 — using `ansible.builtin.command` avoids module version mismatches + +### Challenges & Solutions + +- **Port conflict on first deploy:** Lab 5 `devops-info-service` container was still running on port 8000. Solution: stopped and removed the old container before deploying the new Compose-managed one. +- **Stale Docker network:** First failed deploy left a stale `devops-app_app_network` network that blocked the second attempt. Solution: ran `docker compose down` manually to clean up, then reran the playbook. +- **ansible-lint violations:** 22 violations caught across meta files (missing `author`, `license`), task key ordering, `ignore_errors` usage, and variable naming. Fixed iteratively by running lint locally and in CI. +- **`docker compose` vs `docker-compose`:** The `community.docker.docker_compose` Ansible module targets the older v1 binary. Used `ansible.builtin.command: docker compose ...` instead to work with the v2 plugin. +- **Main workflow using wrong playbook:** After migrating to multi-app setup, the main workflow was still calling `deploy.yml` which deployed `devops-app` on port 8000 — conflicting with `devops-python`. Fixed by updating the workflow to use `deploy_python.yml`. + +### Total Time +Approximately 10 hours including iterative lint fixing, wipe scenario testing, runner setup, and CI/CD debugging. \ No newline at end of file diff --git a/ansible/docs/screenshots/cicd-bonus-workflow-success.png b/ansible/docs/screenshots/cicd-bonus-workflow-success.png new file mode 100644 index 0000000000..bbe271a365 Binary files /dev/null and b/ansible/docs/screenshots/cicd-bonus-workflow-success.png differ diff --git a/ansible/docs/screenshots/cicd-independent-workflows.png b/ansible/docs/screenshots/cicd-independent-workflows.png new file mode 100644 index 0000000000..a831ffb29c Binary files /dev/null and b/ansible/docs/screenshots/cicd-independent-workflows.png differ diff --git a/ansible/docs/screenshots/cicd-python-workflow-success.png b/ansible/docs/screenshots/cicd-python-workflow-success.png new file mode 100644 index 0000000000..930d7efa2d Binary files /dev/null and b/ansible/docs/screenshots/cicd-python-workflow-success.png differ diff --git a/ansible/docs/screenshots/wipe-scenario1-normal-deploy.png b/ansible/docs/screenshots/wipe-scenario1-normal-deploy.png new file mode 100644 index 0000000000..0ca1082fd6 Binary files /dev/null and b/ansible/docs/screenshots/wipe-scenario1-normal-deploy.png differ diff --git a/ansible/docs/screenshots/wipe-scenario2-wipe-only.png b/ansible/docs/screenshots/wipe-scenario2-wipe-only.png new file mode 100644 index 0000000000..ac36da8bdb Binary files /dev/null and b/ansible/docs/screenshots/wipe-scenario2-wipe-only.png differ diff --git a/ansible/docs/screenshots/wipe-scenario3-clean-reinstall.png b/ansible/docs/screenshots/wipe-scenario3-clean-reinstall.png new file mode 100644 index 0000000000..20ba36ffc8 Binary files /dev/null and b/ansible/docs/screenshots/wipe-scenario3-clean-reinstall.png differ diff --git a/ansible/docs/screenshots/wipe-scenario4a-safety-check.png b/ansible/docs/screenshots/wipe-scenario4a-safety-check.png new file mode 100644 index 0000000000..2c67d65518 Binary files /dev/null and b/ansible/docs/screenshots/wipe-scenario4a-safety-check.png differ diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index 20fde46a8f..ad412dca1d 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -1,18 +1,28 @@ -$ANSIBLE_VAULT;1.1;AES256 -37353731323564313431383962323137383261346563303561356530366133623439363562346662 -3562336265383731613638653637666136343761336338650a393861646465333163373232373437 -39343763383931633733366166626137613030356337353862636634656331626131383938653334 -3636626263653239610a396130313936626263623938316161386539383465653762613134333730 -31363135623164373236383930366137663436366138623330303866646332303030653932353264 -30366633636662666138646336386565636361346133303137386165656434303538356337376531 -63663037656132313565623034663864303561626132663332633561643737633561363830636462 -30613934623830653139646165303863656535666138323561643264643766383764626634626436 -64376464326434623464306339333430656263386563313730303761623436383432353836333331 -33353362326563633630313035633537626235653831663933336434333933353031363836646139 -38393733663936343162343131393566376232636438623938366237336331386232666566343034 -33663334366338333365396236373330353261393731343832626436626162396339663130386365 -38346636336564323365666238333636303836656264306362393635643934326364613362383732 -32336333636335323636353563613636323333346135366230346133363831313333396131303630 -31303731386661376338653331326339373066366666626365326663333766336131323137393364 -36323434326563393536663934333835663732333631653864636139313935303363643563623636 -3665 +--- +# Non-sensitive global variables +app_name: devops-app +docker_image: 3llimi/devops-info-service +docker_tag: latest +app_port: 8000 +app_internal_port: 8000 +compose_project_dir: "/opt/{{ app_name }}" +docker_compose_version: "3.8" + +docker_user: vagrant +deploy_user: vagrant + +common_packages: + - python3-pip + - curl + - git + - vim + - htop + - wget + - unzip + - ca-certificates + - gnupg + - lsb-release + - apt-transport-https + +# vault-encrypted value in production: +app_secret_key: "use-vault-in-production" diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini index e0fa72af1d..84218471a9 100644 --- a/ansible/inventory/hosts.ini +++ b/ansible/inventory/hosts.ini @@ -1,5 +1,5 @@ [webservers] localhost ansible_connection=local ansible_user=vagrant -[webservers:vars] +[all:vars] ansible_python_interpreter=/usr/bin/python3 diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml index fb448bca6e..26a4c7ab97 100644 --- a/ansible/playbooks/deploy.yml +++ b/ansible/playbooks/deploy.yml @@ -1,7 +1,15 @@ --- -- name: Deploy application +# Usage: +# Normal deploy: ansible-playbook playbooks/deploy.yml +# App only: ansible-playbook playbooks/deploy.yml --tags app_deploy +# Wipe only: ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +# Clean reinstall: ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" + +- name: Deploy web application hosts: webservers - become: no + become: true + gather_facts: true roles: - - app_deploy + - role: web_app + tags: [web_app] diff --git a/ansible/playbooks/deploy_all.yml b/ansible/playbooks/deploy_all.yml new file mode 100644 index 0000000000..941f57a575 --- /dev/null +++ b/ansible/playbooks/deploy_all.yml @@ -0,0 +1,32 @@ +--- +- name: Deploy All Applications + hosts: webservers + become: true + gather_facts: true + + tasks: + - name: Deploy Python App + ansible.builtin.include_role: + name: web_app + vars: + app_name: devops-python + docker_image: 3llimi/devops-info-service + docker_tag: latest + app_port: 8000 + app_internal_port: 8000 + compose_project_dir: /opt/devops-python + app_environment: + APP_LANG: python + + - name: Deploy Bonus App + ansible.builtin.include_role: + name: web_app + vars: + app_name: devops-go + docker_image: 3llimi/devops-info-service-go + docker_tag: latest + app_port: 8001 + app_internal_port: 8080 + compose_project_dir: /opt/devops-go + app_environment: + APP_LANG: go diff --git a/ansible/playbooks/deploy_bonus.yml b/ansible/playbooks/deploy_bonus.yml new file mode 100644 index 0000000000..bc6be243a2 --- /dev/null +++ b/ansible/playbooks/deploy_bonus.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Bonus Application + hosts: webservers + become: true + gather_facts: true + vars_files: + - ../vars/app_bonus.yml + roles: + - role: web_app diff --git a/ansible/playbooks/deploy_python.yml b/ansible/playbooks/deploy_python.yml new file mode 100644 index 0000000000..b9239d6fea --- /dev/null +++ b/ansible/playbooks/deploy_python.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Python Application + hosts: webservers + become: true + gather_facts: true + vars_files: + - ../vars/app_python.yml + roles: + - role: web_app diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml index f53efb0248..e50083be1b 100644 --- a/ansible/playbooks/provision.yml +++ b/ansible/playbooks/provision.yml @@ -1,8 +1,20 @@ --- +# Usage: +# Full provision: ansible-playbook playbooks/provision.yml +# Only docker: ansible-playbook playbooks/provision.yml --tags docker +# Skip common: ansible-playbook playbooks/provision.yml --skip-tags common +# Packages only: ansible-playbook playbooks/provision.yml --tags packages +# Dry-run: ansible-playbook playbooks/provision.yml --check +# List tags: ansible-playbook playbooks/provision.yml --list-tags + - name: Provision web servers hosts: webservers - become: yes + become: true + gather_facts: true roles: - - common - - docker + - role: common + tags: [common] + + - role: docker + tags: [docker] diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml index 16a0a9ed3a..f9054847f8 100644 --- a/ansible/roles/common/defaults/main.yml +++ b/ansible/roles/common/defaults/main.yml @@ -7,3 +7,9 @@ common_packages: - htop - wget - unzip + - ca-certificates + - gnupg + - lsb-release + - apt-transport-https + +common_log_path: /tmp/ansible_common_complete.log diff --git a/ansible/roles/common/meta/main.yml b/ansible/roles/common/meta/main.yml new file mode 100644 index 0000000000..047e938d2e --- /dev/null +++ b/ansible/roles/common/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: vagrant + role_name: common + description: Baseline system packages and user configuration + license: MIT + min_ansible_version: "2.10" +dependencies: [] diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml index 7f74c167c1..8d93c7c195 100644 --- a/ansible/roles/common/tasks/main.yml +++ b/ansible/roles/common/tasks/main.yml @@ -1,10 +1,65 @@ --- -- name: Update apt cache - apt: - update_cache: yes - cache_valid_time: 3600 - -- name: Install common packages - apt: - name: "{{ common_packages }}" - state: present +- name: Package installation block + become: true + tags: + - packages + - common + block: + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + + - name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + rescue: + - name: "[RESCUE] Fix broken apt and retry" + ansible.builtin.apt: + update_cache: true + force_apt_get: true + + - name: "[RESCUE] Retry package installation" + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + update_cache: true + + always: + - name: "[ALWAYS] Write completion marker" + ansible.builtin.copy: + dest: "{{ common_log_path }}" + content: | + Ansible common role - packages completed + Host: {{ inventory_hostname }} + mode: "0644" + +- name: User management block + become: true + tags: + - users + - common + block: + - name: Ensure vagrant is in docker group + ansible.builtin.user: + name: "{{ docker_user | default('vagrant') }}" + groups: docker + append: true + + rescue: + - name: "[RESCUE] Report user management failure" + ansible.builtin.debug: + msg: "User management failed - docker group may not exist yet" + + always: + - name: "[ALWAYS] Verify group membership" + ansible.builtin.command: "id {{ docker_user | default('vagrant') }}" + register: common_id_result + changed_when: false + failed_when: false + + - name: "[ALWAYS] Report membership" + ansible.builtin.debug: + msg: "{{ common_id_result.stdout | default('user not found') }}" diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml index dbb7083a98..5d5120343e 100644 --- a/ansible/roles/docker/defaults/main.yml +++ b/ansible/roles/docker/defaults/main.yml @@ -1,2 +1,19 @@ --- docker_user: vagrant + +docker_apt_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_apt_key_path: "/etc/apt/keyrings/docker.gpg" +docker_apt_repo: "https://download.docker.com/linux/ubuntu" + +docker_packages: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + +docker_daemon_config: + log-driver: "json-file" + log-opts: + max-size: "10m" + max-file: "3" diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml index 3627303e6b..a7d1929fa6 100644 --- a/ansible/roles/docker/handlers/main.yml +++ b/ansible/roles/docker/handlers/main.yml @@ -1,5 +1,6 @@ --- -- name: restart docker - service: +- name: Restart Docker + ansible.builtin.service: name: docker state: restarted + become: true diff --git a/ansible/roles/docker/meta/main.yml b/ansible/roles/docker/meta/main.yml new file mode 100644 index 0000000000..2c7d496c2c --- /dev/null +++ b/ansible/roles/docker/meta/main.yml @@ -0,0 +1,8 @@ +--- +galaxy_info: + author: vagrant + role_name: docker + description: Install and configure Docker CE with Compose plugin + license: MIT + min_ansible_version: "2.10" +dependencies: [] diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml index d257e0b0c0..9b25caafc6 100644 --- a/ansible/roles/docker/tasks/main.yml +++ b/ansible/roles/docker/tasks/main.yml @@ -1,54 +1,106 @@ --- -- name: Install prerequisites - apt: - name: - - ca-certificates - - curl - - gnupg - state: present - -- name: Create keyrings directory - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' - -- name: Add Docker GPG key - shell: | - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg - chmod a+r /etc/apt/keyrings/docker.gpg - args: - creates: /etc/apt/keyrings/docker.gpg - -- name: Add Docker repository - apt_repository: - repo: "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" - state: present - filename: docker - -- name: Install Docker packages - apt: - name: - - docker-ce - - docker-ce-cli - - containerd.io - state: present - update_cache: yes - notify: restart docker - -- name: Ensure Docker service is running and enabled - service: - name: docker - state: started - enabled: yes - -- name: Add user to docker group - user: - name: "{{ docker_user }}" - groups: docker - append: yes - -- name: Install python3-docker - apt: - name: python3-docker - state: present +- name: Docker installation block + become: true + tags: + - docker + - docker_install + block: + - name: Create /etc/apt/keyrings directory + ansible.builtin.file: + path: /etc/apt/keyrings + state: directory + mode: "0755" + + - name: Download Docker GPG key + ansible.builtin.get_url: + url: "{{ docker_apt_key_url }}" + dest: "{{ docker_apt_key_path }}" + mode: "0644" + force: false + + - name: Add Docker APT repository + ansible.builtin.apt_repository: + repo: "deb [arch=amd64 signed-by={{ docker_apt_key_path }}] {{ docker_apt_repo }} {{ ansible_distribution_release }} stable" + state: present + filename: docker + update_cache: true + + - name: Install Docker packages + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + + rescue: + - name: "[RESCUE] Wait 10 seconds before retrying" + ansible.builtin.pause: + seconds: 10 + + - name: "[RESCUE] Force re-download Docker GPG key" + ansible.builtin.get_url: + url: "{{ docker_apt_key_url }}" + dest: "{{ docker_apt_key_path }}" + mode: "0644" + force: true + + - name: "[RESCUE] Retry Docker package install" + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + update_cache: true + + always: + - name: "[ALWAYS] Ensure Docker service is enabled and started" + ansible.builtin.service: + name: docker + enabled: true + state: started + failed_when: false + +- name: Docker configuration block + become: true + tags: + - docker + - docker_config + block: + - name: Ensure /etc/docker directory exists + ansible.builtin.file: + path: /etc/docker + state: directory + mode: "0755" + + - name: Write Docker daemon.json + ansible.builtin.copy: + dest: /etc/docker/daemon.json + content: "{{ docker_daemon_config | to_nice_json }}\n" + mode: "0644" + notify: Restart Docker + + - name: Add docker user to docker group + ansible.builtin.user: + name: "{{ docker_user }}" + groups: docker + append: true + + - name: Install Python Docker SDK + ansible.builtin.pip: + name: + - docker + - docker-compose + state: present + executable: pip3 + + rescue: + - name: "[RESCUE] Log Docker configuration failure" + ansible.builtin.debug: + msg: "Docker configuration failed - check Docker installation" + + always: + - name: "[ALWAYS] Verify Docker is responding" + ansible.builtin.command: docker info + register: docker_info + changed_when: false + failed_when: false + + - name: "[ALWAYS] Report Docker status" + ansible.builtin.debug: + msg: "Docker running: {{ docker_info.rc == 0 }}" diff --git a/ansible/roles/web_app/defaults/main.yml b/ansible/roles/web_app/defaults/main.yml new file mode 100644 index 0000000000..575b22f264 --- /dev/null +++ b/ansible/roles/web_app/defaults/main.yml @@ -0,0 +1,13 @@ +--- +app_name: devops-app +docker_image: 3llimi/devops-info-service +docker_tag: latest +app_port: 8000 +app_internal_port: 8000 +compose_project_dir: "/opt/{{ app_name }}" +docker_compose_version: "3.8" +app_environment: {} +app_secret_key: "change-me-use-vault-in-production" + +# Wipe logic - both variable AND tag required to trigger +web_app_wipe: false diff --git a/ansible/roles/web_app/handlers/main.yml b/ansible/roles/web_app/handlers/main.yml new file mode 100644 index 0000000000..f63f546c3b --- /dev/null +++ b/ansible/roles/web_app/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Recreate containers + ansible.builtin.command: > + docker compose -f {{ compose_project_dir }}/docker-compose.yml up --detach --force-recreate + changed_when: true + become: true diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml new file mode 100644 index 0000000000..e2df03c3ad --- /dev/null +++ b/ansible/roles/web_app/meta/main.yml @@ -0,0 +1,9 @@ +--- +galaxy_info: + author: vagrant + role_name: web_app + description: Deploy a containerised web application via Docker Compose + license: MIT + min_ansible_version: "2.10" +dependencies: + - role: docker diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml new file mode 100644 index 0000000000..056a37b73d --- /dev/null +++ b/ansible/roles/web_app/tasks/main.yml @@ -0,0 +1,78 @@ +--- +- name: Include wipe tasks + ansible.builtin.include_tasks: wipe.yml + tags: + - web_app_wipe + +- name: Deploy application with Docker Compose + become: true + tags: + - app_deploy + - compose + block: + - name: Create application directory + ansible.builtin.file: + path: "{{ compose_project_dir }}" + state: directory + owner: "{{ docker_user | default('vagrant') }}" + group: "{{ docker_user | default('vagrant') }}" + mode: "0755" + + - name: Template docker-compose.yml to target host + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ compose_project_dir }}/docker-compose.yml" + owner: "{{ docker_user | default('vagrant') }}" + group: "{{ docker_user | default('vagrant') }}" + mode: "0640" + + - name: Pull Docker image + ansible.builtin.command: "docker pull {{ docker_image }}:{{ docker_tag }}" + register: web_app_pull_result + changed_when: "'Pull complete' in web_app_pull_result.stdout or 'Downloaded' in web_app_pull_result.stdout" + + - name: Deploy with Docker Compose + ansible.builtin.command: > + docker compose -f {{ compose_project_dir }}/docker-compose.yml up --detach --remove-orphans + register: web_app_compose_result + changed_when: "'Started' in web_app_compose_result.stderr or 'Recreated' in web_app_compose_result.stderr" + + - name: Wait for application to be healthy + ansible.builtin.uri: + url: "http://localhost:{{ app_port }}/health" + status_code: 200 + register: web_app_health_check + until: web_app_health_check.status == 200 + retries: 10 + delay: 5 + + - name: Report deployment success + ansible.builtin.debug: + msg: "{{ app_name }} is running at http://localhost:{{ app_port }}" + + rescue: + - name: "[RESCUE] Show container logs" + ansible.builtin.command: > + docker compose -f {{ compose_project_dir }}/docker-compose.yml logs --tail=30 + register: web_app_compose_logs + changed_when: false + failed_when: false + + - name: "[RESCUE] Print logs" + ansible.builtin.debug: + msg: "{{ web_app_compose_logs.stdout_lines | default([]) }}" + + - name: "[RESCUE] Fail with clear message" + ansible.builtin.fail: + msg: "Deployment of {{ app_name }} failed - check logs above" + + always: + - name: "[ALWAYS] Show running containers" + ansible.builtin.command: docker ps --format "table {% raw %}{{.Names}}\t{{.Status}}\t{{.Ports}}{% endraw %}" + register: web_app_docker_ps + changed_when: false + failed_when: false + + - name: "[ALWAYS] Report container status" + ansible.builtin.debug: + msg: "{{ web_app_docker_ps.stdout_lines }}" diff --git a/ansible/roles/web_app/tasks/wipe.yml b/ansible/roles/web_app/tasks/wipe.yml new file mode 100644 index 0000000000..2c2d1fc6da --- /dev/null +++ b/ansible/roles/web_app/tasks/wipe.yml @@ -0,0 +1,30 @@ +--- +- name: "Wipe application" + when: web_app_wipe | bool + become: true + tags: + - web_app_wipe + block: + - name: "[WIPE] Announce wipe operation" + ansible.builtin.debug: + msg: "WARNING - Removing {{ app_name }} from {{ compose_project_dir }}" + + - name: "[WIPE] Stop and remove containers" + ansible.builtin.command: > + docker compose -f {{ compose_project_dir }}/docker-compose.yml down --remove-orphans + changed_when: true + failed_when: false + + - name: "[WIPE] Remove application directory" + ansible.builtin.file: + path: "{{ compose_project_dir }}" + state: absent + + - name: "[WIPE] Remove Docker image" + ansible.builtin.command: "docker rmi {{ docker_image }}:{{ docker_tag }}" + changed_when: true + failed_when: false + + - name: "[WIPE] Confirm completion" + ansible.builtin.debug: + msg: "{{ app_name }} has been wiped from {{ compose_project_dir }}" diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..71be74032e --- /dev/null +++ b/ansible/roles/web_app/templates/docker-compose.yml.j2 @@ -0,0 +1,35 @@ +version: '{{ docker_compose_version }}' + +services: + {{ app_name }}: + image: {{ docker_image }}:{{ docker_tag }} + container_name: {{ app_name }} + ports: + - "{{ app_port }}:{{ app_internal_port }}" + environment: + APP_ENV: production + APP_PORT: "{{ app_internal_port }}" + SECRET_KEY: "{{ app_secret_key }}" +{% if app_environment %} +{% for key, value in app_environment.items() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + restart: unless-stopped + networks: + - app_network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:{{ app_internal_port }}/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 15s + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + +networks: + app_network: + driver: bridge diff --git a/ansible/vars/app_bonus.yml b/ansible/vars/app_bonus.yml new file mode 100644 index 0000000000..b7f0925665 --- /dev/null +++ b/ansible/vars/app_bonus.yml @@ -0,0 +1,9 @@ +--- +app_name: devops-go +docker_image: 3llimi/devops-info-service-go +docker_tag: latest +app_port: 8001 +app_internal_port: 8080 +compose_project_dir: "/opt/devops-go" +app_environment: + APP_LANG: go diff --git a/ansible/vars/app_python.yml b/ansible/vars/app_python.yml new file mode 100644 index 0000000000..373f0ae01c --- /dev/null +++ b/ansible/vars/app_python.yml @@ -0,0 +1,9 @@ +--- +app_name: devops-python +docker_image: 3llimi/devops-info-service +docker_tag: latest +app_port: 8000 +app_internal_port: 8000 +compose_project_dir: "/opt/devops-python" +app_environment: + APP_LANG: python